xref: /freebsd/sys/dev/qlnx/qlnxe/qlnx_os.c (revision d37eb51047221dc3322b34db1038ff3aa533883f)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File: qlnx_os.c
30  * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
31  */
32 
33 #include <sys/cdefs.h>
34 #include "qlnx_os.h"
35 #include "bcm_osal.h"
36 #include "reg_addr.h"
37 #include "ecore_gtt_reg_addr.h"
38 #include "ecore.h"
39 #include "ecore_chain.h"
40 #include "ecore_status.h"
41 #include "ecore_hw.h"
42 #include "ecore_rt_defs.h"
43 #include "ecore_init_ops.h"
44 #include "ecore_int.h"
45 #include "ecore_cxt.h"
46 #include "ecore_spq.h"
47 #include "ecore_init_fw_funcs.h"
48 #include "ecore_sp_commands.h"
49 #include "ecore_dev_api.h"
50 #include "ecore_l2_api.h"
51 #include "ecore_mcp.h"
52 #include "ecore_hw_defs.h"
53 #include "mcp_public.h"
54 #include "ecore_iro.h"
55 #include "nvm_cfg.h"
56 #include "ecore_dbg_fw_funcs.h"
57 #include "ecore_iov_api.h"
58 #include "ecore_vf_api.h"
59 
60 #include "qlnx_ioctl.h"
61 #include "qlnx_def.h"
62 #include "qlnx_ver.h"
63 
64 #ifdef QLNX_ENABLE_IWARP
65 #include "qlnx_rdma.h"
66 #endif /* #ifdef QLNX_ENABLE_IWARP */
67 
68 #ifdef CONFIG_ECORE_SRIOV
69 #include <sys/nv.h>
70 #include <sys/iov_schema.h>
71 #include <dev/pci/pci_iov.h>
72 #endif /* #ifdef CONFIG_ECORE_SRIOV */
73 
74 #include <sys/smp.h>
75 
76 /*
77  * static functions
78  */
79 /*
80  * ioctl related functions
81  */
82 static void qlnx_add_sysctls(qlnx_host_t *ha);
83 
84 /*
85  * main driver
86  */
87 static void qlnx_release(qlnx_host_t *ha);
88 static void qlnx_fp_isr(void *arg);
89 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
90 static void qlnx_init(void *arg);
91 static void qlnx_init_locked(qlnx_host_t *ha);
92 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
93 static int qlnx_set_promisc(qlnx_host_t *ha, int enabled);
94 static int qlnx_set_allmulti(qlnx_host_t *ha, int enabled);
95 static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data);
96 static int qlnx_media_change(if_t ifp);
97 static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr);
98 static void qlnx_stop(qlnx_host_t *ha);
99 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
100 		struct mbuf **m_headp);
101 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
102 static uint32_t qlnx_get_optics(qlnx_host_t *ha,
103 			struct qlnx_link_output *if_link);
104 static int qlnx_transmit(if_t ifp, struct mbuf  *mp);
105 static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp,
106 		struct mbuf *mp);
107 static void qlnx_qflush(if_t ifp);
108 
109 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
110 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
111 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
112 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
113 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
114 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
115 
116 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
117 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
118 
119 static int qlnx_nic_setup(struct ecore_dev *cdev,
120 		struct ecore_pf_params *func_params);
121 static int qlnx_nic_start(struct ecore_dev *cdev);
122 static int qlnx_slowpath_start(qlnx_host_t *ha);
123 static int qlnx_slowpath_stop(qlnx_host_t *ha);
124 static int qlnx_init_hw(qlnx_host_t *ha);
125 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
126 		char ver_str[VER_SIZE]);
127 static void qlnx_unload(qlnx_host_t *ha);
128 static int qlnx_load(qlnx_host_t *ha);
129 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
130 		uint32_t add_mac);
131 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
132 		uint32_t len);
133 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
134 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
135 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
136 		struct qlnx_rx_queue *rxq);
137 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
138 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
139 		int hwfn_index);
140 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
141 		int hwfn_index);
142 static void qlnx_timer(void *arg);
143 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
144 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
145 static void qlnx_trigger_dump(qlnx_host_t *ha);
146 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
147 			struct qlnx_tx_queue *txq);
148 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
149 		struct qlnx_tx_queue *txq);
150 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
151 		int lro_enable);
152 static void qlnx_fp_taskqueue(void *context, int pending);
153 static void qlnx_sample_storm_stats(qlnx_host_t *ha);
154 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
155 		struct qlnx_agg_info *tpa);
156 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
157 
158 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
159 
160 /*
161  * Hooks to the Operating Systems
162  */
163 static int qlnx_pci_probe (device_t);
164 static int qlnx_pci_attach (device_t);
165 static int qlnx_pci_detach (device_t);
166 
167 #ifndef QLNX_VF
168 
169 #ifdef CONFIG_ECORE_SRIOV
170 
171 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
172 static void qlnx_iov_uninit(device_t dev);
173 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
174 static void qlnx_initialize_sriov(qlnx_host_t *ha);
175 static void qlnx_pf_taskqueue(void *context, int pending);
176 static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
177 static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
178 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);
179 
180 #endif /* #ifdef CONFIG_ECORE_SRIOV */
181 
182 static device_method_t qlnx_pci_methods[] = {
183 	/* Device interface */
184 	DEVMETHOD(device_probe, qlnx_pci_probe),
185 	DEVMETHOD(device_attach, qlnx_pci_attach),
186 	DEVMETHOD(device_detach, qlnx_pci_detach),
187 
188 #ifdef CONFIG_ECORE_SRIOV
189 	DEVMETHOD(pci_iov_init, qlnx_iov_init),
190 	DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
191 	DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
192 #endif /* #ifdef CONFIG_ECORE_SRIOV */
193 	{ 0, 0 }
194 };
195 
196 static driver_t qlnx_pci_driver = {
197 	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
198 };
199 
200 MODULE_VERSION(if_qlnxe,1);
201 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0);
202 
203 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
204 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
205 
206 #else
207 
208 static device_method_t qlnxv_pci_methods[] = {
209 	/* Device interface */
210 	DEVMETHOD(device_probe, qlnx_pci_probe),
211 	DEVMETHOD(device_attach, qlnx_pci_attach),
212 	DEVMETHOD(device_detach, qlnx_pci_detach),
213 	{ 0, 0 }
214 };
215 
216 static driver_t qlnxv_pci_driver = {
217 	"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
218 };
219 
220 MODULE_VERSION(if_qlnxev,1);
221 DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0);
222 
223 MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
224 MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);
225 
226 #endif /* #ifdef QLNX_VF */
227 
228 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
229 
230 char qlnx_dev_str[128];
231 char qlnx_ver_str[VER_SIZE];
232 char qlnx_name_str[NAME_SIZE];
233 
234 /*
235  * Some PCI Configuration Space Related Defines
236  */
237 
238 #ifndef PCI_VENDOR_QLOGIC
239 #define PCI_VENDOR_QLOGIC		0x1077
240 #endif
241 
242 /* 40G Adapter QLE45xxx*/
243 #ifndef QLOGIC_PCI_DEVICE_ID_1634
244 #define QLOGIC_PCI_DEVICE_ID_1634	0x1634
245 #endif
246 
247 /* 100G Adapter QLE45xxx*/
248 #ifndef QLOGIC_PCI_DEVICE_ID_1644
249 #define QLOGIC_PCI_DEVICE_ID_1644	0x1644
250 #endif
251 
252 /* 25G Adapter QLE45xxx*/
253 #ifndef QLOGIC_PCI_DEVICE_ID_1656
254 #define QLOGIC_PCI_DEVICE_ID_1656	0x1656
255 #endif
256 
257 /* 50G Adapter QLE45xxx*/
258 #ifndef QLOGIC_PCI_DEVICE_ID_1654
259 #define QLOGIC_PCI_DEVICE_ID_1654	0x1654
260 #endif
261 
262 /* 10G/25G/40G Adapter QLE41xxx*/
263 #ifndef QLOGIC_PCI_DEVICE_ID_8070
264 #define QLOGIC_PCI_DEVICE_ID_8070	0x8070
265 #endif
266 
267 /* SRIOV Device (All Speeds) Adapter QLE41xxx*/
268 #ifndef QLOGIC_PCI_DEVICE_ID_8090
269 #define QLOGIC_PCI_DEVICE_ID_8090	0x8090
270 #endif
271 
272 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
273     "qlnxe driver parameters");
274 
275 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
276 static int qlnxe_queue_count = QLNX_DEFAULT_RSS;
277 
278 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
279 		&qlnxe_queue_count, 0, "Multi-Queue queue count");
280 
281 /*
282  * Note on RDMA personality setting
283  *
284  * Read the personality configured in NVRAM
285  * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
286  * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT
287  * use the personality in NVRAM.
288 
289  * Otherwise use t the personality configured in sysctl.
290  *
291  */
292 #define QLNX_PERSONALITY_DEFAULT	0x0  /* use personality in NVRAM */
293 #define QLNX_PERSONALITY_ETH_ONLY	0x1  /* Override with ETH_ONLY */
294 #define QLNX_PERSONALITY_ETH_IWARP	0x2  /* Override with ETH_IWARP */
295 #define QLNX_PERSONALITY_ETH_ROCE	0x3  /* Override with ETH_ROCE */
296 #define QLNX_PERSONALITY_BITS_PER_FUNC	4
297 #define QLNX_PERSONALIY_MASK		0xF
298 
299 /* RDMA configuration; 64bit field allows setting for 16 physical functions*/
300 static uint64_t qlnxe_rdma_configuration = 0x22222222;
301 
302 SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
303                 &qlnxe_rdma_configuration, 0, "RDMA Configuration");
304 
305 int
306 qlnx_vf_device(qlnx_host_t *ha)
307 {
308         uint16_t	device_id;
309 
310         device_id = ha->device_id;
311 
312         if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
313                 return 0;
314 
315         return -1;
316 }
317 
318 static int
319 qlnx_valid_device(qlnx_host_t *ha)
320 {
321         uint16_t device_id;
322 
323         device_id = ha->device_id;
324 
325 #ifndef QLNX_VF
326         if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
327                 (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
328                 (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
329                 (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
330                 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
331                 return 0;
332 #else
333         if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
334 		return 0;
335 
336 #endif /* #ifndef QLNX_VF */
337         return -1;
338 }
339 
340 #ifdef QLNX_ENABLE_IWARP
341 static int
342 qlnx_rdma_supported(struct qlnx_host *ha)
343 {
344 	uint16_t device_id;
345 
346 	device_id = pci_get_device(ha->pci_dev);
347 
348 	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
349 		(device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
350 		(device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
351 		(device_id == QLOGIC_PCI_DEVICE_ID_8070))
352 		return (0);
353 
354 	return (-1);
355 }
356 #endif /* #ifdef QLNX_ENABLE_IWARP */
357 
358 /*
359  * Name:	qlnx_pci_probe
360  * Function:	Validate the PCI device to be a QLA80XX device
361  */
362 static int
363 qlnx_pci_probe(device_t dev)
364 {
365 	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
366 		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
367 	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");
368 
369 	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
370                 return (ENXIO);
371 	}
372 
373         switch (pci_get_device(dev)) {
374 #ifndef QLNX_VF
375 
376         case QLOGIC_PCI_DEVICE_ID_1644:
377 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
378 			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
379 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
380 			QLNX_VERSION_BUILD);
381                 device_set_desc_copy(dev, qlnx_dev_str);
382 
383                 break;
384 
385         case QLOGIC_PCI_DEVICE_ID_1634:
386 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
387 			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
388 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
389 			QLNX_VERSION_BUILD);
390                 device_set_desc_copy(dev, qlnx_dev_str);
391 
392                 break;
393 
394         case QLOGIC_PCI_DEVICE_ID_1656:
395 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
396 			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
397 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
398 			QLNX_VERSION_BUILD);
399                 device_set_desc_copy(dev, qlnx_dev_str);
400 
401                 break;
402 
403         case QLOGIC_PCI_DEVICE_ID_1654:
404 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
405 			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
406 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
407 			QLNX_VERSION_BUILD);
408                 device_set_desc_copy(dev, qlnx_dev_str);
409 
410                 break;
411 
412 	case QLOGIC_PCI_DEVICE_ID_8070:
413 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
414 			"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
415 			" Adapter-Ethernet Function",
416 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
417 			QLNX_VERSION_BUILD);
418 		device_set_desc_copy(dev, qlnx_dev_str);
419 
420 		break;
421 
422 #else
423 	case QLOGIC_PCI_DEVICE_ID_8090:
424 		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
425 			"Qlogic SRIOV PCI CNA (AH) "
426 			"Adapter-Ethernet Function",
427 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
428 			QLNX_VERSION_BUILD);
429 		device_set_desc_copy(dev, qlnx_dev_str);
430 
431 		break;
432 
433 #endif /* #ifndef QLNX_VF */
434 
435         default:
436                 return (ENXIO);
437         }
438 
439 #ifdef QLNX_ENABLE_IWARP
440 	qlnx_rdma_init();
441 #endif /* #ifdef QLNX_ENABLE_IWARP */
442 
443         return (BUS_PROBE_DEFAULT);
444 }
445 
446 static uint16_t
447 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
448 	struct qlnx_tx_queue *txq)
449 {
450 	u16 hw_bd_cons;
451 	u16 ecore_cons_idx;
452 
453 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
454 
455 	ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
456 
457 	return (hw_bd_cons - ecore_cons_idx);
458 }
459 
460 static void
461 qlnx_sp_intr(void *arg)
462 {
463 	struct ecore_hwfn	*p_hwfn;
464 	qlnx_host_t		*ha;
465 	int			i;
466 
467 	p_hwfn = arg;
468 
469 	if (p_hwfn == NULL) {
470 		printf("%s: spurious slowpath intr\n", __func__);
471 		return;
472 	}
473 
474 	ha = (qlnx_host_t *)p_hwfn->p_dev;
475 
476 	QL_DPRINT2(ha, "enter\n");
477 
478 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
479 		if (&ha->cdev.hwfns[i] == p_hwfn) {
480 			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
481 			break;
482 		}
483 	}
484 	QL_DPRINT2(ha, "exit\n");
485 
486 	return;
487 }
488 
489 static void
490 qlnx_sp_taskqueue(void *context, int pending)
491 {
492 	struct ecore_hwfn	*p_hwfn;
493 
494 	p_hwfn = context;
495 
496 	if (p_hwfn != NULL) {
497 		qlnx_sp_isr(p_hwfn);
498 	}
499 	return;
500 }
501 
502 static int
503 qlnx_create_sp_taskqueues(qlnx_host_t *ha)
504 {
505 	int	i;
506 	uint8_t	tq_name[32];
507 
508 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
509                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
510 
511 		bzero(tq_name, sizeof (tq_name));
512 		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);
513 
514 		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
515 
516 		ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
517 			 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
518 
519 		if (ha->sp_taskqueue[i] == NULL)
520 			return (-1);
521 
522 		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
523 			tq_name);
524 
525 		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
526 	}
527 
528 	return (0);
529 }
530 
531 static void
532 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
533 {
534 	int	i;
535 
536 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
537 		if (ha->sp_taskqueue[i] != NULL) {
538 			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
539 			taskqueue_free(ha->sp_taskqueue[i]);
540 		}
541 	}
542 	return;
543 }
544 
545 static void
546 qlnx_fp_taskqueue(void *context, int pending)
547 {
548         struct qlnx_fastpath	*fp;
549         qlnx_host_t		*ha;
550         if_t			ifp;
551 
552         fp = context;
553 
554         if (fp == NULL)
555                 return;
556 
557 	ha = (qlnx_host_t *)fp->edev;
558 
559 	ifp = ha->ifp;
560 
561         if(if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
562                 if (!drbr_empty(ifp, fp->tx_br)) {
563                         if(mtx_trylock(&fp->tx_mtx)) {
564 #ifdef QLNX_TRACE_PERF_DATA
565                                 tx_pkts = fp->tx_pkts_transmitted;
566                                 tx_compl = fp->tx_pkts_completed;
567 #endif
568 
569                                 qlnx_transmit_locked(ifp, fp, NULL);
570 
571 #ifdef QLNX_TRACE_PERF_DATA
572                                 fp->tx_pkts_trans_fp +=
573 					(fp->tx_pkts_transmitted - tx_pkts);
574                                 fp->tx_pkts_compl_fp +=
575 					(fp->tx_pkts_completed - tx_compl);
576 #endif
577                                 mtx_unlock(&fp->tx_mtx);
578                         }
579                 }
580         }
581 
582         QL_DPRINT2(ha, "exit \n");
583         return;
584 }
585 
586 static int
587 qlnx_create_fp_taskqueues(qlnx_host_t *ha)
588 {
589 	int	i;
590 	uint8_t	tq_name[32];
591 	struct qlnx_fastpath *fp;
592 
593 	for (i = 0; i < ha->num_rss; i++) {
594                 fp = &ha->fp_array[i];
595 
596 		bzero(tq_name, sizeof (tq_name));
597 		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
598 
599 		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);
600 
601 		fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
602 					taskqueue_thread_enqueue,
603 					&fp->fp_taskqueue);
604 
605 		if (fp->fp_taskqueue == NULL)
606 			return (-1);
607 
608 		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
609 			tq_name);
610 
611 		QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
612 	}
613 
614 	return (0);
615 }
616 
617 static void
618 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
619 {
620 	int			i;
621 	struct qlnx_fastpath	*fp;
622 
623 	for (i = 0; i < ha->num_rss; i++) {
624                 fp = &ha->fp_array[i];
625 
626 		if (fp->fp_taskqueue != NULL) {
627 			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
628 			taskqueue_free(fp->fp_taskqueue);
629 			fp->fp_taskqueue = NULL;
630 		}
631 	}
632 	return;
633 }
634 
635 static void
636 qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
637 {
638 	int			i;
639 	struct qlnx_fastpath	*fp;
640 
641 	for (i = 0; i < ha->num_rss; i++) {
642                 fp = &ha->fp_array[i];
643 
644 		if (fp->fp_taskqueue != NULL) {
645 			QLNX_UNLOCK(ha);
646 			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
647 			QLNX_LOCK(ha);
648 		}
649 	}
650 	return;
651 }
652 
653 static void
654 qlnx_get_params(qlnx_host_t *ha)
655 {
656 	if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
657 		device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
658 			qlnxe_queue_count);
659 		qlnxe_queue_count = 0;
660 	}
661 	return;
662 }
663 
664 static void
665 qlnx_error_recovery_taskqueue(void *context, int pending)
666 {
667         qlnx_host_t *ha;
668 
669         ha = context;
670 
671         QL_DPRINT2(ha, "enter\n");
672 
673         QLNX_LOCK(ha);
674         qlnx_stop(ha);
675         QLNX_UNLOCK(ha);
676 
677 #ifdef QLNX_ENABLE_IWARP
678 	qlnx_rdma_dev_remove(ha);
679 #endif /* #ifdef QLNX_ENABLE_IWARP */
680 
681         qlnx_slowpath_stop(ha);
682         qlnx_slowpath_start(ha);
683 
684 #ifdef QLNX_ENABLE_IWARP
685 	qlnx_rdma_dev_add(ha);
686 #endif /* #ifdef QLNX_ENABLE_IWARP */
687 
688         qlnx_init(ha);
689 
690         callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
691 
692         QL_DPRINT2(ha, "exit\n");
693 
694         return;
695 }
696 
697 static int
698 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
699 {
700         uint8_t tq_name[32];
701 
702         bzero(tq_name, sizeof (tq_name));
703         snprintf(tq_name, sizeof (tq_name), "ql_err_tq");
704 
705         TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);
706 
707         ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
708                                 taskqueue_thread_enqueue, &ha->err_taskqueue);
709 
710         if (ha->err_taskqueue == NULL)
711                 return (-1);
712 
713         taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);
714 
715         QL_DPRINT1(ha, "%p\n",ha->err_taskqueue);
716 
717         return (0);
718 }
719 
720 static void
721 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
722 {
723         if (ha->err_taskqueue != NULL) {
724                 taskqueue_drain(ha->err_taskqueue, &ha->err_task);
725                 taskqueue_free(ha->err_taskqueue);
726         }
727 
728         ha->err_taskqueue = NULL;
729 
730         return;
731 }
732 
733 /*
734  * Name:	qlnx_pci_attach
735  * Function:	attaches the device to the operating system
736  */
737 static int
738 qlnx_pci_attach(device_t dev)
739 {
740 	qlnx_host_t	*ha = NULL;
741 	uint32_t	rsrc_len_reg __unused = 0;
742 	uint32_t	rsrc_len_dbells = 0;
743 	uint32_t	rsrc_len_msix __unused = 0;
744 	int		i;
745 	uint32_t	mfw_ver;
746 	uint32_t	num_sp_msix = 0;
747 	uint32_t	num_rdma_irqs = 0;
748 
749         if ((ha = device_get_softc(dev)) == NULL) {
750                 device_printf(dev, "cannot get softc\n");
751                 return (ENOMEM);
752         }
753 
754         memset(ha, 0, sizeof (qlnx_host_t));
755 
756         ha->device_id = pci_get_device(dev);
757 
758         if (qlnx_valid_device(ha) != 0) {
759                 device_printf(dev, "device is not valid device\n");
760                 return (ENXIO);
761 	}
762         ha->pci_func = pci_get_function(dev);
763 
764         ha->pci_dev = dev;
765 
766 	sx_init(&ha->hw_lock, "qlnx_hw_lock");
767 
768         ha->flags.lock_init = 1;
769 
770         pci_enable_busmaster(dev);
771 
772 	/*
773 	 * map the PCI BARs
774 	 */
775 
776         ha->reg_rid = PCIR_BAR(0);
777         ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
778                                 RF_ACTIVE);
779 
780         if (ha->pci_reg == NULL) {
781                 device_printf(dev, "unable to map BAR0\n");
782                 goto qlnx_pci_attach_err;
783         }
784 
785         rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
786                                         ha->reg_rid);
787 
788 	ha->dbells_rid = PCIR_BAR(2);
789 	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
790 					SYS_RES_MEMORY,
791 					ha->dbells_rid);
792 	if (rsrc_len_dbells) {
793 		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
794 					&ha->dbells_rid, RF_ACTIVE);
795 
796 		if (ha->pci_dbells == NULL) {
797 			device_printf(dev, "unable to map BAR1\n");
798 			goto qlnx_pci_attach_err;
799 		}
800 		ha->dbells_phys_addr = (uint64_t)
801 			bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
802 
803 		ha->dbells_size = rsrc_len_dbells;
804 	} else {
805 		if (qlnx_vf_device(ha) != 0) {
806 			device_printf(dev, " BAR1 size is zero\n");
807 			goto qlnx_pci_attach_err;
808 		}
809 	}
810 
811         ha->msix_rid = PCIR_BAR(4);
812         ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
813                         &ha->msix_rid, RF_ACTIVE);
814 
815         if (ha->msix_bar == NULL) {
816                 device_printf(dev, "unable to map BAR2\n");
817                 goto qlnx_pci_attach_err;
818 	}
819 
820         rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
821                                         ha->msix_rid);
822 
823 	ha->dbg_level = 0x0000;
824 
825 	QL_DPRINT1(ha, "\n\t\t\t"
826 		"pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
827 		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
828 		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
829 		" msix_avail = 0x%x "
830 		"\n\t\t\t[ncpus = %d]\n",
831 		ha->pci_dev, ha->pci_reg, rsrc_len_reg,
832 		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
833 		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
834 		mp_ncpus);
835 	/*
836 	 * allocate dma tags
837 	 */
838 
839 	if (qlnx_alloc_parent_dma_tag(ha))
840                 goto qlnx_pci_attach_err;
841 
842 	if (qlnx_alloc_tx_dma_tag(ha))
843                 goto qlnx_pci_attach_err;
844 
845 	if (qlnx_alloc_rx_dma_tag(ha))
846                 goto qlnx_pci_attach_err;
847 
848 
849 	if (qlnx_init_hw(ha) != 0)
850 		goto qlnx_pci_attach_err;
851 
852         ha->flags.hw_init = 1;
853 
854 	qlnx_get_params(ha);
855 
856 	if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
857 		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
858 		qlnxe_queue_count = QLNX_MAX_RSS;
859 	}
860 
861 	/*
862 	 * Allocate MSI-x vectors
863 	 */
864 	if (qlnx_vf_device(ha) != 0) {
865 		if (qlnxe_queue_count == 0)
866 			ha->num_rss = QLNX_DEFAULT_RSS;
867 		else
868 			ha->num_rss = qlnxe_queue_count;
869 
870 		num_sp_msix = ha->cdev.num_hwfns;
871 	} else {
872 		uint8_t max_rxq;
873 		uint8_t max_txq;
874 
875 		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
876 		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);
877 
878 		if (max_rxq < max_txq)
879 			ha->num_rss = max_rxq;
880 		else
881 			ha->num_rss = max_txq;
882 
883 		if (ha->num_rss > QLNX_MAX_VF_RSS)
884 			ha->num_rss = QLNX_MAX_VF_RSS;
885 
886 		num_sp_msix = 0;
887 	}
888 
889 	if (ha->num_rss > mp_ncpus)
890 		ha->num_rss = mp_ncpus;
891 
892 	ha->num_tc = QLNX_MAX_TC;
893 
894         ha->msix_count = pci_msix_count(dev);
895 
896 #ifdef QLNX_ENABLE_IWARP
897 
898 	num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);
899 
900 #endif /* #ifdef QLNX_ENABLE_IWARP */
901 
902         if (!ha->msix_count ||
903 		(ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
904                 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
905                         ha->msix_count);
906                 goto qlnx_pci_attach_err;
907         }
908 
909 	if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
910 		ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
911 	else
912 		ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);
913 
914 	QL_DPRINT1(ha, "\n\t\t\t"
915 		"pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
916 		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
917 		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
918 		" msix_avail = 0x%x msix_alloc = 0x%x"
919 		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
920 		 ha->pci_reg, rsrc_len_reg,
921 		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
922 		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
923 		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
924 
925         if (pci_alloc_msix(dev, &ha->msix_count)) {
926                 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
927                         ha->msix_count);
928                 ha->msix_count = 0;
929                 goto qlnx_pci_attach_err;
930         }
931 
932 	/*
933 	 * Initialize slow path interrupt and task queue
934 	 */
935 
936 	if (num_sp_msix) {
937 		if (qlnx_create_sp_taskqueues(ha) != 0)
938 			goto qlnx_pci_attach_err;
939 
940 		for (i = 0; i < ha->cdev.num_hwfns; i++) {
941 			struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
942 
943 			ha->sp_irq_rid[i] = i + 1;
944 			ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
945 						&ha->sp_irq_rid[i],
946 						(RF_ACTIVE | RF_SHAREABLE));
947 			if (ha->sp_irq[i] == NULL) {
948                 		device_printf(dev,
949 					"could not allocate mbx interrupt\n");
950 				goto qlnx_pci_attach_err;
951 			}
952 
953 			if (bus_setup_intr(dev, ha->sp_irq[i],
954 				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
955 				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
956 				device_printf(dev,
957 					"could not setup slow path interrupt\n");
958 				goto qlnx_pci_attach_err;
959 			}
960 
961 			QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
962 				" sp_irq %p sp_handle %p\n", p_hwfn,
963 				ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
964 		}
965 	}
966 
967 	/*
968 	 * initialize fast path interrupt
969 	 */
970 	if (qlnx_create_fp_taskqueues(ha) != 0)
971 		goto qlnx_pci_attach_err;
972 
973         for (i = 0; i < ha->num_rss; i++) {
974                 ha->irq_vec[i].rss_idx = i;
975                 ha->irq_vec[i].ha = ha;
976                 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;
977 
978                 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
979                                 &ha->irq_vec[i].irq_rid,
980                                 (RF_ACTIVE | RF_SHAREABLE));
981 
982                 if (ha->irq_vec[i].irq == NULL) {
983                         device_printf(dev,
984 				"could not allocate interrupt[%d] irq_rid = %d\n",
985 				i, ha->irq_vec[i].irq_rid);
986                         goto qlnx_pci_attach_err;
987                 }
988 
989 		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
990                         device_printf(dev, "could not allocate tx_br[%d]\n", i);
991                         goto qlnx_pci_attach_err;
992 		}
993 	}
994 
995 	if (qlnx_vf_device(ha) != 0) {
996 		callout_init(&ha->qlnx_callout, 1);
997 		ha->flags.callout_init = 1;
998 
999 		for (i = 0; i < ha->cdev.num_hwfns; i++) {
1000 			if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
1001 				goto qlnx_pci_attach_err;
1002 			if (ha->grcdump_size[i] == 0)
1003 				goto qlnx_pci_attach_err;
1004 
1005 			ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
1006 			QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
1007 				i, ha->grcdump_size[i]);
1008 
1009 			ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
1010 			if (ha->grcdump[i] == NULL) {
1011 				device_printf(dev, "grcdump alloc[%d] failed\n", i);
1012 				goto qlnx_pci_attach_err;
1013 			}
1014 
1015 			if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
1016 				goto qlnx_pci_attach_err;
1017 			if (ha->idle_chk_size[i] == 0)
1018 				goto qlnx_pci_attach_err;
1019 
1020 			ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
1021 			QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
1022 				i, ha->idle_chk_size[i]);
1023 
1024 			ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
1025 
1026 			if (ha->idle_chk[i] == NULL) {
1027 				device_printf(dev, "idle_chk alloc failed\n");
1028 				goto qlnx_pci_attach_err;
1029 			}
1030 		}
1031 
1032 		if (qlnx_create_error_recovery_taskqueue(ha) != 0)
1033 			goto qlnx_pci_attach_err;
1034 	}
1035 
1036 	if (qlnx_slowpath_start(ha) != 0)
1037 		goto qlnx_pci_attach_err;
1038 	else
1039 		ha->flags.slowpath_start = 1;
1040 
1041 	if (qlnx_vf_device(ha) != 0) {
1042 		if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
1043 			qlnx_mdelay(__func__, 1000);
1044 			qlnx_trigger_dump(ha);
1045 
1046 			goto qlnx_pci_attach_err0;
1047 		}
1048 
1049 		if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
1050 			qlnx_mdelay(__func__, 1000);
1051 			qlnx_trigger_dump(ha);
1052 
1053 			goto qlnx_pci_attach_err0;
1054 		}
1055 	} else {
1056 		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
1057 		ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
1058 	}
1059 
1060 	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
1061 		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
1062 		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
1063 	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
1064 		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
1065 		FW_ENGINEERING_VERSION);
1066 
1067 	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
1068 		 ha->stormfw_ver, ha->mfw_ver);
1069 
1070 	qlnx_init_ifnet(dev, ha);
1071 
1072 	/*
1073 	 * add sysctls
1074 	 */
1075 	qlnx_add_sysctls(ha);
1076 
1077 qlnx_pci_attach_err0:
1078         /*
1079 	 * create ioctl device interface
1080 	 */
1081 	if (qlnx_vf_device(ha) != 0) {
1082 		if (qlnx_make_cdev(ha)) {
1083 			device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
1084 			goto qlnx_pci_attach_err;
1085 		}
1086 
1087 #ifdef QLNX_ENABLE_IWARP
1088 		qlnx_rdma_dev_add(ha);
1089 #endif /* #ifdef QLNX_ENABLE_IWARP */
1090 	}
1091 
1092 #ifndef QLNX_VF
1093 #ifdef CONFIG_ECORE_SRIOV
1094 
1095 	if (qlnx_vf_device(ha) != 0)
1096 		qlnx_initialize_sriov(ha);
1097 
1098 #endif /* #ifdef CONFIG_ECORE_SRIOV */
1099 #endif /* #ifdef QLNX_VF */
1100 
1101 	QL_DPRINT2(ha, "success\n");
1102 
1103         return (0);
1104 
1105 qlnx_pci_attach_err:
1106 
1107 	qlnx_release(ha);
1108 
1109 	return (ENXIO);
1110 }
1111 
1112 /*
1113  * Name:	qlnx_pci_detach
1114  * Function:	Unhooks the device from the operating system
1115  */
1116 static int
1117 qlnx_pci_detach(device_t dev)
1118 {
1119 	qlnx_host_t	*ha = NULL;
1120 
1121         if ((ha = device_get_softc(dev)) == NULL) {
1122                 device_printf(dev, "%s: cannot get softc\n", __func__);
1123                 return (ENOMEM);
1124         }
1125 
1126 	if (qlnx_vf_device(ha) != 0) {
1127 #ifdef CONFIG_ECORE_SRIOV
1128 		int ret;
1129 
1130 		ret = pci_iov_detach(dev);
1131 		if (ret) {
1132                 	device_printf(dev, "%s: SRIOV in use\n", __func__);
1133 			return (ret);
1134 		}
1135 
1136 #endif /* #ifdef CONFIG_ECORE_SRIOV */
1137 
1138 #ifdef QLNX_ENABLE_IWARP
1139 		if (qlnx_rdma_dev_remove(ha) != 0)
1140 			return (EBUSY);
1141 #endif /* #ifdef QLNX_ENABLE_IWARP */
1142 	}
1143 
1144 	QLNX_LOCK(ha);
1145 	qlnx_stop(ha);
1146 	QLNX_UNLOCK(ha);
1147 
1148 	qlnx_release(ha);
1149 
1150         return (0);
1151 }
1152 
1153 #ifdef QLNX_ENABLE_IWARP
1154 
1155 static uint8_t
1156 qlnx_get_personality(uint8_t pci_func)
1157 {
1158 	uint8_t personality;
1159 
1160 	personality = (qlnxe_rdma_configuration >>
1161 				(pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
1162 				QLNX_PERSONALIY_MASK;
1163 	return (personality);
1164 }
1165 
1166 static void
1167 qlnx_set_personality(qlnx_host_t *ha)
1168 {
1169 	uint8_t personality;
1170 
1171 	personality = qlnx_get_personality(ha->pci_func);
1172 
1173 	switch (personality) {
1174 	case QLNX_PERSONALITY_DEFAULT:
1175                	device_printf(ha->pci_dev, "%s: DEFAULT\n",
1176 			__func__);
1177 		ha->personality = ECORE_PCI_DEFAULT;
1178 		break;
1179 
1180 	case QLNX_PERSONALITY_ETH_ONLY:
1181                	device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
1182 			__func__);
1183 		ha->personality = ECORE_PCI_ETH;
1184 		break;
1185 
1186 	case QLNX_PERSONALITY_ETH_IWARP:
1187                	device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
1188 			__func__);
1189 		ha->personality = ECORE_PCI_ETH_IWARP;
1190 		break;
1191 
1192 	case QLNX_PERSONALITY_ETH_ROCE:
1193                	device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
1194 			__func__);
1195 		ha->personality = ECORE_PCI_ETH_ROCE;
1196 		break;
1197 	}
1198 
1199 	return;
1200 }
1201 
1202 #endif /* #ifdef QLNX_ENABLE_IWARP */
1203 
1204 static int
1205 qlnx_init_hw(qlnx_host_t *ha)
1206 {
1207 	int				rval = 0;
1208 	struct ecore_hw_prepare_params	params;
1209 
1210         ha->cdev.ha = ha;
1211 	ecore_init_struct(&ha->cdev);
1212 
1213 	/* ha->dp_module = ECORE_MSG_PROBE |
1214 				ECORE_MSG_INTR |
1215 				ECORE_MSG_SP |
1216 				ECORE_MSG_LINK |
1217 				ECORE_MSG_SPQ |
1218 				ECORE_MSG_RDMA;
1219 	ha->dp_level = ECORE_LEVEL_VERBOSE;*/
1220 	//ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
1221 	ha->dp_level = ECORE_LEVEL_NOTICE;
1222 	//ha->dp_level = ECORE_LEVEL_VERBOSE;
1223 
1224 	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
1225 
1226 	ha->cdev.regview = ha->pci_reg;
1227 
1228 	ha->personality = ECORE_PCI_DEFAULT;
1229 
1230 	if (qlnx_vf_device(ha) == 0) {
1231 		ha->cdev.b_is_vf = true;
1232 
1233 		if (ha->pci_dbells != NULL) {
1234 			ha->cdev.doorbells = ha->pci_dbells;
1235 			ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1236 			ha->cdev.db_size = ha->dbells_size;
1237 		} else {
1238 			ha->pci_dbells = ha->pci_reg;
1239 		}
1240 	} else {
1241 		ha->cdev.doorbells = ha->pci_dbells;
1242 		ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1243 		ha->cdev.db_size = ha->dbells_size;
1244 
1245 #ifdef QLNX_ENABLE_IWARP
1246 
1247 		if (qlnx_rdma_supported(ha) == 0)
1248 			qlnx_set_personality(ha);
1249 
1250 #endif /* #ifdef QLNX_ENABLE_IWARP */
1251 	}
1252 	QL_DPRINT2(ha, "%s: %s\n", __func__,
1253 		(ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));
1254 
1255 	bzero(&params, sizeof (struct ecore_hw_prepare_params));
1256 
1257 	params.personality = ha->personality;
1258 
1259 	params.drv_resc_alloc = false;
1260 	params.chk_reg_fifo = false;
1261 	params.initiate_pf_flr = true;
1262 	params.epoch = 0;
1263 
1264 	ecore_hw_prepare(&ha->cdev, &params);
1265 
1266 	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
1267 
1268 	QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
1269 		ha, &ha->cdev, &ha->cdev.hwfns[0]);
1270 
1271 	return (rval);
1272 }
1273 
1274 static void
1275 qlnx_release(qlnx_host_t *ha)
1276 {
1277         device_t	dev;
1278         int		i;
1279 
1280         dev = ha->pci_dev;
1281 
1282 	QL_DPRINT2(ha, "enter\n");
1283 
1284 	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
1285 		if (ha->idle_chk[i] != NULL) {
1286 			free(ha->idle_chk[i], M_QLNXBUF);
1287 			ha->idle_chk[i] = NULL;
1288 		}
1289 
1290 		if (ha->grcdump[i] != NULL) {
1291 			free(ha->grcdump[i], M_QLNXBUF);
1292 			ha->grcdump[i] = NULL;
1293 		}
1294 	}
1295 
1296         if (ha->flags.callout_init)
1297                 callout_drain(&ha->qlnx_callout);
1298 
1299 	if (ha->flags.slowpath_start) {
1300 		qlnx_slowpath_stop(ha);
1301 	}
1302 
1303         if (ha->flags.hw_init)
1304 		ecore_hw_remove(&ha->cdev);
1305 
1306         qlnx_del_cdev(ha);
1307 
1308         if (ha->ifp != NULL)
1309                 ether_ifdetach(ha->ifp);
1310 
1311 	qlnx_free_tx_dma_tag(ha);
1312 
1313 	qlnx_free_rx_dma_tag(ha);
1314 
1315 	qlnx_free_parent_dma_tag(ha);
1316 
1317 	if (qlnx_vf_device(ha) != 0) {
1318 		qlnx_destroy_error_recovery_taskqueue(ha);
1319 	}
1320 
1321         for (i = 0; i < ha->num_rss; i++) {
1322 		struct qlnx_fastpath *fp = &ha->fp_array[i];
1323 
1324                 if (ha->irq_vec[i].handle) {
1325                         (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
1326                                         ha->irq_vec[i].handle);
1327                 }
1328 
1329                 if (ha->irq_vec[i].irq) {
1330                         (void)bus_release_resource(dev, SYS_RES_IRQ,
1331                                 ha->irq_vec[i].irq_rid,
1332                                 ha->irq_vec[i].irq);
1333                 }
1334 
1335 		qlnx_free_tx_br(ha, fp);
1336         }
1337 	qlnx_destroy_fp_taskqueues(ha);
1338 
1339  	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1340         	if (ha->sp_handle[i])
1341                 	(void)bus_teardown_intr(dev, ha->sp_irq[i],
1342 				ha->sp_handle[i]);
1343 
1344         	if (ha->sp_irq[i])
1345 			(void) bus_release_resource(dev, SYS_RES_IRQ,
1346 				ha->sp_irq_rid[i], ha->sp_irq[i]);
1347 	}
1348 
1349 	qlnx_destroy_sp_taskqueues(ha);
1350 
1351         if (ha->msix_count)
1352                 pci_release_msi(dev);
1353 
1354         if (ha->flags.lock_init) {
1355                 sx_destroy(&ha->hw_lock);
1356         }
1357 
1358         if (ha->pci_reg)
1359                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
1360                                 ha->pci_reg);
1361 
1362         if (ha->dbells_size && ha->pci_dbells)
1363                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1364                                 ha->pci_dbells);
1365 
1366         if (ha->msix_bar)
1367                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1368                                 ha->msix_bar);
1369 
1370 	QL_DPRINT2(ha, "exit\n");
1371 	return;
1372 }
1373 
1374 static void
1375 qlnx_trigger_dump(qlnx_host_t *ha)
1376 {
1377 	int	i;
1378 
1379 	if (ha->ifp != NULL)
1380 		if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
1381 
1382 	QL_DPRINT2(ha, "enter\n");
1383 
1384 	if (qlnx_vf_device(ha) == 0)
1385 		return;
1386 
1387 	ha->error_recovery = 1;
1388 
1389 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1390 		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1391 		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1392 	}
1393 
1394 	QL_DPRINT2(ha, "exit\n");
1395 
1396 	return;
1397 }
1398 
1399 static int
1400 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1401 {
1402         int		err, ret = 0;
1403         qlnx_host_t	*ha;
1404 
1405         err = sysctl_handle_int(oidp, &ret, 0, req);
1406 
1407         if (err || !req->newptr)
1408                 return (err);
1409 
1410         if (ret == 1) {
1411                 ha = (qlnx_host_t *)arg1;
1412                 qlnx_trigger_dump(ha);
1413         }
1414         return (err);
1415 }
1416 
1417 static int
1418 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
1419 {
1420         int			err, i, ret = 0, usecs = 0;
1421         qlnx_host_t		*ha;
1422 	struct ecore_hwfn	*p_hwfn;
1423 	struct qlnx_fastpath	*fp;
1424 
1425         err = sysctl_handle_int(oidp, &usecs, 0, req);
1426 
1427         if (err || !req->newptr || !usecs || (usecs > 255))
1428                 return (err);
1429 
1430         ha = (qlnx_host_t *)arg1;
1431 
1432 	if (qlnx_vf_device(ha) == 0)
1433 		return (-1);
1434 
1435 	for (i = 0; i < ha->num_rss; i++) {
1436 		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1437 
1438         	fp = &ha->fp_array[i];
1439 
1440 		if (fp->txq[0]->handle != NULL) {
1441 			ret = ecore_set_queue_coalesce(p_hwfn, 0,
1442 					(uint16_t)usecs, fp->txq[0]->handle);
1443 		}
1444         }
1445 
1446 	if (!ret)
1447 		ha->tx_coalesce_usecs = (uint8_t)usecs;
1448 
1449         return (err);
1450 }
1451 
1452 static int
1453 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
1454 {
1455         int			err, i, ret = 0, usecs = 0;
1456         qlnx_host_t		*ha;
1457 	struct ecore_hwfn	*p_hwfn;
1458 	struct qlnx_fastpath	*fp;
1459 
1460         err = sysctl_handle_int(oidp, &usecs, 0, req);
1461 
1462         if (err || !req->newptr || !usecs || (usecs > 255))
1463                 return (err);
1464 
1465         ha = (qlnx_host_t *)arg1;
1466 
1467 	if (qlnx_vf_device(ha) == 0)
1468 		return (-1);
1469 
1470 	for (i = 0; i < ha->num_rss; i++) {
1471 		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1472 
1473         	fp = &ha->fp_array[i];
1474 
1475 		if (fp->rxq->handle != NULL) {
1476 			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
1477 					 0, fp->rxq->handle);
1478 		}
1479 	}
1480 
1481 	if (!ret)
1482 		ha->rx_coalesce_usecs = (uint8_t)usecs;
1483 
1484         return (err);
1485 }
1486 
1487 static void
1488 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
1489 {
1490         struct sysctl_ctx_list	*ctx;
1491         struct sysctl_oid_list	*children;
1492 	struct sysctl_oid	*ctx_oid;
1493 
1494         ctx = device_get_sysctl_ctx(ha->pci_dev);
1495 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1496 
1497 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
1498 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");
1499         children = SYSCTL_CHILDREN(ctx_oid);
1500 
1501 	SYSCTL_ADD_QUAD(ctx, children,
1502                 OID_AUTO, "sp_interrupts",
1503                 CTLFLAG_RD, &ha->sp_interrupts,
1504                 "No. of slowpath interrupts");
1505 
1506 	return;
1507 }
1508 
1509 static void
1510 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
1511 {
1512         struct sysctl_ctx_list	*ctx;
1513         struct sysctl_oid_list	*children;
1514         struct sysctl_oid_list	*node_children;
1515 	struct sysctl_oid	*ctx_oid;
1516 	int			i, j;
1517 	uint8_t			name_str[16];
1518 
1519         ctx = device_get_sysctl_ctx(ha->pci_dev);
1520 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1521 
1522 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
1523 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
1524 	children = SYSCTL_CHILDREN(ctx_oid);
1525 
1526 	for (i = 0; i < ha->num_rss; i++) {
1527 		bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1528 		snprintf(name_str, sizeof(name_str), "%d", i);
1529 
1530 		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
1531 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
1532 		node_children = SYSCTL_CHILDREN(ctx_oid);
1533 
1534 		/* Tx Related */
1535 
1536 		SYSCTL_ADD_QUAD(ctx, node_children,
1537 			OID_AUTO, "tx_pkts_processed",
1538 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1539 			"No. of packets processed for transmission");
1540 
1541 		SYSCTL_ADD_QUAD(ctx, node_children,
1542 			OID_AUTO, "tx_pkts_freed",
1543 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1544 			"No. of freed packets");
1545 
1546 		SYSCTL_ADD_QUAD(ctx, node_children,
1547 			OID_AUTO, "tx_pkts_transmitted",
1548 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1549 			"No. of transmitted packets");
1550 
1551 		SYSCTL_ADD_QUAD(ctx, node_children,
1552 			OID_AUTO, "tx_pkts_completed",
1553 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1554 			"No. of transmit completions");
1555 
1556                 SYSCTL_ADD_QUAD(ctx, node_children,
1557                         OID_AUTO, "tx_non_tso_pkts",
1558                         CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
1559                         "No. of non LSO transmited packets");
1560 
1561 #ifdef QLNX_TRACE_PERF_DATA
1562 
1563                 SYSCTL_ADD_QUAD(ctx, node_children,
1564                         OID_AUTO, "tx_pkts_trans_ctx",
1565                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
1566                         "No. of transmitted packets in transmit context");
1567 
1568                 SYSCTL_ADD_QUAD(ctx, node_children,
1569                         OID_AUTO, "tx_pkts_compl_ctx",
1570                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
1571                         "No. of transmit completions in transmit context");
1572 
1573                 SYSCTL_ADD_QUAD(ctx, node_children,
1574                         OID_AUTO, "tx_pkts_trans_fp",
1575                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
1576                         "No. of transmitted packets in taskqueue");
1577 
1578                 SYSCTL_ADD_QUAD(ctx, node_children,
1579                         OID_AUTO, "tx_pkts_compl_fp",
1580                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
1581                         "No. of transmit completions in taskqueue");
1582 
1583                 SYSCTL_ADD_QUAD(ctx, node_children,
1584                         OID_AUTO, "tx_pkts_compl_intr",
1585                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
1586                         "No. of transmit completions in interrupt ctx");
1587 #endif
1588 
1589                 SYSCTL_ADD_QUAD(ctx, node_children,
1590                         OID_AUTO, "tx_tso_pkts",
1591                         CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
1592                         "No. of LSO transmited packets");
1593 
1594 		SYSCTL_ADD_QUAD(ctx, node_children,
1595 			OID_AUTO, "tx_lso_wnd_min_len",
1596 			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1597 			"tx_lso_wnd_min_len");
1598 
1599 		SYSCTL_ADD_QUAD(ctx, node_children,
1600 			OID_AUTO, "tx_defrag",
1601 			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1602 			"tx_defrag");
1603 
1604 		SYSCTL_ADD_QUAD(ctx, node_children,
1605 			OID_AUTO, "tx_nsegs_gt_elem_left",
1606 			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1607 			"tx_nsegs_gt_elem_left");
1608 
1609 		SYSCTL_ADD_UINT(ctx, node_children,
1610 			OID_AUTO, "tx_tso_max_nsegs",
1611 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1612 			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1613 
1614 		SYSCTL_ADD_UINT(ctx, node_children,
1615 			OID_AUTO, "tx_tso_min_nsegs",
1616 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1617 			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1618 
1619 		SYSCTL_ADD_UINT(ctx, node_children,
1620 			OID_AUTO, "tx_tso_max_pkt_len",
1621 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1622 			ha->fp_array[i].tx_tso_max_pkt_len,
1623 			"tx_tso_max_pkt_len");
1624 
1625 		SYSCTL_ADD_UINT(ctx, node_children,
1626 			OID_AUTO, "tx_tso_min_pkt_len",
1627 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1628 			ha->fp_array[i].tx_tso_min_pkt_len,
1629 			"tx_tso_min_pkt_len");
1630 
1631 		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
1632 			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1633 			snprintf(name_str, sizeof(name_str),
1634 				"tx_pkts_nseg_%02d", (j+1));
1635 
1636 			SYSCTL_ADD_QUAD(ctx, node_children,
1637 				OID_AUTO, name_str, CTLFLAG_RD,
1638 				&ha->fp_array[i].tx_pkts[j], name_str);
1639 		}
1640 
1641 #ifdef QLNX_TRACE_PERF_DATA
1642                 for (j = 0; j < 18; j++) {
1643                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1644                         snprintf(name_str, sizeof(name_str),
1645                                 "tx_pkts_hist_%02d", (j+1));
1646 
1647                         SYSCTL_ADD_QUAD(ctx, node_children,
1648                                 OID_AUTO, name_str, CTLFLAG_RD,
1649                                 &ha->fp_array[i].tx_pkts_hist[j], name_str);
1650                 }
1651                 for (j = 0; j < 5; j++) {
1652                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1653                         snprintf(name_str, sizeof(name_str),
1654                                 "tx_comInt_%02d", (j+1));
1655 
1656                         SYSCTL_ADD_QUAD(ctx, node_children,
1657                                 OID_AUTO, name_str, CTLFLAG_RD,
1658                                 &ha->fp_array[i].tx_comInt[j], name_str);
1659                 }
1660                 for (j = 0; j < 18; j++) {
1661                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1662                         snprintf(name_str, sizeof(name_str),
1663                                 "tx_pkts_q_%02d", (j+1));
1664 
1665                         SYSCTL_ADD_QUAD(ctx, node_children,
1666                                 OID_AUTO, name_str, CTLFLAG_RD,
1667                                 &ha->fp_array[i].tx_pkts_q[j], name_str);
1668                 }
1669 #endif
1670 
1671 		SYSCTL_ADD_QUAD(ctx, node_children,
1672 			OID_AUTO, "err_tx_nsegs_gt_elem_left",
1673 			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1674 			"err_tx_nsegs_gt_elem_left");
1675 
1676 		SYSCTL_ADD_QUAD(ctx, node_children,
1677 			OID_AUTO, "err_tx_dmamap_create",
1678 			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1679 			"err_tx_dmamap_create");
1680 
1681 		SYSCTL_ADD_QUAD(ctx, node_children,
1682 			OID_AUTO, "err_tx_defrag_dmamap_load",
1683 			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1684 			"err_tx_defrag_dmamap_load");
1685 
1686 		SYSCTL_ADD_QUAD(ctx, node_children,
1687 			OID_AUTO, "err_tx_non_tso_max_seg",
1688 			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1689 			"err_tx_non_tso_max_seg");
1690 
1691 		SYSCTL_ADD_QUAD(ctx, node_children,
1692 			OID_AUTO, "err_tx_dmamap_load",
1693 			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1694 			"err_tx_dmamap_load");
1695 
1696 		SYSCTL_ADD_QUAD(ctx, node_children,
1697 			OID_AUTO, "err_tx_defrag",
1698 			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1699 			"err_tx_defrag");
1700 
1701 		SYSCTL_ADD_QUAD(ctx, node_children,
1702 			OID_AUTO, "err_tx_free_pkt_null",
1703 			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1704 			"err_tx_free_pkt_null");
1705 
1706 		SYSCTL_ADD_QUAD(ctx, node_children,
1707 			OID_AUTO, "err_tx_cons_idx_conflict",
1708 			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1709 			"err_tx_cons_idx_conflict");
1710 
1711 		SYSCTL_ADD_QUAD(ctx, node_children,
1712 			OID_AUTO, "lro_cnt_64",
1713 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1714 			"lro_cnt_64");
1715 
1716 		SYSCTL_ADD_QUAD(ctx, node_children,
1717 			OID_AUTO, "lro_cnt_128",
1718 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1719 			"lro_cnt_128");
1720 
1721 		SYSCTL_ADD_QUAD(ctx, node_children,
1722 			OID_AUTO, "lro_cnt_256",
1723 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1724 			"lro_cnt_256");
1725 
1726 		SYSCTL_ADD_QUAD(ctx, node_children,
1727 			OID_AUTO, "lro_cnt_512",
1728 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1729 			"lro_cnt_512");
1730 
1731 		SYSCTL_ADD_QUAD(ctx, node_children,
1732 			OID_AUTO, "lro_cnt_1024",
1733 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1734 			"lro_cnt_1024");
1735 
1736 		/* Rx Related */
1737 
1738 		SYSCTL_ADD_QUAD(ctx, node_children,
1739 			OID_AUTO, "rx_pkts",
1740 			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1741 			"No. of received packets");
1742 
1743 		SYSCTL_ADD_QUAD(ctx, node_children,
1744 			OID_AUTO, "tpa_start",
1745 			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1746 			"No. of tpa_start packets");
1747 
1748 		SYSCTL_ADD_QUAD(ctx, node_children,
1749 			OID_AUTO, "tpa_cont",
1750 			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1751 			"No. of tpa_cont packets");
1752 
1753 		SYSCTL_ADD_QUAD(ctx, node_children,
1754 			OID_AUTO, "tpa_end",
1755 			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1756 			"No. of tpa_end packets");
1757 
1758 		SYSCTL_ADD_QUAD(ctx, node_children,
1759 			OID_AUTO, "err_m_getcl",
1760 			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1761 			"err_m_getcl");
1762 
1763 		SYSCTL_ADD_QUAD(ctx, node_children,
1764 			OID_AUTO, "err_m_getjcl",
1765 			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1766 			"err_m_getjcl");
1767 
1768 		SYSCTL_ADD_QUAD(ctx, node_children,
1769 			OID_AUTO, "err_rx_hw_errors",
1770 			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1771 			"err_rx_hw_errors");
1772 
1773 		SYSCTL_ADD_QUAD(ctx, node_children,
1774 			OID_AUTO, "err_rx_alloc_errors",
1775 			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1776 			"err_rx_alloc_errors");
1777 	}
1778 
1779 	return;
1780 }
1781 
1782 static void
1783 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
1784 {
1785         struct sysctl_ctx_list	*ctx;
1786         struct sysctl_oid_list	*children;
1787 	struct sysctl_oid	*ctx_oid;
1788 
1789         ctx = device_get_sysctl_ctx(ha->pci_dev);
1790 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1791 
1792 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
1793 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
1794         children = SYSCTL_CHILDREN(ctx_oid);
1795 
1796 	SYSCTL_ADD_QUAD(ctx, children,
1797                 OID_AUTO, "no_buff_discards",
1798                 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
1799                 "No. of packets discarded due to lack of buffer");
1800 
1801 	SYSCTL_ADD_QUAD(ctx, children,
1802                 OID_AUTO, "packet_too_big_discard",
1803                 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
1804                 "No. of packets discarded because packet was too big");
1805 
1806 	SYSCTL_ADD_QUAD(ctx, children,
1807                 OID_AUTO, "ttl0_discard",
1808                 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
1809                 "ttl0_discard");
1810 
1811 	SYSCTL_ADD_QUAD(ctx, children,
1812                 OID_AUTO, "rx_ucast_bytes",
1813                 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
1814                 "rx_ucast_bytes");
1815 
1816 	SYSCTL_ADD_QUAD(ctx, children,
1817                 OID_AUTO, "rx_mcast_bytes",
1818                 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
1819                 "rx_mcast_bytes");
1820 
1821 	SYSCTL_ADD_QUAD(ctx, children,
1822                 OID_AUTO, "rx_bcast_bytes",
1823                 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
1824                 "rx_bcast_bytes");
1825 
1826 	SYSCTL_ADD_QUAD(ctx, children,
1827                 OID_AUTO, "rx_ucast_pkts",
1828                 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
1829                 "rx_ucast_pkts");
1830 
1831 	SYSCTL_ADD_QUAD(ctx, children,
1832                 OID_AUTO, "rx_mcast_pkts",
1833                 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
1834                 "rx_mcast_pkts");
1835 
1836 	SYSCTL_ADD_QUAD(ctx, children,
1837                 OID_AUTO, "rx_bcast_pkts",
1838                 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
1839                 "rx_bcast_pkts");
1840 
1841 	SYSCTL_ADD_QUAD(ctx, children,
1842                 OID_AUTO, "mftag_filter_discards",
1843                 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
1844                 "mftag_filter_discards");
1845 
1846 	SYSCTL_ADD_QUAD(ctx, children,
1847                 OID_AUTO, "mac_filter_discards",
1848                 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
1849                 "mac_filter_discards");
1850 
1851 	SYSCTL_ADD_QUAD(ctx, children,
1852                 OID_AUTO, "tx_ucast_bytes",
1853                 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
1854                 "tx_ucast_bytes");
1855 
1856 	SYSCTL_ADD_QUAD(ctx, children,
1857                 OID_AUTO, "tx_mcast_bytes",
1858                 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
1859                 "tx_mcast_bytes");
1860 
1861 	SYSCTL_ADD_QUAD(ctx, children,
1862                 OID_AUTO, "tx_bcast_bytes",
1863                 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
1864                 "tx_bcast_bytes");
1865 
1866 	SYSCTL_ADD_QUAD(ctx, children,
1867                 OID_AUTO, "tx_ucast_pkts",
1868                 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
1869                 "tx_ucast_pkts");
1870 
1871 	SYSCTL_ADD_QUAD(ctx, children,
1872                 OID_AUTO, "tx_mcast_pkts",
1873                 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1874                 "tx_mcast_pkts");
1875 
1876 	SYSCTL_ADD_QUAD(ctx, children,
1877                 OID_AUTO, "tx_bcast_pkts",
1878                 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1879                 "tx_bcast_pkts");
1880 
1881 	SYSCTL_ADD_QUAD(ctx, children,
1882                 OID_AUTO, "tx_err_drop_pkts",
1883                 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1884                 "tx_err_drop_pkts");
1885 
1886 	SYSCTL_ADD_QUAD(ctx, children,
1887                 OID_AUTO, "tpa_coalesced_pkts",
1888                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1889                 "tpa_coalesced_pkts");
1890 
1891 	SYSCTL_ADD_QUAD(ctx, children,
1892                 OID_AUTO, "tpa_coalesced_events",
1893                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1894                 "tpa_coalesced_events");
1895 
1896 	SYSCTL_ADD_QUAD(ctx, children,
1897                 OID_AUTO, "tpa_aborts_num",
1898                 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1899                 "tpa_aborts_num");
1900 
1901 	SYSCTL_ADD_QUAD(ctx, children,
1902                 OID_AUTO, "tpa_not_coalesced_pkts",
1903                 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1904                 "tpa_not_coalesced_pkts");
1905 
1906 	SYSCTL_ADD_QUAD(ctx, children,
1907                 OID_AUTO, "tpa_coalesced_bytes",
1908                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1909                 "tpa_coalesced_bytes");
1910 
1911 	SYSCTL_ADD_QUAD(ctx, children,
1912                 OID_AUTO, "rx_64_byte_packets",
1913                 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1914                 "rx_64_byte_packets");
1915 
1916 	SYSCTL_ADD_QUAD(ctx, children,
1917                 OID_AUTO, "rx_65_to_127_byte_packets",
1918                 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1919                 "rx_65_to_127_byte_packets");
1920 
1921 	SYSCTL_ADD_QUAD(ctx, children,
1922                 OID_AUTO, "rx_128_to_255_byte_packets",
1923                 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1924                 "rx_128_to_255_byte_packets");
1925 
1926 	SYSCTL_ADD_QUAD(ctx, children,
1927                 OID_AUTO, "rx_256_to_511_byte_packets",
1928                 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1929                 "rx_256_to_511_byte_packets");
1930 
1931 	SYSCTL_ADD_QUAD(ctx, children,
1932                 OID_AUTO, "rx_512_to_1023_byte_packets",
1933                 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1934                 "rx_512_to_1023_byte_packets");
1935 
1936 	SYSCTL_ADD_QUAD(ctx, children,
1937                 OID_AUTO, "rx_1024_to_1518_byte_packets",
1938                 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1939                 "rx_1024_to_1518_byte_packets");
1940 
1941 	SYSCTL_ADD_QUAD(ctx, children,
1942                 OID_AUTO, "rx_1519_to_1522_byte_packets",
1943                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1944                 "rx_1519_to_1522_byte_packets");
1945 
1946 	SYSCTL_ADD_QUAD(ctx, children,
1947                 OID_AUTO, "rx_1523_to_2047_byte_packets",
1948                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1949                 "rx_1523_to_2047_byte_packets");
1950 
1951 	SYSCTL_ADD_QUAD(ctx, children,
1952                 OID_AUTO, "rx_2048_to_4095_byte_packets",
1953                 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1954                 "rx_2048_to_4095_byte_packets");
1955 
1956 	SYSCTL_ADD_QUAD(ctx, children,
1957                 OID_AUTO, "rx_4096_to_9216_byte_packets",
1958                 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1959                 "rx_4096_to_9216_byte_packets");
1960 
1961 	SYSCTL_ADD_QUAD(ctx, children,
1962                 OID_AUTO, "rx_9217_to_16383_byte_packets",
1963                 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1964                 "rx_9217_to_16383_byte_packets");
1965 
1966 	SYSCTL_ADD_QUAD(ctx, children,
1967                 OID_AUTO, "rx_crc_errors",
1968                 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1969                 "rx_crc_errors");
1970 
1971 	SYSCTL_ADD_QUAD(ctx, children,
1972                 OID_AUTO, "rx_mac_crtl_frames",
1973                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1974                 "rx_mac_crtl_frames");
1975 
1976 	SYSCTL_ADD_QUAD(ctx, children,
1977                 OID_AUTO, "rx_pause_frames",
1978                 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
1979                 "rx_pause_frames");
1980 
1981 	SYSCTL_ADD_QUAD(ctx, children,
1982                 OID_AUTO, "rx_pfc_frames",
1983                 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
1984                 "rx_pfc_frames");
1985 
1986 	SYSCTL_ADD_QUAD(ctx, children,
1987                 OID_AUTO, "rx_align_errors",
1988                 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
1989                 "rx_align_errors");
1990 
1991 	SYSCTL_ADD_QUAD(ctx, children,
1992                 OID_AUTO, "rx_carrier_errors",
1993                 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
1994                 "rx_carrier_errors");
1995 
1996 	SYSCTL_ADD_QUAD(ctx, children,
1997                 OID_AUTO, "rx_oversize_packets",
1998                 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
1999                 "rx_oversize_packets");
2000 
2001 	SYSCTL_ADD_QUAD(ctx, children,
2002                 OID_AUTO, "rx_jabbers",
2003                 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
2004                 "rx_jabbers");
2005 
2006 	SYSCTL_ADD_QUAD(ctx, children,
2007                 OID_AUTO, "rx_undersize_packets",
2008                 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
2009                 "rx_undersize_packets");
2010 
2011 	SYSCTL_ADD_QUAD(ctx, children,
2012                 OID_AUTO, "rx_fragments",
2013                 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
2014                 "rx_fragments");
2015 
2016 	SYSCTL_ADD_QUAD(ctx, children,
2017                 OID_AUTO, "tx_64_byte_packets",
2018                 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
2019                 "tx_64_byte_packets");
2020 
2021 	SYSCTL_ADD_QUAD(ctx, children,
2022                 OID_AUTO, "tx_65_to_127_byte_packets",
2023                 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
2024                 "tx_65_to_127_byte_packets");
2025 
2026 	SYSCTL_ADD_QUAD(ctx, children,
2027                 OID_AUTO, "tx_128_to_255_byte_packets",
2028                 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
2029                 "tx_128_to_255_byte_packets");
2030 
2031 	SYSCTL_ADD_QUAD(ctx, children,
2032                 OID_AUTO, "tx_256_to_511_byte_packets",
2033                 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
2034                 "tx_256_to_511_byte_packets");
2035 
2036 	SYSCTL_ADD_QUAD(ctx, children,
2037                 OID_AUTO, "tx_512_to_1023_byte_packets",
2038                 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
2039                 "tx_512_to_1023_byte_packets");
2040 
2041 	SYSCTL_ADD_QUAD(ctx, children,
2042                 OID_AUTO, "tx_1024_to_1518_byte_packets",
2043                 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
2044                 "tx_1024_to_1518_byte_packets");
2045 
2046 	SYSCTL_ADD_QUAD(ctx, children,
2047                 OID_AUTO, "tx_1519_to_2047_byte_packets",
2048                 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
2049                 "tx_1519_to_2047_byte_packets");
2050 
2051 	SYSCTL_ADD_QUAD(ctx, children,
2052                 OID_AUTO, "tx_2048_to_4095_byte_packets",
2053                 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
2054                 "tx_2048_to_4095_byte_packets");
2055 
2056 	SYSCTL_ADD_QUAD(ctx, children,
2057                 OID_AUTO, "tx_4096_to_9216_byte_packets",
2058                 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
2059                 "tx_4096_to_9216_byte_packets");
2060 
2061 	SYSCTL_ADD_QUAD(ctx, children,
2062                 OID_AUTO, "tx_9217_to_16383_byte_packets",
2063                 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
2064                 "tx_9217_to_16383_byte_packets");
2065 
2066 	SYSCTL_ADD_QUAD(ctx, children,
2067                 OID_AUTO, "tx_pause_frames",
2068                 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
2069                 "tx_pause_frames");
2070 
2071 	SYSCTL_ADD_QUAD(ctx, children,
2072                 OID_AUTO, "tx_pfc_frames",
2073                 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
2074                 "tx_pfc_frames");
2075 
2076 	SYSCTL_ADD_QUAD(ctx, children,
2077                 OID_AUTO, "tx_lpi_entry_count",
2078                 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
2079                 "tx_lpi_entry_count");
2080 
2081 	SYSCTL_ADD_QUAD(ctx, children,
2082                 OID_AUTO, "tx_total_collisions",
2083                 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
2084                 "tx_total_collisions");
2085 
2086 	SYSCTL_ADD_QUAD(ctx, children,
2087                 OID_AUTO, "brb_truncates",
2088                 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
2089                 "brb_truncates");
2090 
2091 	SYSCTL_ADD_QUAD(ctx, children,
2092                 OID_AUTO, "brb_discards",
2093                 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
2094                 "brb_discards");
2095 
2096 	SYSCTL_ADD_QUAD(ctx, children,
2097                 OID_AUTO, "rx_mac_bytes",
2098                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
2099                 "rx_mac_bytes");
2100 
2101 	SYSCTL_ADD_QUAD(ctx, children,
2102                 OID_AUTO, "rx_mac_uc_packets",
2103                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
2104                 "rx_mac_uc_packets");
2105 
2106 	SYSCTL_ADD_QUAD(ctx, children,
2107                 OID_AUTO, "rx_mac_mc_packets",
2108                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
2109                 "rx_mac_mc_packets");
2110 
2111 	SYSCTL_ADD_QUAD(ctx, children,
2112                 OID_AUTO, "rx_mac_bc_packets",
2113                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
2114                 "rx_mac_bc_packets");
2115 
2116 	SYSCTL_ADD_QUAD(ctx, children,
2117                 OID_AUTO, "rx_mac_frames_ok",
2118                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
2119                 "rx_mac_frames_ok");
2120 
2121 	SYSCTL_ADD_QUAD(ctx, children,
2122                 OID_AUTO, "tx_mac_bytes",
2123                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
2124                 "tx_mac_bytes");
2125 
2126 	SYSCTL_ADD_QUAD(ctx, children,
2127                 OID_AUTO, "tx_mac_uc_packets",
2128                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
2129                 "tx_mac_uc_packets");
2130 
2131 	SYSCTL_ADD_QUAD(ctx, children,
2132                 OID_AUTO, "tx_mac_mc_packets",
2133                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
2134                 "tx_mac_mc_packets");
2135 
2136 	SYSCTL_ADD_QUAD(ctx, children,
2137                 OID_AUTO, "tx_mac_bc_packets",
2138                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
2139                 "tx_mac_bc_packets");
2140 
2141 	SYSCTL_ADD_QUAD(ctx, children,
2142                 OID_AUTO, "tx_mac_ctrl_frames",
2143                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
2144                 "tx_mac_ctrl_frames");
2145 	return;
2146 }
2147 
2148 static void
2149 qlnx_add_sysctls(qlnx_host_t *ha)
2150 {
2151         device_t		dev = ha->pci_dev;
2152 	struct sysctl_ctx_list	*ctx;
2153 	struct sysctl_oid_list	*children;
2154 
2155 	ctx = device_get_sysctl_ctx(dev);
2156 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2157 
2158 	qlnx_add_fp_stats_sysctls(ha);
2159 	qlnx_add_sp_stats_sysctls(ha);
2160 
2161 	if (qlnx_vf_device(ha) != 0)
2162 		qlnx_add_hw_stats_sysctls(ha);
2163 
2164 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
2165 		CTLFLAG_RD, qlnx_ver_str, 0,
2166 		"Driver Version");
2167 
2168 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
2169 		CTLFLAG_RD, ha->stormfw_ver, 0,
2170 		"STORM Firmware Version");
2171 
2172 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
2173 		CTLFLAG_RD, ha->mfw_ver, 0,
2174 		"Management Firmware Version");
2175 
2176         SYSCTL_ADD_UINT(ctx, children,
2177                 OID_AUTO, "personality", CTLFLAG_RD,
2178                 &ha->personality, ha->personality,
2179 		"\tpersonality = 0 => Ethernet Only\n"
2180 		"\tpersonality = 3 => Ethernet and RoCE\n"
2181 		"\tpersonality = 4 => Ethernet and iWARP\n"
2182 		"\tpersonality = 6 => Default in Shared Memory\n");
2183 
2184         ha->dbg_level = 0;
2185         SYSCTL_ADD_UINT(ctx, children,
2186                 OID_AUTO, "debug", CTLFLAG_RW,
2187                 &ha->dbg_level, ha->dbg_level, "Debug Level");
2188 
2189         ha->dp_level = 0x01;
2190         SYSCTL_ADD_UINT(ctx, children,
2191                 OID_AUTO, "dp_level", CTLFLAG_RW,
2192                 &ha->dp_level, ha->dp_level, "DP Level");
2193 
2194         ha->dbg_trace_lro_cnt = 0;
2195         SYSCTL_ADD_UINT(ctx, children,
2196                 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
2197                 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2198 		"Trace LRO Counts");
2199 
2200         ha->dbg_trace_tso_pkt_len = 0;
2201         SYSCTL_ADD_UINT(ctx, children,
2202                 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
2203                 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2204 		"Trace TSO packet lengths");
2205 
2206         ha->dp_module = 0;
2207         SYSCTL_ADD_UINT(ctx, children,
2208                 OID_AUTO, "dp_module", CTLFLAG_RW,
2209                 &ha->dp_module, ha->dp_module, "DP Module");
2210 
2211         ha->err_inject = 0;
2212 
2213         SYSCTL_ADD_UINT(ctx, children,
2214                 OID_AUTO, "err_inject", CTLFLAG_RW,
2215                 &ha->err_inject, ha->err_inject, "Error Inject");
2216 
2217 	ha->storm_stats_enable = 0;
2218 
2219 	SYSCTL_ADD_UINT(ctx, children,
2220 		OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
2221 		&ha->storm_stats_enable, ha->storm_stats_enable,
2222 		"Enable Storm Statistics Gathering");
2223 
2224 	ha->storm_stats_index = 0;
2225 
2226 	SYSCTL_ADD_UINT(ctx, children,
2227 		OID_AUTO, "storm_stats_index", CTLFLAG_RD,
2228 		&ha->storm_stats_index, ha->storm_stats_index,
2229 		"Enable Storm Statistics Gathering Current Index");
2230 
2231 	ha->grcdump_taken = 0;
2232 	SYSCTL_ADD_UINT(ctx, children,
2233 		OID_AUTO, "grcdump_taken", CTLFLAG_RD,
2234 		&ha->grcdump_taken, ha->grcdump_taken,
2235 		"grcdump_taken");
2236 
2237 	ha->idle_chk_taken = 0;
2238 	SYSCTL_ADD_UINT(ctx, children,
2239 		OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
2240 		&ha->idle_chk_taken, ha->idle_chk_taken,
2241 		"idle_chk_taken");
2242 
2243 	SYSCTL_ADD_UINT(ctx, children,
2244 		OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
2245 		&ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2246 		"rx_coalesce_usecs");
2247 
2248 	SYSCTL_ADD_UINT(ctx, children,
2249 		OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
2250 		&ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2251 		"tx_coalesce_usecs");
2252 
2253 	SYSCTL_ADD_PROC(ctx, children,
2254 	    OID_AUTO, "trigger_dump",
2255 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2256 	    (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2257 
2258 	SYSCTL_ADD_PROC(ctx, children,
2259 	    OID_AUTO, "set_rx_coalesce_usecs",
2260 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2261 	    (void *)ha, 0, qlnx_set_rx_coalesce, "I",
2262 	    "rx interrupt coalesce period microseconds");
2263 
2264 	SYSCTL_ADD_PROC(ctx, children,
2265 	    OID_AUTO, "set_tx_coalesce_usecs",
2266 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2267 	    (void *)ha, 0, qlnx_set_tx_coalesce, "I",
2268 	    "tx interrupt coalesce period microseconds");
2269 
2270 	ha->rx_pkt_threshold = 128;
2271         SYSCTL_ADD_UINT(ctx, children,
2272                 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
2273                 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2274 		"No. of Rx Pkts to process at a time");
2275 
2276 	ha->rx_jumbo_buf_eq_mtu = 0;
2277         SYSCTL_ADD_UINT(ctx, children,
2278                 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
2279                 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2280 		"== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
2281 		"otherwise Rx Jumbo buffers are set to >= MTU size\n");
2282 
2283 	SYSCTL_ADD_QUAD(ctx, children,
2284                 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
2285 		&ha->err_illegal_intr, "err_illegal_intr");
2286 
2287 	SYSCTL_ADD_QUAD(ctx, children,
2288                 OID_AUTO, "err_fp_null", CTLFLAG_RD,
2289 		&ha->err_fp_null, "err_fp_null");
2290 
2291 	SYSCTL_ADD_QUAD(ctx, children,
2292                 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
2293 		&ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2294 	return;
2295 }
2296 
2297 /*****************************************************************************
2298  * Operating System Network Interface Functions
2299  *****************************************************************************/
2300 
2301 static void
2302 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2303 {
2304 	uint16_t	device_id;
2305         if_t		ifp;
2306 
2307         ifp = ha->ifp = if_alloc(IFT_ETHER);
2308 
2309         if (ifp == NULL)
2310                 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
2311 
2312         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2313 
2314 	device_id = pci_get_device(ha->pci_dev);
2315 
2316         if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2317 		if_setbaudrate(ifp, IF_Gbps(40));
2318         else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2319 			(device_id == QLOGIC_PCI_DEVICE_ID_8070))
2320 		if_setbaudrate(ifp, IF_Gbps(25));
2321         else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2322 		if_setbaudrate(ifp, IF_Gbps(50));
2323         else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2324 		if_setbaudrate(ifp, IF_Gbps(100));
2325 
2326         if_setcapabilities(ifp, IFCAP_LINKSTATE);
2327 
2328         if_setinitfn(ifp, qlnx_init);
2329         if_setsoftc(ifp, ha);
2330         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2331         if_setioctlfn(ifp, qlnx_ioctl);
2332         if_settransmitfn(ifp, qlnx_transmit);
2333         if_setqflushfn(ifp, qlnx_qflush);
2334 
2335         if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha));
2336         if_setsendqready(ifp);
2337 
2338 	if_setgetcounterfn(ifp, qlnx_get_counter);
2339 
2340         ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2341 
2342         memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2343 
2344 	if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2345 		!ha->primary_mac[2] && !ha->primary_mac[3] &&
2346 		!ha->primary_mac[4] && !ha->primary_mac[5]) {
2347 		uint32_t rnd;
2348 
2349 		rnd = arc4random();
2350 
2351 		ha->primary_mac[0] = 0x00;
2352 		ha->primary_mac[1] = 0x0e;
2353 		ha->primary_mac[2] = 0x1e;
2354 		ha->primary_mac[3] = rnd & 0xFF;
2355 		ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2356 		ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2357 	}
2358 
2359 	ether_ifattach(ifp, ha->primary_mac);
2360 	bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2361 
2362 	if_setcapabilities(ifp, IFCAP_HWCSUM);
2363 	if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
2364 
2365 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
2366 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
2367 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
2368 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
2369 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
2370 	if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
2371 	if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
2372 	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
2373 
2374 	if_sethwtsomax(ifp,  QLNX_MAX_TSO_FRAME_SIZE -
2375 				(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2376 	if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */
2377 	if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE);
2378 
2379         if_setcapenable(ifp, if_getcapabilities(ifp));
2380 
2381 	if_sethwassist(ifp, CSUM_IP);
2382 	if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
2383 	if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
2384 	if_sethwassistbits(ifp, CSUM_TSO, 0);
2385 
2386 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2387 
2388         ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
2389 		qlnx_media_status);
2390 
2391         if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2392 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2393 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2394 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2395         } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2396 			(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2397 		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2398 		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2399         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2400 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2401 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2402         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2403 		ifmedia_add(&ha->media,
2404 			(IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2405 		ifmedia_add(&ha->media,
2406 			(IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2407 		ifmedia_add(&ha->media,
2408 			(IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2409 	}
2410 
2411         ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2412         ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2413 
2414         ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2415 
2416         QL_DPRINT2(ha, "exit\n");
2417 
2418         return;
2419 }
2420 
2421 static void
2422 qlnx_init_locked(qlnx_host_t *ha)
2423 {
2424 	if_t		ifp = ha->ifp;
2425 
2426 	QL_DPRINT1(ha, "Driver Initialization start \n");
2427 
2428 	qlnx_stop(ha);
2429 
2430 	if (qlnx_load(ha) == 0) {
2431 		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2432 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2433 
2434 #ifdef QLNX_ENABLE_IWARP
2435 		if (qlnx_vf_device(ha) != 0) {
2436 			qlnx_rdma_dev_open(ha);
2437 		}
2438 #endif /* #ifdef QLNX_ENABLE_IWARP */
2439 	}
2440 
2441 	return;
2442 }
2443 
2444 static void
2445 qlnx_init(void *arg)
2446 {
2447 	qlnx_host_t	*ha;
2448 
2449 	ha = (qlnx_host_t *)arg;
2450 
2451 	QL_DPRINT2(ha, "enter\n");
2452 
2453 	QLNX_LOCK(ha);
2454 	qlnx_init_locked(ha);
2455 	QLNX_UNLOCK(ha);
2456 
2457 	QL_DPRINT2(ha, "exit\n");
2458 
2459 	return;
2460 }
2461 
2462 static int
2463 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2464 {
2465 	struct ecore_filter_mcast	*mcast;
2466 	struct ecore_dev		*cdev;
2467 	int				rc;
2468 
2469 	cdev = &ha->cdev;
2470 
2471 	mcast = &ha->ecore_mcast;
2472 	bzero(mcast, sizeof(struct ecore_filter_mcast));
2473 
2474 	if (add_mac)
2475 		mcast->opcode = ECORE_FILTER_ADD;
2476 	else
2477 		mcast->opcode = ECORE_FILTER_REMOVE;
2478 
2479 	mcast->num_mc_addrs = 1;
2480 	memcpy(mcast->mac, mac_addr, ETH_ALEN);
2481 
2482 	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2483 
2484 	return (rc);
2485 }
2486 
2487 static int
2488 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2489 {
2490         int	i;
2491 
2492         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2493                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2494                         return 0; /* its been already added */
2495         }
2496 
2497         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2498                 if ((ha->mcast[i].addr[0] == 0) &&
2499                         (ha->mcast[i].addr[1] == 0) &&
2500                         (ha->mcast[i].addr[2] == 0) &&
2501                         (ha->mcast[i].addr[3] == 0) &&
2502                         (ha->mcast[i].addr[4] == 0) &&
2503                         (ha->mcast[i].addr[5] == 0)) {
2504                         if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2505                                 return (-1);
2506 
2507                         bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2508                         ha->nmcast++;
2509 
2510                         return 0;
2511                 }
2512         }
2513         return 0;
2514 }
2515 
2516 static int
2517 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2518 {
2519         int	i;
2520 
2521         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2522                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2523                         if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2524                                 return (-1);
2525 
2526                         ha->mcast[i].addr[0] = 0;
2527                         ha->mcast[i].addr[1] = 0;
2528                         ha->mcast[i].addr[2] = 0;
2529                         ha->mcast[i].addr[3] = 0;
2530                         ha->mcast[i].addr[4] = 0;
2531                         ha->mcast[i].addr[5] = 0;
2532 
2533                         ha->nmcast--;
2534 
2535                         return 0;
2536                 }
2537         }
2538         return 0;
2539 }
2540 
2541 /*
2542  * Name: qls_hw_set_multi
2543  * Function: Sets the Multicast Addresses provided the host O.S into the
2544  *      hardware (for the given interface)
2545  */
2546 static void
2547 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2548 	uint32_t add_mac)
2549 {
2550         int	i;
2551 
2552         for (i = 0; i < mcnt; i++) {
2553                 if (add_mac) {
2554                         if (qlnx_hw_add_mcast(ha, mta))
2555                                 break;
2556                 } else {
2557                         if (qlnx_hw_del_mcast(ha, mta))
2558                                 break;
2559                 }
2560 
2561                 mta += ETHER_HDR_LEN;
2562         }
2563         return;
2564 }
2565 
2566 static u_int
2567 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
2568 {
2569 	uint8_t *mta = arg;
2570 
2571 	if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2572 		return (0);
2573 
2574 	bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2575 
2576 	return (1);
2577 }
2578 
2579 static int
2580 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2581 {
2582 	uint8_t		mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
2583 	if_t		ifp = ha->ifp;
2584 	u_int		mcnt;
2585 
2586 	if (qlnx_vf_device(ha) == 0)
2587 		return (0);
2588 
2589 	mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta);
2590 
2591 	QLNX_LOCK(ha);
2592 	qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2593 	QLNX_UNLOCK(ha);
2594 
2595 	return (0);
2596 }
2597 
2598 static int
2599 qlnx_set_promisc(qlnx_host_t *ha, int enabled)
2600 {
2601 	int	rc = 0;
2602 	uint8_t	filter;
2603 
2604 	if (qlnx_vf_device(ha) == 0)
2605 		return (0);
2606 
2607 	filter = ha->filter;
2608 	if (enabled) {
2609 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2610 		filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2611 	} else {
2612 		filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
2613 		filter &= ~ECORE_ACCEPT_UCAST_UNMATCHED;
2614 	}
2615 
2616 	rc = qlnx_set_rx_accept_filter(ha, filter);
2617 	return (rc);
2618 }
2619 
2620 static int
2621 qlnx_set_allmulti(qlnx_host_t *ha, int enabled)
2622 {
2623 	int	rc = 0;
2624 	uint8_t	filter;
2625 
2626 	if (qlnx_vf_device(ha) == 0)
2627 		return (0);
2628 
2629 	filter = ha->filter;
2630 	if (enabled) {
2631 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2632 	} else {
2633 		filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
2634 	}
2635 	rc = qlnx_set_rx_accept_filter(ha, filter);
2636 
2637 	return (rc);
2638 }
2639 
2640 static int
2641 qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
2642 {
2643 	int		ret = 0, mask;
2644 	struct ifreq	*ifr = (struct ifreq *)data;
2645 #ifdef INET
2646 	struct ifaddr	*ifa = (struct ifaddr *)data;
2647 #endif
2648 	qlnx_host_t	*ha;
2649 
2650 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2651 
2652 	switch (cmd) {
2653 	case SIOCSIFADDR:
2654 		QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2655 
2656 #ifdef INET
2657 		if (ifa->ifa_addr->sa_family == AF_INET) {
2658 			if_setflagbits(ifp, IFF_UP, 0);
2659 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
2660 				QLNX_LOCK(ha);
2661 				qlnx_init_locked(ha);
2662 				QLNX_UNLOCK(ha);
2663 			}
2664 			QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2665 				   cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2666 
2667 			arp_ifinit(ifp, ifa);
2668 			break;
2669 		}
2670 #endif
2671 		ether_ioctl(ifp, cmd, data);
2672 		break;
2673 
2674 	case SIOCSIFMTU:
2675 		QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2676 
2677 		if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2678 			ret = EINVAL;
2679 		} else {
2680 			QLNX_LOCK(ha);
2681 			if_setmtu(ifp, ifr->ifr_mtu);
2682 			ha->max_frame_size =
2683 				if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2684 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2685 				qlnx_init_locked(ha);
2686 			}
2687 
2688 			QLNX_UNLOCK(ha);
2689 		}
2690 
2691 		break;
2692 
2693 	case SIOCSIFFLAGS:
2694 		QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2695 
2696 		QLNX_LOCK(ha);
2697 
2698 		if (if_getflags(ifp) & IFF_UP) {
2699 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2700 				if ((if_getflags(ifp) ^ ha->if_flags) &
2701 					IFF_PROMISC) {
2702 					ret = qlnx_set_promisc(ha, ifp->if_flags & IFF_PROMISC);
2703 				} else if ((if_getflags(ifp) ^ ha->if_flags) &
2704 					IFF_ALLMULTI) {
2705 					ret = qlnx_set_allmulti(ha, ifp->if_flags & IFF_ALLMULTI);
2706 				}
2707 			} else {
2708 				ha->max_frame_size = if_getmtu(ifp) +
2709 					ETHER_HDR_LEN + ETHER_CRC_LEN;
2710 				qlnx_init_locked(ha);
2711 			}
2712 		} else {
2713 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2714 				qlnx_stop(ha);
2715 		}
2716 
2717 		ha->if_flags = if_getflags(ifp);
2718 		QLNX_UNLOCK(ha);
2719 		break;
2720 
2721 	case SIOCADDMULTI:
2722 		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2723 
2724 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2725 			if (qlnx_set_multi(ha, 1))
2726 				ret = EINVAL;
2727 		}
2728 		break;
2729 
2730 	case SIOCDELMULTI:
2731 		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2732 
2733 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2734 			if (qlnx_set_multi(ha, 0))
2735 				ret = EINVAL;
2736 		}
2737 		break;
2738 
2739 	case SIOCSIFMEDIA:
2740 	case SIOCGIFMEDIA:
2741 		QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2742 
2743 		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2744 		break;
2745 
2746 	case SIOCSIFCAP:
2747 
2748 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2749 
2750 		QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2751 
2752 		if (mask & IFCAP_HWCSUM)
2753 			if_togglecapenable(ifp, IFCAP_HWCSUM);
2754 		if (mask & IFCAP_TSO4)
2755 			if_togglecapenable(ifp, IFCAP_TSO4);
2756 		if (mask & IFCAP_TSO6)
2757 			if_togglecapenable(ifp, IFCAP_TSO6);
2758 		if (mask & IFCAP_VLAN_HWTAGGING)
2759 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2760 		if (mask & IFCAP_VLAN_HWTSO)
2761 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2762 		if (mask & IFCAP_LRO)
2763 			if_togglecapenable(ifp, IFCAP_LRO);
2764 
2765 		QLNX_LOCK(ha);
2766 
2767 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2768 			qlnx_init_locked(ha);
2769 
2770 		QLNX_UNLOCK(ha);
2771 
2772 		VLAN_CAPABILITIES(ifp);
2773 		break;
2774 
2775 	case SIOCGI2C:
2776 	{
2777 		struct ifi2creq i2c;
2778 		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2779 		struct ecore_ptt *p_ptt;
2780 
2781 		ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2782 
2783 		if (ret)
2784 			break;
2785 
2786 		if ((i2c.len > sizeof (i2c.data)) ||
2787 			(i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2788 			ret = EINVAL;
2789 			break;
2790 		}
2791 
2792 		p_ptt = ecore_ptt_acquire(p_hwfn);
2793 
2794 		if (!p_ptt) {
2795 			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2796 			ret = -1;
2797 			break;
2798 		}
2799 
2800 		ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2801 			(ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2802 			i2c.len, &i2c.data[0]);
2803 
2804 		ecore_ptt_release(p_hwfn, p_ptt);
2805 
2806 		if (ret) {
2807 			ret = -1;
2808 			break;
2809 		}
2810 
2811 		ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2812 
2813 		QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2814 			 len = %d addr = 0x%02x offset = 0x%04x \
2815 			 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2816 			 0x%02x 0x%02x 0x%02x\n",
2817 			ret, i2c.len, i2c.dev_addr, i2c.offset,
2818 			i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2819 			i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2820 		break;
2821 	}
2822 
2823 	default:
2824 		QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2825 		ret = ether_ioctl(ifp, cmd, data);
2826 		break;
2827 	}
2828 
2829 	return (ret);
2830 }
2831 
2832 static int
2833 qlnx_media_change(if_t ifp)
2834 {
2835 	qlnx_host_t	*ha;
2836 	struct ifmedia	*ifm;
2837 	int		ret = 0;
2838 
2839 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2840 
2841 	QL_DPRINT2(ha, "enter\n");
2842 
2843 	ifm = &ha->media;
2844 
2845 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2846 		ret = EINVAL;
2847 
2848 	QL_DPRINT2(ha, "exit\n");
2849 
2850 	return (ret);
2851 }
2852 
2853 static void
2854 qlnx_media_status(if_t ifp, struct ifmediareq *ifmr)
2855 {
2856 	qlnx_host_t		*ha;
2857 
2858 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2859 
2860 	QL_DPRINT2(ha, "enter\n");
2861 
2862 	ifmr->ifm_status = IFM_AVALID;
2863 	ifmr->ifm_active = IFM_ETHER;
2864 
2865 	if (ha->link_up) {
2866 		ifmr->ifm_status |= IFM_ACTIVE;
2867 		ifmr->ifm_active |=
2868 			(IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2869 
2870 		if (ha->if_link.link_partner_caps &
2871 			(QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2872 			ifmr->ifm_active |=
2873 				(IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2874 	}
2875 
2876 	QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2877 
2878 	return;
2879 }
2880 
2881 static void
2882 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2883 	struct qlnx_tx_queue *txq)
2884 {
2885 	u16			idx;
2886 	struct mbuf		*mp;
2887 	bus_dmamap_t		map;
2888 	int			i;
2889 //	struct eth_tx_bd	*tx_data_bd;
2890 	struct eth_tx_1st_bd	*first_bd;
2891 	int			nbds = 0;
2892 
2893 	idx = txq->sw_tx_cons;
2894 	mp = txq->sw_tx_ring[idx].mp;
2895 	map = txq->sw_tx_ring[idx].map;
2896 
2897 	if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2898 		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2899 
2900 		QL_DPRINT1(ha, "(mp == NULL) "
2901 			" tx_idx = 0x%x"
2902 			" ecore_prod_idx = 0x%x"
2903 			" ecore_cons_idx = 0x%x"
2904 			" hw_bd_cons = 0x%x"
2905 			" txq_db_last = 0x%x"
2906 			" elem_left = 0x%x\n",
2907 			fp->rss_id,
2908 			ecore_chain_get_prod_idx(&txq->tx_pbl),
2909 			ecore_chain_get_cons_idx(&txq->tx_pbl),
2910 			le16toh(*txq->hw_cons_ptr),
2911 			txq->tx_db.raw,
2912 			ecore_chain_get_elem_left(&txq->tx_pbl));
2913 
2914 		fp->err_tx_free_pkt_null++;
2915 
2916 		//DEBUG
2917 		qlnx_trigger_dump(ha);
2918 
2919 		return;
2920 	} else {
2921 		QLNX_INC_OPACKETS((ha->ifp));
2922 		QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2923 
2924 		bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2925 		bus_dmamap_unload(ha->tx_tag, map);
2926 
2927 		fp->tx_pkts_freed++;
2928 		fp->tx_pkts_completed++;
2929 
2930 		m_freem(mp);
2931 	}
2932 
2933 	first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2934 	nbds = first_bd->data.nbds;
2935 
2936 //	BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2937 
2938 	for (i = 1; i < nbds; i++) {
2939 		/* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl);
2940 //		BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2941 	}
2942 	txq->sw_tx_ring[idx].flags = 0;
2943 	txq->sw_tx_ring[idx].mp = NULL;
2944 	txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2945 
2946 	return;
2947 }
2948 
2949 static void
2950 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2951 	struct qlnx_tx_queue *txq)
2952 {
2953 	u16 hw_bd_cons;
2954 	u16 ecore_cons_idx;
2955 	uint16_t diff;
2956 	uint16_t idx, idx2;
2957 
2958 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2959 
2960 	while (hw_bd_cons !=
2961 		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2962 		diff = hw_bd_cons - ecore_cons_idx;
2963 		if ((diff > TX_RING_SIZE) ||
2964 			QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2965 			QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2966 
2967 			QL_DPRINT1(ha, "(diff = 0x%x) "
2968 				" tx_idx = 0x%x"
2969 				" ecore_prod_idx = 0x%x"
2970 				" ecore_cons_idx = 0x%x"
2971 				" hw_bd_cons = 0x%x"
2972 				" txq_db_last = 0x%x"
2973 				" elem_left = 0x%x\n",
2974 				diff,
2975 				fp->rss_id,
2976 				ecore_chain_get_prod_idx(&txq->tx_pbl),
2977 				ecore_chain_get_cons_idx(&txq->tx_pbl),
2978 				le16toh(*txq->hw_cons_ptr),
2979 				txq->tx_db.raw,
2980 				ecore_chain_get_elem_left(&txq->tx_pbl));
2981 
2982 			fp->err_tx_cons_idx_conflict++;
2983 
2984 			//DEBUG
2985 			qlnx_trigger_dump(ha);
2986 		}
2987 
2988 		idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2989 		idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
2990 		prefetch(txq->sw_tx_ring[idx].mp);
2991 		prefetch(txq->sw_tx_ring[idx2].mp);
2992 
2993 		qlnx_free_tx_pkt(ha, fp, txq);
2994 
2995 		txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2996 	}
2997 	return;
2998 }
2999 
3000 static int
3001 qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
3002 {
3003         int                     ret = 0;
3004         struct qlnx_tx_queue    *txq;
3005         qlnx_host_t *           ha;
3006         uint16_t elem_left;
3007 
3008         txq = fp->txq[0];
3009         ha = (qlnx_host_t *)fp->edev;
3010 
3011         if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) {
3012                 if(mp != NULL)
3013                         ret = drbr_enqueue(ifp, fp->tx_br, mp);
3014                 return (ret);
3015         }
3016 
3017         if(mp != NULL)
3018                 ret  = drbr_enqueue(ifp, fp->tx_br, mp);
3019 
3020         mp = drbr_peek(ifp, fp->tx_br);
3021 
3022         while (mp != NULL) {
3023                 if (qlnx_send(ha, fp, &mp)) {
3024                         if (mp != NULL) {
3025                                 drbr_putback(ifp, fp->tx_br, mp);
3026                         } else {
3027                                 fp->tx_pkts_processed++;
3028                                 drbr_advance(ifp, fp->tx_br);
3029                         }
3030                         goto qlnx_transmit_locked_exit;
3031 
3032                 } else {
3033                         drbr_advance(ifp, fp->tx_br);
3034                         fp->tx_pkts_transmitted++;
3035                         fp->tx_pkts_processed++;
3036                 }
3037 
3038                 mp = drbr_peek(ifp, fp->tx_br);
3039         }
3040 
3041 qlnx_transmit_locked_exit:
3042         if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3043                 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3044                                         < QLNX_TX_ELEM_MAX_THRESH))
3045                 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3046 
3047         QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3048         return ret;
3049 }
3050 
3051 static int
3052 qlnx_transmit(if_t ifp, struct mbuf  *mp)
3053 {
3054         qlnx_host_t		*ha = (qlnx_host_t *)if_getsoftc(ifp);
3055         struct qlnx_fastpath	*fp;
3056         int			rss_id = 0, ret = 0;
3057 
3058 #ifdef QLNX_TRACEPERF_DATA
3059         uint64_t tx_pkts = 0, tx_compl = 0;
3060 #endif
3061 
3062         QL_DPRINT2(ha, "enter\n");
3063 
3064         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
3065                 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3066 					ha->num_rss;
3067 
3068         fp = &ha->fp_array[rss_id];
3069 
3070         if (fp->tx_br == NULL) {
3071                 ret = EINVAL;
3072                 goto qlnx_transmit_exit;
3073         }
3074 
3075         if (mtx_trylock(&fp->tx_mtx)) {
3076 #ifdef QLNX_TRACEPERF_DATA
3077                         tx_pkts = fp->tx_pkts_transmitted;
3078                         tx_compl = fp->tx_pkts_completed;
3079 #endif
3080 
3081                         ret = qlnx_transmit_locked(ifp, fp, mp);
3082 
3083 #ifdef QLNX_TRACEPERF_DATA
3084                         fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3085                         fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3086 #endif
3087                         mtx_unlock(&fp->tx_mtx);
3088         } else {
3089                 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3090                         ret = drbr_enqueue(ifp, fp->tx_br, mp);
3091                         taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3092                 }
3093         }
3094 
3095 qlnx_transmit_exit:
3096 
3097         QL_DPRINT2(ha, "exit ret = %d\n", ret);
3098         return ret;
3099 }
3100 
3101 static void
3102 qlnx_qflush(if_t ifp)
3103 {
3104 	int			rss_id;
3105 	struct qlnx_fastpath	*fp;
3106 	struct mbuf		*mp;
3107 	qlnx_host_t		*ha;
3108 
3109 	ha = (qlnx_host_t *)if_getsoftc(ifp);
3110 
3111 	QL_DPRINT2(ha, "enter\n");
3112 
3113 	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3114 		fp = &ha->fp_array[rss_id];
3115 
3116 		if (fp == NULL)
3117 			continue;
3118 
3119 		if (fp->tx_br) {
3120 			mtx_lock(&fp->tx_mtx);
3121 
3122 			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3123 				fp->tx_pkts_freed++;
3124 				m_freem(mp);
3125 			}
3126 			mtx_unlock(&fp->tx_mtx);
3127 		}
3128 	}
3129 	QL_DPRINT2(ha, "exit\n");
3130 
3131 	return;
3132 }
3133 
3134 static void
3135 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3136 {
3137 	uint32_t		offset;
3138 
3139 	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3140 
3141 	bus_write_4(ha->pci_dbells, offset, value);
3142 	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_READ);
3143 	bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
3144 
3145 	return;
3146 }
3147 
3148 static uint32_t
3149 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3150 {
3151         struct ether_vlan_header	*eh = NULL;
3152         struct ip			*ip = NULL;
3153         struct ip6_hdr			*ip6 = NULL;
3154         struct tcphdr			*th = NULL;
3155         uint32_t			ehdrlen = 0, ip_hlen = 0, offset = 0;
3156         uint16_t			etype = 0;
3157         uint8_t				buf[sizeof(struct ip6_hdr)];
3158 
3159         eh = mtod(mp, struct ether_vlan_header *);
3160 
3161         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3162                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3163                 etype = ntohs(eh->evl_proto);
3164         } else {
3165                 ehdrlen = ETHER_HDR_LEN;
3166                 etype = ntohs(eh->evl_encap_proto);
3167         }
3168 
3169         switch (etype) {
3170                 case ETHERTYPE_IP:
3171                         ip = (struct ip *)(mp->m_data + ehdrlen);
3172 
3173                         ip_hlen = sizeof (struct ip);
3174 
3175                         if (mp->m_len < (ehdrlen + ip_hlen)) {
3176                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3177                                 ip = (struct ip *)buf;
3178                         }
3179 
3180                         th = (struct tcphdr *)(ip + 1);
3181 			offset = ip_hlen + ehdrlen + (th->th_off << 2);
3182                 break;
3183 
3184                 case ETHERTYPE_IPV6:
3185                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3186 
3187                         ip_hlen = sizeof(struct ip6_hdr);
3188 
3189                         if (mp->m_len < (ehdrlen + ip_hlen)) {
3190                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3191                                         buf);
3192                                 ip6 = (struct ip6_hdr *)buf;
3193                         }
3194                         th = (struct tcphdr *)(ip6 + 1);
3195 			offset = ip_hlen + ehdrlen + (th->th_off << 2);
3196                 break;
3197 
3198                 default:
3199                 break;
3200         }
3201 
3202         return (offset);
3203 }
3204 
3205 static __inline int
3206 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3207 	uint32_t offset)
3208 {
3209 	int			i;
3210 	uint32_t		sum, nbds_in_hdr = 1;
3211         uint32_t		window;
3212         bus_dma_segment_t	*s_seg;
3213 
3214         /* If the header spans multiple segments, skip those segments */
3215 
3216         if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3217                 return (0);
3218 
3219         i = 0;
3220 
3221         while ((i < nsegs) && (offset >= segs->ds_len)) {
3222                 offset = offset - segs->ds_len;
3223                 segs++;
3224                 i++;
3225                 nbds_in_hdr++;
3226         }
3227 
3228         window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3229 
3230         nsegs = nsegs - i;
3231 
3232         while (nsegs >= window) {
3233                 sum = 0;
3234                 s_seg = segs;
3235 
3236                 for (i = 0; i < window; i++){
3237                         sum += s_seg->ds_len;
3238                         s_seg++;
3239                 }
3240 
3241                 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3242                         fp->tx_lso_wnd_min_len++;
3243                         return (-1);
3244                 }
3245 
3246                 nsegs = nsegs - 1;
3247                 segs++;
3248         }
3249 
3250 	return (0);
3251 }
3252 
3253 static int
3254 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3255 {
3256 	bus_dma_segment_t	*segs;
3257 	bus_dmamap_t		map = 0;
3258 	uint32_t		nsegs = 0;
3259 	int			ret = -1;
3260 	struct mbuf		*m_head = *m_headp;
3261 	uint16_t		idx = 0;
3262 	uint16_t		elem_left;
3263 
3264 	uint8_t			nbd = 0;
3265 	struct qlnx_tx_queue    *txq;
3266 
3267 	struct eth_tx_1st_bd    *first_bd;
3268 	struct eth_tx_2nd_bd    *second_bd;
3269 	struct eth_tx_3rd_bd    *third_bd;
3270 	struct eth_tx_bd        *tx_data_bd;
3271 
3272 	int			seg_idx = 0;
3273 	uint32_t		nbds_in_hdr = 0;
3274 	uint32_t		offset = 0;
3275 
3276 #ifdef QLNX_TRACE_PERF_DATA
3277         uint16_t                bd_used;
3278 #endif
3279 
3280 	QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3281 
3282 	if (!ha->link_up)
3283 		return (-1);
3284 
3285 	first_bd	= NULL;
3286 	second_bd	= NULL;
3287 	third_bd	= NULL;
3288 	tx_data_bd	= NULL;
3289 
3290 	txq = fp->txq[0];
3291 
3292         if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3293 		QLNX_TX_ELEM_MIN_THRESH) {
3294                 fp->tx_nsegs_gt_elem_left++;
3295                 fp->err_tx_nsegs_gt_elem_left++;
3296 
3297                 return (ENOBUFS);
3298         }
3299 
3300 	idx = txq->sw_tx_prod;
3301 
3302 	map = txq->sw_tx_ring[idx].map;
3303 	segs = txq->segs;
3304 
3305 	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3306 			BUS_DMA_NOWAIT);
3307 
3308 	if (ha->dbg_trace_tso_pkt_len) {
3309 		if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3310 			if (!fp->tx_tso_min_pkt_len) {
3311 				fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3312 				fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3313 			} else {
3314 				if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3315 					fp->tx_tso_min_pkt_len =
3316 						m_head->m_pkthdr.len;
3317 				if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3318 					fp->tx_tso_max_pkt_len =
3319 						m_head->m_pkthdr.len;
3320 			}
3321 		}
3322 	}
3323 
3324 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3325 		offset = qlnx_tcp_offset(ha, m_head);
3326 
3327 	if ((ret == EFBIG) ||
3328 		((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3329 			(!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3330 		((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3331 			qlnx_tso_check(fp, segs, nsegs, offset))))) {
3332 		struct mbuf *m;
3333 
3334 		QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3335 
3336 		fp->tx_defrag++;
3337 
3338 		m = m_defrag(m_head, M_NOWAIT);
3339 		if (m == NULL) {
3340 			fp->err_tx_defrag++;
3341 			fp->tx_pkts_freed++;
3342 			m_freem(m_head);
3343 			*m_headp = NULL;
3344 			QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3345 			return (ENOBUFS);
3346 		}
3347 
3348 		m_head = m;
3349 		*m_headp = m_head;
3350 
3351 		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3352 				segs, &nsegs, BUS_DMA_NOWAIT))) {
3353 			fp->err_tx_defrag_dmamap_load++;
3354 
3355 			QL_DPRINT1(ha,
3356 				"bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3357 				ret, m_head->m_pkthdr.len);
3358 
3359 			fp->tx_pkts_freed++;
3360 			m_freem(m_head);
3361 			*m_headp = NULL;
3362 
3363 			return (ret);
3364 		}
3365 
3366 		if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3367 			!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3368 			fp->err_tx_non_tso_max_seg++;
3369 
3370 			QL_DPRINT1(ha,
3371 				"(%d) nsegs too many for non-TSO [%d, %d]\n",
3372 				ret, nsegs, m_head->m_pkthdr.len);
3373 
3374 			fp->tx_pkts_freed++;
3375 			m_freem(m_head);
3376 			*m_headp = NULL;
3377 
3378 			return (ret);
3379 		}
3380 		if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3381 			offset = qlnx_tcp_offset(ha, m_head);
3382 
3383 	} else if (ret) {
3384 		fp->err_tx_dmamap_load++;
3385 
3386 		QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3387 			   ret, m_head->m_pkthdr.len);
3388 		fp->tx_pkts_freed++;
3389 		m_freem(m_head);
3390 		*m_headp = NULL;
3391 		return (ret);
3392 	}
3393 
3394 	QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3395 
3396 	if (ha->dbg_trace_tso_pkt_len) {
3397 		if (nsegs < QLNX_FP_MAX_SEGS)
3398 			fp->tx_pkts[(nsegs - 1)]++;
3399 		else
3400 			fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3401 	}
3402 
3403 #ifdef QLNX_TRACE_PERF_DATA
3404         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3405                 if(m_head->m_pkthdr.len <= 2048)
3406                         fp->tx_pkts_hist[0]++;
3407                 else if((m_head->m_pkthdr.len > 2048) &&
3408 				(m_head->m_pkthdr.len <= 4096))
3409                         fp->tx_pkts_hist[1]++;
3410                 else if((m_head->m_pkthdr.len > 4096) &&
3411 				(m_head->m_pkthdr.len <= 8192))
3412                         fp->tx_pkts_hist[2]++;
3413                 else if((m_head->m_pkthdr.len > 8192) &&
3414 				(m_head->m_pkthdr.len <= 12288 ))
3415                         fp->tx_pkts_hist[3]++;
3416                 else if((m_head->m_pkthdr.len > 11288) &&
3417 				(m_head->m_pkthdr.len <= 16394))
3418                         fp->tx_pkts_hist[4]++;
3419                 else if((m_head->m_pkthdr.len > 16384) &&
3420 				(m_head->m_pkthdr.len <= 20480))
3421                         fp->tx_pkts_hist[5]++;
3422                 else if((m_head->m_pkthdr.len > 20480) &&
3423 				(m_head->m_pkthdr.len <= 24576))
3424                         fp->tx_pkts_hist[6]++;
3425                 else if((m_head->m_pkthdr.len > 24576) &&
3426 				(m_head->m_pkthdr.len <= 28672))
3427                         fp->tx_pkts_hist[7]++;
3428                 else if((m_head->m_pkthdr.len > 28762) &&
3429 				(m_head->m_pkthdr.len <= 32768))
3430                         fp->tx_pkts_hist[8]++;
3431                 else if((m_head->m_pkthdr.len > 32768) &&
3432 				(m_head->m_pkthdr.len <= 36864))
3433                         fp->tx_pkts_hist[9]++;
3434                 else if((m_head->m_pkthdr.len > 36864) &&
3435 				(m_head->m_pkthdr.len <= 40960))
3436                         fp->tx_pkts_hist[10]++;
3437                 else if((m_head->m_pkthdr.len > 40960) &&
3438 				(m_head->m_pkthdr.len <= 45056))
3439                         fp->tx_pkts_hist[11]++;
3440                 else if((m_head->m_pkthdr.len > 45056) &&
3441 				(m_head->m_pkthdr.len <= 49152))
3442                         fp->tx_pkts_hist[12]++;
3443                 else if((m_head->m_pkthdr.len > 49512) &&
3444 				m_head->m_pkthdr.len <= 53248))
3445                         fp->tx_pkts_hist[13]++;
3446                 else if((m_head->m_pkthdr.len > 53248) &&
3447 				(m_head->m_pkthdr.len <= 57344))
3448                         fp->tx_pkts_hist[14]++;
3449                 else if((m_head->m_pkthdr.len > 53248) &&
3450 				(m_head->m_pkthdr.len <= 57344))
3451                         fp->tx_pkts_hist[15]++;
3452                 else if((m_head->m_pkthdr.len > 57344) &&
3453 				(m_head->m_pkthdr.len <= 61440))
3454                         fp->tx_pkts_hist[16]++;
3455                 else
3456                         fp->tx_pkts_hist[17]++;
3457         }
3458 
3459         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3460                 elem_left =  ecore_chain_get_elem_left(&txq->tx_pbl);
3461                 bd_used = TX_RING_SIZE - elem_left;
3462 
3463                 if(bd_used <= 100)
3464                         fp->tx_pkts_q[0]++;
3465                 else if((bd_used > 100) && (bd_used <= 500))
3466                         fp->tx_pkts_q[1]++;
3467                 else if((bd_used > 500) && (bd_used <= 1000))
3468                         fp->tx_pkts_q[2]++;
3469                 else if((bd_used > 1000) && (bd_used <= 2000))
3470                         fp->tx_pkts_q[3]++;
3471                 else if((bd_used > 3000) && (bd_used <= 4000))
3472                         fp->tx_pkts_q[4]++;
3473                 else if((bd_used > 4000) && (bd_used <= 5000))
3474                         fp->tx_pkts_q[5]++;
3475                 else if((bd_used > 6000) && (bd_used <= 7000))
3476                         fp->tx_pkts_q[6]++;
3477                 else if((bd_used > 7000) && (bd_used <= 8000))
3478                         fp->tx_pkts_q[7]++;
3479                 else if((bd_used > 8000) && (bd_used <= 9000))
3480                         fp->tx_pkts_q[8]++;
3481                 else if((bd_used > 9000) && (bd_used <= 10000))
3482                         fp->tx_pkts_q[9]++;
3483                 else if((bd_used > 10000) && (bd_used <= 11000))
3484                         fp->tx_pkts_q[10]++;
3485                 else if((bd_used > 11000) && (bd_used <= 12000))
3486                         fp->tx_pkts_q[11]++;
3487                 else if((bd_used > 12000) && (bd_used <= 13000))
3488                         fp->tx_pkts_q[12]++;
3489                 else if((bd_used > 13000) && (bd_used <= 14000))
3490                         fp->tx_pkts_q[13]++;
3491                 else if((bd_used > 14000) && (bd_used <= 15000))
3492                         fp->tx_pkts_q[14]++;
3493                else if((bd_used > 15000) && (bd_used <= 16000))
3494                         fp->tx_pkts_q[15]++;
3495                 else
3496                         fp->tx_pkts_q[16]++;
3497         }
3498 
3499 #endif /* end of QLNX_TRACE_PERF_DATA */
3500 
3501 	if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3502 		(int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3503 		QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
3504 			" in chain[%d] trying to free packets\n",
3505 			nsegs, elem_left, fp->rss_id);
3506 
3507 		fp->tx_nsegs_gt_elem_left++;
3508 
3509 		(void)qlnx_tx_int(ha, fp, txq);
3510 
3511 		if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3512 			ecore_chain_get_elem_left(&txq->tx_pbl))) {
3513 			QL_DPRINT1(ha,
3514 				"(%d, 0x%x) insuffient BDs in chain[%d]\n",
3515 				nsegs, elem_left, fp->rss_id);
3516 
3517 			fp->err_tx_nsegs_gt_elem_left++;
3518 			fp->tx_ring_full = 1;
3519 			if (ha->storm_stats_enable)
3520 				ha->storm_stats_gather = 1;
3521 			return (ENOBUFS);
3522 		}
3523 	}
3524 
3525 	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3526 
3527 	txq->sw_tx_ring[idx].mp = m_head;
3528 
3529 	first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3530 
3531 	memset(first_bd, 0, sizeof(*first_bd));
3532 
3533 	first_bd->data.bd_flags.bitfields =
3534 		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3535 
3536 	BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3537 
3538 	nbd++;
3539 
3540 	if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3541 		first_bd->data.bd_flags.bitfields |=
3542 			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3543 	}
3544 
3545 	if (m_head->m_pkthdr.csum_flags &
3546 		(CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3547 		first_bd->data.bd_flags.bitfields |=
3548 			(1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3549 	}
3550 
3551         if (m_head->m_flags & M_VLANTAG) {
3552                 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3553 		first_bd->data.bd_flags.bitfields |=
3554 			(1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3555         }
3556 
3557 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3558                 first_bd->data.bd_flags.bitfields |=
3559 			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3560 		first_bd->data.bd_flags.bitfields |=
3561 			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3562 
3563 		nbds_in_hdr = 1;
3564 
3565 		if (offset == segs->ds_len) {
3566 			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3567 			segs++;
3568 			seg_idx++;
3569 
3570 			second_bd = (struct eth_tx_2nd_bd *)
3571 					ecore_chain_produce(&txq->tx_pbl);
3572 			memset(second_bd, 0, sizeof(*second_bd));
3573 			nbd++;
3574 
3575 			if (seg_idx < nsegs) {
3576 				BD_SET_UNMAP_ADDR_LEN(second_bd, \
3577 					(segs->ds_addr), (segs->ds_len));
3578 				segs++;
3579 				seg_idx++;
3580 			}
3581 
3582 			third_bd = (struct eth_tx_3rd_bd *)
3583 					ecore_chain_produce(&txq->tx_pbl);
3584 			memset(third_bd, 0, sizeof(*third_bd));
3585 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3586 			third_bd->data.bitfields |=
3587 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3588 			nbd++;
3589 
3590 			if (seg_idx < nsegs) {
3591 				BD_SET_UNMAP_ADDR_LEN(third_bd, \
3592 					(segs->ds_addr), (segs->ds_len));
3593 				segs++;
3594 				seg_idx++;
3595 			}
3596 
3597 			for (; seg_idx < nsegs; seg_idx++) {
3598 				tx_data_bd = (struct eth_tx_bd *)
3599 					ecore_chain_produce(&txq->tx_pbl);
3600 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3601 				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3602 					segs->ds_addr,\
3603 					segs->ds_len);
3604 				segs++;
3605 				nbd++;
3606 			}
3607 
3608 		} else if (offset < segs->ds_len) {
3609 			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3610 
3611 			second_bd = (struct eth_tx_2nd_bd *)
3612 					ecore_chain_produce(&txq->tx_pbl);
3613 			memset(second_bd, 0, sizeof(*second_bd));
3614 			BD_SET_UNMAP_ADDR_LEN(second_bd, \
3615 				(segs->ds_addr + offset),\
3616 				(segs->ds_len - offset));
3617 			nbd++;
3618 			segs++;
3619 
3620 			third_bd = (struct eth_tx_3rd_bd *)
3621 					ecore_chain_produce(&txq->tx_pbl);
3622 			memset(third_bd, 0, sizeof(*third_bd));
3623 
3624 			BD_SET_UNMAP_ADDR_LEN(third_bd, \
3625 					segs->ds_addr,\
3626 					segs->ds_len);
3627 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3628 			third_bd->data.bitfields |=
3629 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3630 			segs++;
3631 			nbd++;
3632 
3633 			for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3634 				tx_data_bd = (struct eth_tx_bd *)
3635 					ecore_chain_produce(&txq->tx_pbl);
3636 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3637 				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3638 					segs->ds_addr,\
3639 					segs->ds_len);
3640 				segs++;
3641 				nbd++;
3642 			}
3643 
3644 		} else {
3645 			offset = offset - segs->ds_len;
3646 			segs++;
3647 
3648 			for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3649 				if (offset)
3650 					nbds_in_hdr++;
3651 
3652 				tx_data_bd = (struct eth_tx_bd *)
3653 					ecore_chain_produce(&txq->tx_pbl);
3654 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3655 
3656 				if (second_bd == NULL) {
3657 					second_bd = (struct eth_tx_2nd_bd *)
3658 								tx_data_bd;
3659 				} else if (third_bd == NULL) {
3660 					third_bd = (struct eth_tx_3rd_bd *)
3661 								tx_data_bd;
3662 				}
3663 
3664 				if (offset && (offset < segs->ds_len)) {
3665 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3666 						segs->ds_addr, offset);
3667 
3668 					tx_data_bd = (struct eth_tx_bd *)
3669 					ecore_chain_produce(&txq->tx_pbl);
3670 
3671 					memset(tx_data_bd, 0,
3672 						sizeof(*tx_data_bd));
3673 
3674 					if (second_bd == NULL) {
3675 						second_bd =
3676 					(struct eth_tx_2nd_bd *)tx_data_bd;
3677 					} else if (third_bd == NULL) {
3678 						third_bd =
3679 					(struct eth_tx_3rd_bd *)tx_data_bd;
3680 					}
3681 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3682 						(segs->ds_addr + offset), \
3683 						(segs->ds_len - offset));
3684 					nbd++;
3685 					offset = 0;
3686 				} else {
3687 					if (offset)
3688 						offset = offset - segs->ds_len;
3689 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3690 						segs->ds_addr, segs->ds_len);
3691 				}
3692 				segs++;
3693 				nbd++;
3694 			}
3695 
3696 			if (third_bd == NULL) {
3697 				third_bd = (struct eth_tx_3rd_bd *)
3698 					ecore_chain_produce(&txq->tx_pbl);
3699 				memset(third_bd, 0, sizeof(*third_bd));
3700 			}
3701 
3702 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3703 			third_bd->data.bitfields |=
3704 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3705 		}
3706 		fp->tx_tso_pkts++;
3707 	} else {
3708 		segs++;
3709 		for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3710 			tx_data_bd = (struct eth_tx_bd *)
3711 					ecore_chain_produce(&txq->tx_pbl);
3712 			memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3713 			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3714 				segs->ds_len);
3715 			segs++;
3716 			nbd++;
3717 		}
3718 		first_bd->data.bitfields =
3719 			(m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3720 				 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3721 		first_bd->data.bitfields =
3722 			htole16(first_bd->data.bitfields);
3723 		fp->tx_non_tso_pkts++;
3724 	}
3725 
3726 	first_bd->data.nbds = nbd;
3727 
3728 	if (ha->dbg_trace_tso_pkt_len) {
3729 		if (fp->tx_tso_max_nsegs < nsegs)
3730 			fp->tx_tso_max_nsegs = nsegs;
3731 
3732 		if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3733 			fp->tx_tso_min_nsegs = nsegs;
3734 	}
3735 
3736 	txq->sw_tx_ring[idx].nsegs = nsegs;
3737 	txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3738 
3739 	txq->tx_db.data.bd_prod =
3740 		htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3741 
3742 	qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3743 
3744 	QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3745 	return (0);
3746 }
3747 
3748 static void
3749 qlnx_stop(qlnx_host_t *ha)
3750 {
3751 	if_t		ifp = ha->ifp;
3752 	int		i;
3753 
3754 	if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
3755 
3756 	/*
3757 	 * We simply lock and unlock each fp->tx_mtx to
3758 	 * propagate the if_drv_flags
3759 	 * state to each tx thread
3760 	 */
3761         QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3762 
3763 	if (ha->state == QLNX_STATE_OPEN) {
3764         	for (i = 0; i < ha->num_rss; i++) {
3765 			struct qlnx_fastpath *fp = &ha->fp_array[i];
3766 
3767 			mtx_lock(&fp->tx_mtx);
3768 			mtx_unlock(&fp->tx_mtx);
3769 
3770 			if (fp->fp_taskqueue != NULL)
3771 				taskqueue_enqueue(fp->fp_taskqueue,
3772 					&fp->fp_task);
3773 		}
3774 	}
3775 #ifdef QLNX_ENABLE_IWARP
3776 	if (qlnx_vf_device(ha) != 0) {
3777 		qlnx_rdma_dev_close(ha);
3778 	}
3779 #endif /* #ifdef QLNX_ENABLE_IWARP */
3780 
3781 	qlnx_unload(ha);
3782 
3783 	return;
3784 }
3785 
3786 static int
3787 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3788 {
3789         return(TX_RING_SIZE - 1);
3790 }
3791 
3792 uint8_t *
3793 qlnx_get_mac_addr(qlnx_host_t *ha)
3794 {
3795 	struct ecore_hwfn	*p_hwfn;
3796 	unsigned char mac[ETHER_ADDR_LEN];
3797 	uint8_t			p_is_forced;
3798 
3799 	p_hwfn = &ha->cdev.hwfns[0];
3800 
3801 	if (qlnx_vf_device(ha) != 0)
3802 		return (p_hwfn->hw_info.hw_mac_addr);
3803 
3804 	ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3805 	if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3806 		true) {
3807 		device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3808 			" mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3809 			p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3810         	memcpy(ha->primary_mac, mac, ETH_ALEN);
3811 	}
3812 
3813 	return (ha->primary_mac);
3814 }
3815 
3816 static uint32_t
3817 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3818 {
3819 	uint32_t	ifm_type = 0;
3820 
3821 	switch (if_link->media_type) {
3822 	case MEDIA_MODULE_FIBER:
3823 	case MEDIA_UNSPECIFIED:
3824 		if (if_link->speed == (100 * 1000))
3825 			ifm_type = QLNX_IFM_100G_SR4;
3826 		else if (if_link->speed == (40 * 1000))
3827 			ifm_type = IFM_40G_SR4;
3828 		else if (if_link->speed == (25 * 1000))
3829 			ifm_type = QLNX_IFM_25G_SR;
3830 		else if (if_link->speed == (10 * 1000))
3831 			ifm_type = (IFM_10G_LR | IFM_10G_SR);
3832 		else if (if_link->speed == (1 * 1000))
3833 			ifm_type = (IFM_1000_SX | IFM_1000_LX);
3834 
3835 		break;
3836 
3837 	case MEDIA_DA_TWINAX:
3838 		if (if_link->speed == (100 * 1000))
3839 			ifm_type = QLNX_IFM_100G_CR4;
3840 		else if (if_link->speed == (40 * 1000))
3841 			ifm_type = IFM_40G_CR4;
3842 		else if (if_link->speed == (25 * 1000))
3843 			ifm_type = QLNX_IFM_25G_CR;
3844 		else if (if_link->speed == (10 * 1000))
3845 			ifm_type = IFM_10G_TWINAX;
3846 
3847 		break;
3848 
3849 	default :
3850 		ifm_type = IFM_UNKNOWN;
3851 		break;
3852 	}
3853 	return (ifm_type);
3854 }
3855 
3856 /*****************************************************************************
3857  * Interrupt Service Functions
3858  *****************************************************************************/
3859 
3860 static int
3861 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3862 	struct mbuf *mp_head, uint16_t len)
3863 {
3864 	struct mbuf		*mp, *mpf, *mpl;
3865 	struct sw_rx_data	*sw_rx_data;
3866 	struct qlnx_rx_queue	*rxq;
3867 	uint16_t 		len_in_buffer;
3868 
3869 	rxq = fp->rxq;
3870 	mpf = mpl = mp = NULL;
3871 
3872 	while (len) {
3873         	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3874 
3875                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3876                 mp = sw_rx_data->data;
3877 
3878 		if (mp == NULL) {
3879                 	QL_DPRINT1(ha, "mp = NULL\n");
3880 			fp->err_rx_mp_null++;
3881         		rxq->sw_rx_cons  =
3882 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3883 
3884 			if (mpf != NULL)
3885 				m_freem(mpf);
3886 
3887 			return (-1);
3888 		}
3889 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3890 			BUS_DMASYNC_POSTREAD);
3891 
3892                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3893                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3894 				" incoming packet and reusing its buffer\n");
3895 
3896                         qlnx_reuse_rx_data(rxq);
3897                         fp->err_rx_alloc_errors++;
3898 
3899 			if (mpf != NULL)
3900 				m_freem(mpf);
3901 
3902 			return (-1);
3903 		}
3904                 ecore_chain_consume(&rxq->rx_bd_ring);
3905 
3906 		if (len > rxq->rx_buf_size)
3907 			len_in_buffer = rxq->rx_buf_size;
3908 		else
3909 			len_in_buffer = len;
3910 
3911 		len = len - len_in_buffer;
3912 
3913 		mp->m_flags &= ~M_PKTHDR;
3914 		mp->m_next = NULL;
3915 		mp->m_len = len_in_buffer;
3916 
3917 		if (mpf == NULL)
3918 			mpf = mpl = mp;
3919 		else {
3920 			mpl->m_next = mp;
3921 			mpl = mp;
3922 		}
3923 	}
3924 
3925 	if (mpf != NULL)
3926 		mp_head->m_next = mpf;
3927 
3928 	return (0);
3929 }
3930 
3931 static void
3932 qlnx_tpa_start(qlnx_host_t *ha,
3933 	struct qlnx_fastpath *fp,
3934 	struct qlnx_rx_queue *rxq,
3935 	struct eth_fast_path_rx_tpa_start_cqe *cqe)
3936 {
3937 	uint32_t		agg_index;
3938         if_t ifp = ha->ifp;
3939 	struct mbuf		*mp;
3940 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
3941 	struct sw_rx_data	*sw_rx_data;
3942 	dma_addr_t		addr;
3943 	bus_dmamap_t		map;
3944 	struct eth_rx_bd	*rx_bd;
3945 	int			i;
3946 	uint8_t			hash_type;
3947 
3948 	agg_index = cqe->tpa_agg_index;
3949 
3950         QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3951                 \t type = 0x%x\n \
3952                 \t bitfields = 0x%x\n \
3953                 \t seg_len = 0x%x\n \
3954                 \t pars_flags = 0x%x\n \
3955                 \t vlan_tag = 0x%x\n \
3956                 \t rss_hash = 0x%x\n \
3957                 \t len_on_first_bd = 0x%x\n \
3958                 \t placement_offset = 0x%x\n \
3959                 \t tpa_agg_index = 0x%x\n \
3960                 \t header_len = 0x%x\n \
3961                 \t ext_bd_len_list[0] = 0x%x\n \
3962                 \t ext_bd_len_list[1] = 0x%x\n \
3963                 \t ext_bd_len_list[2] = 0x%x\n \
3964                 \t ext_bd_len_list[3] = 0x%x\n \
3965                 \t ext_bd_len_list[4] = 0x%x\n",
3966                 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3967                 cqe->pars_flags.flags, cqe->vlan_tag,
3968                 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3969                 cqe->tpa_agg_index, cqe->header_len,
3970                 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3971                 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3972                 cqe->ext_bd_len_list[4]);
3973 
3974 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3975 		fp->err_rx_tpa_invalid_agg_num++;
3976 		return;
3977 	}
3978 
3979 	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3980 	bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3981 	mp = sw_rx_data->data;
3982 
3983 	QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3984 
3985 	if (mp == NULL) {
3986                	QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3987 		fp->err_rx_mp_null++;
3988        		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3989 
3990 		return;
3991 	}
3992 
3993 	if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3994 		QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3995 			" flags = %x, dropping incoming packet\n", fp->rss_id,
3996 			rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3997 
3998 		fp->err_rx_hw_errors++;
3999 
4000 		qlnx_reuse_rx_data(rxq);
4001 
4002 		QLNX_INC_IERRORS(ifp);
4003 
4004 		return;
4005 	}
4006 
4007 	if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4008 		QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4009 			" dropping incoming packet and reusing its buffer\n",
4010 			fp->rss_id);
4011 
4012 		fp->err_rx_alloc_errors++;
4013 		QLNX_INC_IQDROPS(ifp);
4014 
4015 		/*
4016 		 * Load the tpa mbuf into the rx ring and save the
4017 		 * posted mbuf
4018 		 */
4019 
4020 		map = sw_rx_data->map;
4021 		addr = sw_rx_data->dma_addr;
4022 
4023 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4024 
4025 		sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4026 		sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4027 		sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4028 
4029 		rxq->tpa_info[agg_index].rx_buf.data = mp;
4030 		rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4031 		rxq->tpa_info[agg_index].rx_buf.map = map;
4032 
4033 		rx_bd = (struct eth_rx_bd *)
4034 				ecore_chain_produce(&rxq->rx_bd_ring);
4035 
4036 		rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4037 		rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4038 
4039 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4040 			BUS_DMASYNC_PREREAD);
4041 
4042 		rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4043 		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4044 
4045 		ecore_chain_consume(&rxq->rx_bd_ring);
4046 
4047 		/* Now reuse any buffers posted in ext_bd_len_list */
4048 		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4049 			if (cqe->ext_bd_len_list[i] == 0)
4050 				break;
4051 
4052 			qlnx_reuse_rx_data(rxq);
4053 		}
4054 
4055 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4056 		return;
4057 	}
4058 
4059 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4060 		QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4061 			" dropping incoming packet and reusing its buffer\n",
4062 			fp->rss_id);
4063 
4064 		QLNX_INC_IQDROPS(ifp);
4065 
4066 		/* if we already have mbuf head in aggregation free it */
4067 		if (rxq->tpa_info[agg_index].mpf) {
4068 			m_freem(rxq->tpa_info[agg_index].mpf);
4069 			rxq->tpa_info[agg_index].mpl = NULL;
4070 		}
4071 		rxq->tpa_info[agg_index].mpf = mp;
4072 		rxq->tpa_info[agg_index].mpl = NULL;
4073 
4074 		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4075 		ecore_chain_consume(&rxq->rx_bd_ring);
4076 
4077 		/* Now reuse any buffers posted in ext_bd_len_list */
4078 		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4079 			if (cqe->ext_bd_len_list[i] == 0)
4080 				break;
4081 
4082 			qlnx_reuse_rx_data(rxq);
4083 		}
4084 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4085 
4086 		return;
4087 	}
4088 
4089 	/*
4090 	 * first process the ext_bd_len_list
4091 	 * if this fails then we simply drop the packet
4092 	 */
4093 	ecore_chain_consume(&rxq->rx_bd_ring);
4094 	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4095 
4096 	for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4097 		QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4098 
4099 		if (cqe->ext_bd_len_list[i] == 0)
4100 			break;
4101 
4102 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4103 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4104 			BUS_DMASYNC_POSTREAD);
4105 
4106 		mpc = sw_rx_data->data;
4107 
4108 		if (mpc == NULL) {
4109 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4110 			fp->err_rx_mp_null++;
4111 			if (mpf != NULL)
4112 				m_freem(mpf);
4113 			mpf = mpl = NULL;
4114 			rxq->tpa_info[agg_index].agg_state =
4115 						QLNX_AGG_STATE_ERROR;
4116 			ecore_chain_consume(&rxq->rx_bd_ring);
4117 			rxq->sw_rx_cons =
4118 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4119 			continue;
4120 		}
4121 
4122 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4123 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4124 				" dropping incoming packet and reusing its"
4125 				" buffer\n", fp->rss_id);
4126 
4127 			qlnx_reuse_rx_data(rxq);
4128 
4129 			if (mpf != NULL)
4130 				m_freem(mpf);
4131 			mpf = mpl = NULL;
4132 
4133 			rxq->tpa_info[agg_index].agg_state =
4134 						QLNX_AGG_STATE_ERROR;
4135 
4136 			ecore_chain_consume(&rxq->rx_bd_ring);
4137 			rxq->sw_rx_cons =
4138 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4139 
4140 			continue;
4141 		}
4142 
4143 		mpc->m_flags &= ~M_PKTHDR;
4144 		mpc->m_next = NULL;
4145 		mpc->m_len = cqe->ext_bd_len_list[i];
4146 
4147 		if (mpf == NULL) {
4148 			mpf = mpl = mpc;
4149 		} else {
4150 			mpl->m_len = ha->rx_buf_size;
4151 			mpl->m_next = mpc;
4152 			mpl = mpc;
4153 		}
4154 
4155 		ecore_chain_consume(&rxq->rx_bd_ring);
4156 		rxq->sw_rx_cons =
4157 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4158 	}
4159 
4160 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4161 		QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4162 			" incoming packet and reusing its buffer\n",
4163 			fp->rss_id);
4164 
4165 		QLNX_INC_IQDROPS(ifp);
4166 
4167 		rxq->tpa_info[agg_index].mpf = mp;
4168 		rxq->tpa_info[agg_index].mpl = NULL;
4169 
4170 		return;
4171 	}
4172 
4173         rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4174 
4175         if (mpf != NULL) {
4176                 mp->m_len = ha->rx_buf_size;
4177                 mp->m_next = mpf;
4178                 rxq->tpa_info[agg_index].mpf = mp;
4179                 rxq->tpa_info[agg_index].mpl = mpl;
4180         } else {
4181                 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4182                 rxq->tpa_info[agg_index].mpf = mp;
4183                 rxq->tpa_info[agg_index].mpl = mp;
4184                 mp->m_next = NULL;
4185         }
4186 
4187 	mp->m_flags |= M_PKTHDR;
4188 
4189 	/* assign packet to this interface interface */
4190 	mp->m_pkthdr.rcvif = ifp;
4191 
4192 	/* assume no hardware checksum has complated */
4193 	mp->m_pkthdr.csum_flags = 0;
4194 
4195 	//mp->m_pkthdr.flowid = fp->rss_id;
4196 	mp->m_pkthdr.flowid = cqe->rss_hash;
4197 
4198 	hash_type = cqe->bitfields &
4199 			(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4200 			ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4201 
4202 	switch (hash_type) {
4203 	case RSS_HASH_TYPE_IPV4:
4204 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4205 		break;
4206 
4207 	case RSS_HASH_TYPE_TCP_IPV4:
4208 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4209 		break;
4210 
4211 	case RSS_HASH_TYPE_IPV6:
4212 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4213 		break;
4214 
4215 	case RSS_HASH_TYPE_TCP_IPV6:
4216 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4217 		break;
4218 
4219 	default:
4220 		M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4221 		break;
4222 	}
4223 
4224 	mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4225 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4226 
4227 	mp->m_pkthdr.csum_data = 0xFFFF;
4228 
4229 	if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4230 		mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4231 		mp->m_flags |= M_VLANTAG;
4232 	}
4233 
4234 	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4235 
4236         QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4237 		fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4238                 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
4239 
4240 	return;
4241 }
4242 
4243 static void
4244 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4245 	struct qlnx_rx_queue *rxq,
4246 	struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4247 {
4248 	struct sw_rx_data	*sw_rx_data;
4249 	int			i;
4250 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
4251 	struct mbuf		*mp;
4252 	uint32_t		agg_index;
4253 
4254         QL_DPRINT7(ha, "[%d]: enter\n \
4255                 \t type = 0x%x\n \
4256                 \t tpa_agg_index = 0x%x\n \
4257                 \t len_list[0] = 0x%x\n \
4258                 \t len_list[1] = 0x%x\n \
4259                 \t len_list[2] = 0x%x\n \
4260                 \t len_list[3] = 0x%x\n \
4261                 \t len_list[4] = 0x%x\n \
4262                 \t len_list[5] = 0x%x\n",
4263                 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4264                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4265                 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4266 
4267 	agg_index = cqe->tpa_agg_index;
4268 
4269 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4270 		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4271 		fp->err_rx_tpa_invalid_agg_num++;
4272 		return;
4273 	}
4274 
4275 	for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4276 		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4277 
4278 		if (cqe->len_list[i] == 0)
4279 			break;
4280 
4281 		if (rxq->tpa_info[agg_index].agg_state !=
4282 			QLNX_AGG_STATE_START) {
4283 			qlnx_reuse_rx_data(rxq);
4284 			continue;
4285 		}
4286 
4287 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4288 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4289 			BUS_DMASYNC_POSTREAD);
4290 
4291 		mpc = sw_rx_data->data;
4292 
4293 		if (mpc == NULL) {
4294 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4295 
4296 			fp->err_rx_mp_null++;
4297 			if (mpf != NULL)
4298 				m_freem(mpf);
4299 			mpf = mpl = NULL;
4300 			rxq->tpa_info[agg_index].agg_state =
4301 						QLNX_AGG_STATE_ERROR;
4302 			ecore_chain_consume(&rxq->rx_bd_ring);
4303 			rxq->sw_rx_cons =
4304 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4305 			continue;
4306 		}
4307 
4308 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4309 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4310 				" dropping incoming packet and reusing its"
4311 				" buffer\n", fp->rss_id);
4312 
4313 			qlnx_reuse_rx_data(rxq);
4314 
4315 			if (mpf != NULL)
4316 				m_freem(mpf);
4317 			mpf = mpl = NULL;
4318 
4319 			rxq->tpa_info[agg_index].agg_state =
4320 						QLNX_AGG_STATE_ERROR;
4321 
4322 			ecore_chain_consume(&rxq->rx_bd_ring);
4323 			rxq->sw_rx_cons =
4324 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4325 
4326 			continue;
4327 		}
4328 
4329 		mpc->m_flags &= ~M_PKTHDR;
4330 		mpc->m_next = NULL;
4331 		mpc->m_len = cqe->len_list[i];
4332 
4333 		if (mpf == NULL) {
4334 			mpf = mpl = mpc;
4335 		} else {
4336 			mpl->m_len = ha->rx_buf_size;
4337 			mpl->m_next = mpc;
4338 			mpl = mpc;
4339 		}
4340 
4341 		ecore_chain_consume(&rxq->rx_bd_ring);
4342 		rxq->sw_rx_cons =
4343 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4344 	}
4345 
4346         QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4347                   fp->rss_id, mpf, mpl);
4348 
4349 	if (mpf != NULL) {
4350 		mp = rxq->tpa_info[agg_index].mpl;
4351 		mp->m_len = ha->rx_buf_size;
4352 		mp->m_next = mpf;
4353 		rxq->tpa_info[agg_index].mpl = mpl;
4354 	}
4355 
4356 	return;
4357 }
4358 
4359 static int
4360 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4361 	struct qlnx_rx_queue *rxq,
4362 	struct eth_fast_path_rx_tpa_end_cqe *cqe)
4363 {
4364 	struct sw_rx_data	*sw_rx_data;
4365 	int			i;
4366 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
4367 	struct mbuf		*mp;
4368 	uint32_t		agg_index;
4369 	uint32_t		len = 0;
4370         if_t ifp = ha->ifp;
4371 
4372         QL_DPRINT7(ha, "[%d]: enter\n \
4373                 \t type = 0x%x\n \
4374                 \t tpa_agg_index = 0x%x\n \
4375                 \t total_packet_len = 0x%x\n \
4376                 \t num_of_bds = 0x%x\n \
4377                 \t end_reason = 0x%x\n \
4378                 \t num_of_coalesced_segs = 0x%x\n \
4379                 \t ts_delta = 0x%x\n \
4380                 \t len_list[0] = 0x%x\n \
4381                 \t len_list[1] = 0x%x\n \
4382                 \t len_list[2] = 0x%x\n \
4383                 \t len_list[3] = 0x%x\n",
4384                  fp->rss_id, cqe->type, cqe->tpa_agg_index,
4385                 cqe->total_packet_len, cqe->num_of_bds,
4386                 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4387                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4388                 cqe->len_list[3]);
4389 
4390 	agg_index = cqe->tpa_agg_index;
4391 
4392 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4393 		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4394 
4395 		fp->err_rx_tpa_invalid_agg_num++;
4396 		return (0);
4397 	}
4398 
4399 	for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4400 		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4401 
4402 		if (cqe->len_list[i] == 0)
4403 			break;
4404 
4405 		if (rxq->tpa_info[agg_index].agg_state !=
4406 			QLNX_AGG_STATE_START) {
4407 			QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4408 
4409 			qlnx_reuse_rx_data(rxq);
4410 			continue;
4411 		}
4412 
4413 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4414 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4415 			BUS_DMASYNC_POSTREAD);
4416 
4417 		mpc = sw_rx_data->data;
4418 
4419 		if (mpc == NULL) {
4420 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4421 
4422 			fp->err_rx_mp_null++;
4423 			if (mpf != NULL)
4424 				m_freem(mpf);
4425 			mpf = mpl = NULL;
4426 			rxq->tpa_info[agg_index].agg_state =
4427 						QLNX_AGG_STATE_ERROR;
4428 			ecore_chain_consume(&rxq->rx_bd_ring);
4429 			rxq->sw_rx_cons =
4430 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4431 			continue;
4432 		}
4433 
4434 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4435 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4436 				" dropping incoming packet and reusing its"
4437 				" buffer\n", fp->rss_id);
4438 
4439 			qlnx_reuse_rx_data(rxq);
4440 
4441 			if (mpf != NULL)
4442 				m_freem(mpf);
4443 			mpf = mpl = NULL;
4444 
4445 			rxq->tpa_info[agg_index].agg_state =
4446 						QLNX_AGG_STATE_ERROR;
4447 
4448 			ecore_chain_consume(&rxq->rx_bd_ring);
4449 			rxq->sw_rx_cons =
4450 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4451 
4452 			continue;
4453 		}
4454 
4455 		mpc->m_flags &= ~M_PKTHDR;
4456 		mpc->m_next = NULL;
4457 		mpc->m_len = cqe->len_list[i];
4458 
4459 		if (mpf == NULL) {
4460 			mpf = mpl = mpc;
4461 		} else {
4462 			mpl->m_len = ha->rx_buf_size;
4463 			mpl->m_next = mpc;
4464 			mpl = mpc;
4465 		}
4466 
4467 		ecore_chain_consume(&rxq->rx_bd_ring);
4468 		rxq->sw_rx_cons =
4469 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4470 	}
4471 
4472 	QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4473 
4474 	if (mpf != NULL) {
4475 		QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4476 
4477 		mp = rxq->tpa_info[agg_index].mpl;
4478 		mp->m_len = ha->rx_buf_size;
4479 		mp->m_next = mpf;
4480 	}
4481 
4482 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4483 		QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4484 
4485 		if (rxq->tpa_info[agg_index].mpf != NULL)
4486 			m_freem(rxq->tpa_info[agg_index].mpf);
4487 		rxq->tpa_info[agg_index].mpf = NULL;
4488 		rxq->tpa_info[agg_index].mpl = NULL;
4489 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4490 		return (0);
4491 	}
4492 
4493 	mp = rxq->tpa_info[agg_index].mpf;
4494 	m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4495 	mp->m_pkthdr.len = cqe->total_packet_len;
4496 
4497 	if (mp->m_next  == NULL)
4498 		mp->m_len = mp->m_pkthdr.len;
4499 	else {
4500 		/* compute the total packet length */
4501 		mpf = mp;
4502 		while (mpf != NULL) {
4503 			len += mpf->m_len;
4504 			mpf = mpf->m_next;
4505 		}
4506 
4507 		if (cqe->total_packet_len > len) {
4508 			mpl = rxq->tpa_info[agg_index].mpl;
4509 			mpl->m_len += (cqe->total_packet_len - len);
4510 		}
4511 	}
4512 
4513 	QLNX_INC_IPACKETS(ifp);
4514 	QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4515 
4516         QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \
4517 		m_len = 0x%x m_pkthdr_len = 0x%x\n",
4518                 fp->rss_id, mp->m_pkthdr.csum_data,
4519                 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4520 
4521 	if_input(ifp, mp);
4522 
4523 	rxq->tpa_info[agg_index].mpf = NULL;
4524 	rxq->tpa_info[agg_index].mpl = NULL;
4525 	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4526 
4527 	return (cqe->num_of_coalesced_segs);
4528 }
4529 
4530 static int
4531 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4532 	int lro_enable)
4533 {
4534         uint16_t		hw_comp_cons, sw_comp_cons;
4535         int			rx_pkt = 0;
4536         struct qlnx_rx_queue	*rxq = fp->rxq;
4537         if_t ifp = ha->ifp;
4538 	struct ecore_dev	*cdev = &ha->cdev;
4539 	struct ecore_hwfn       *p_hwfn;
4540 
4541 #ifdef QLNX_SOFT_LRO
4542 	struct lro_ctrl		*lro;
4543 
4544 	lro = &rxq->lro;
4545 #endif /* #ifdef QLNX_SOFT_LRO */
4546 
4547         hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4548         sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4549 
4550 	p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4551 
4552         /* Memory barrier to prevent the CPU from doing speculative reads of CQE
4553          * / BD in the while-loop before reading hw_comp_cons. If the CQE is
4554          * read before it is written by FW, then FW writes CQE and SB, and then
4555          * the CPU reads the hw_comp_cons, it will use an old CQE.
4556          */
4557 
4558         /* Loop to complete all indicated BDs */
4559         while (sw_comp_cons != hw_comp_cons) {
4560                 union eth_rx_cqe		*cqe;
4561                 struct eth_fast_path_rx_reg_cqe	*fp_cqe;
4562                 struct sw_rx_data		*sw_rx_data;
4563 		register struct mbuf		*mp;
4564                 enum eth_rx_cqe_type		cqe_type;
4565                 uint16_t			len, pad, len_on_first_bd;
4566                 uint8_t				*data;
4567 		uint8_t				hash_type;
4568 
4569                 /* Get the CQE from the completion ring */
4570                 cqe = (union eth_rx_cqe *)
4571                         ecore_chain_consume(&rxq->rx_comp_ring);
4572                 cqe_type = cqe->fast_path_regular.type;
4573 
4574                 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4575                         QL_DPRINT3(ha, "Got a slowath CQE\n");
4576 
4577                         ecore_eth_cqe_completion(p_hwfn,
4578                                         (struct eth_slow_path_rx_cqe *)cqe);
4579                         goto next_cqe;
4580                 }
4581 
4582 		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4583 			switch (cqe_type) {
4584 			case ETH_RX_CQE_TYPE_TPA_START:
4585 				qlnx_tpa_start(ha, fp, rxq,
4586 					&cqe->fast_path_tpa_start);
4587 				fp->tpa_start++;
4588 				break;
4589 
4590 			case ETH_RX_CQE_TYPE_TPA_CONT:
4591 				qlnx_tpa_cont(ha, fp, rxq,
4592 					&cqe->fast_path_tpa_cont);
4593 				fp->tpa_cont++;
4594 				break;
4595 
4596 			case ETH_RX_CQE_TYPE_TPA_END:
4597 				rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4598 						&cqe->fast_path_tpa_end);
4599 				fp->tpa_end++;
4600 				break;
4601 
4602 			default:
4603 				break;
4604 			}
4605 
4606                         goto next_cqe;
4607 		}
4608 
4609                 /* Get the data from the SW ring */
4610                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4611                 mp = sw_rx_data->data;
4612 
4613 		if (mp == NULL) {
4614                 	QL_DPRINT1(ha, "mp = NULL\n");
4615 			fp->err_rx_mp_null++;
4616         		rxq->sw_rx_cons  =
4617 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4618 			goto next_cqe;
4619 		}
4620 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4621 			BUS_DMASYNC_POSTREAD);
4622 
4623                 /* non GRO */
4624                 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4625                 len =  le16toh(fp_cqe->pkt_len);
4626                 pad = fp_cqe->placement_offset;
4627 #if 0
4628 		QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4629 			" len %u, parsing flags = %d pad  = %d\n",
4630 			cqe_type, fp_cqe->bitfields,
4631 			le16toh(fp_cqe->vlan_tag),
4632 			len, le16toh(fp_cqe->pars_flags.flags), pad);
4633 #endif
4634 		data = mtod(mp, uint8_t *);
4635 		data = data + pad;
4636 
4637 		if (0)
4638 			qlnx_dump_buf8(ha, __func__, data, len);
4639 
4640                 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4641                  * is always with a fixed size. If allocation fails, we take the
4642                  * consumed BD and return it to the ring in the PROD position.
4643                  * The packet that was received on that BD will be dropped (and
4644                  * not passed to the upper stack).
4645                  */
4646 		/* If this is an error packet then drop it */
4647 		if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4648 			CQE_FLAGS_ERR) {
4649 			QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4650 				" dropping incoming packet\n", sw_comp_cons,
4651 			le16toh(cqe->fast_path_regular.pars_flags.flags));
4652 			fp->err_rx_hw_errors++;
4653 
4654                         qlnx_reuse_rx_data(rxq);
4655 
4656 			QLNX_INC_IERRORS(ifp);
4657 
4658 			goto next_cqe;
4659 		}
4660 
4661                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4662                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4663 				" incoming packet and reusing its buffer\n");
4664                         qlnx_reuse_rx_data(rxq);
4665 
4666                         fp->err_rx_alloc_errors++;
4667 
4668 			QLNX_INC_IQDROPS(ifp);
4669 
4670                         goto next_cqe;
4671                 }
4672 
4673                 ecore_chain_consume(&rxq->rx_bd_ring);
4674 
4675 		len_on_first_bd = fp_cqe->len_on_first_bd;
4676 		m_adj(mp, pad);
4677 		mp->m_pkthdr.len = len;
4678 
4679 		if ((len > 60 ) && (len > len_on_first_bd)) {
4680 			mp->m_len = len_on_first_bd;
4681 
4682 			if (qlnx_rx_jumbo_chain(ha, fp, mp,
4683 				(len - len_on_first_bd)) != 0) {
4684 				m_freem(mp);
4685 
4686 				QLNX_INC_IQDROPS(ifp);
4687 
4688                         	goto next_cqe;
4689 			}
4690 
4691 		} else if (len_on_first_bd < len) {
4692 			fp->err_rx_jumbo_chain_pkts++;
4693 		} else {
4694 			mp->m_len = len;
4695 		}
4696 
4697 		mp->m_flags |= M_PKTHDR;
4698 
4699 		/* assign packet to this interface interface */
4700 		mp->m_pkthdr.rcvif = ifp;
4701 
4702 		/* assume no hardware checksum has complated */
4703 		mp->m_pkthdr.csum_flags = 0;
4704 
4705 		mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4706 
4707 		hash_type = fp_cqe->bitfields &
4708 				(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4709 				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4710 
4711 		switch (hash_type) {
4712 		case RSS_HASH_TYPE_IPV4:
4713 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4714 			break;
4715 
4716 		case RSS_HASH_TYPE_TCP_IPV4:
4717 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4718 			break;
4719 
4720 		case RSS_HASH_TYPE_IPV6:
4721 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4722 			break;
4723 
4724 		case RSS_HASH_TYPE_TCP_IPV6:
4725 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4726 			break;
4727 
4728 		default:
4729 			M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4730 			break;
4731 		}
4732 
4733 		if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4734 			mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4735 		}
4736 
4737 		if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4738 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4739 		}
4740 
4741 		if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4742 			mp->m_pkthdr.csum_data = 0xFFFF;
4743 			mp->m_pkthdr.csum_flags |=
4744 				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4745 		}
4746 
4747 		if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4748 			mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4749 			mp->m_flags |= M_VLANTAG;
4750 		}
4751 
4752 		QLNX_INC_IPACKETS(ifp);
4753 		QLNX_INC_IBYTES(ifp, len);
4754 
4755 #ifdef QLNX_SOFT_LRO
4756 		if (lro_enable)
4757 			tcp_lro_queue_mbuf(lro, mp);
4758 		else
4759 			if_input(ifp, mp);
4760 #else
4761 
4762 		if_input(ifp, mp);
4763 
4764 #endif /* #ifdef QLNX_SOFT_LRO */
4765 
4766                 rx_pkt++;
4767 
4768         	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4769 
4770 next_cqe:	/* don't consume bd rx buffer */
4771                 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4772                 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4773 
4774 		/* CR TPA - revisit how to handle budget in TPA perhaps
4775 		   increase on "end" */
4776                 if (rx_pkt == budget)
4777                         break;
4778         } /* repeat while sw_comp_cons != hw_comp_cons... */
4779 
4780         /* Update producers */
4781         qlnx_update_rx_prod(p_hwfn, rxq);
4782 
4783         return rx_pkt;
4784 }
4785 
4786 /*
4787  * fast path interrupt
4788  */
4789 
4790 static void
4791 qlnx_fp_isr(void *arg)
4792 {
4793         qlnx_ivec_t		*ivec = arg;
4794         qlnx_host_t		*ha;
4795         struct qlnx_fastpath	*fp = NULL;
4796         int			idx;
4797 
4798         ha = ivec->ha;
4799 
4800         if (ha->state != QLNX_STATE_OPEN) {
4801                 return;
4802         }
4803 
4804         idx = ivec->rss_idx;
4805 
4806         if ((idx = ivec->rss_idx) >= ha->num_rss) {
4807                 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4808                 ha->err_illegal_intr++;
4809                 return;
4810         }
4811         fp = &ha->fp_array[idx];
4812 
4813         if (fp == NULL) {
4814                 ha->err_fp_null++;
4815         } else {
4816 		int			rx_int = 0;
4817 #ifdef QLNX_SOFT_LRO
4818 		int			total_rx_count = 0;
4819 #endif
4820 		int 			lro_enable, tc;
4821 		struct qlnx_tx_queue	*txq;
4822 		uint16_t		elem_left;
4823 
4824 		lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO;
4825 
4826                 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4827 
4828                 do {
4829                         for (tc = 0; tc < ha->num_tc; tc++) {
4830 				txq = fp->txq[tc];
4831 
4832 				if((int)(elem_left =
4833 					ecore_chain_get_elem_left(&txq->tx_pbl)) <
4834 						QLNX_TX_ELEM_THRESH)  {
4835                                 	if (mtx_trylock(&fp->tx_mtx)) {
4836 #ifdef QLNX_TRACE_PERF_DATA
4837 						tx_compl = fp->tx_pkts_completed;
4838 #endif
4839 
4840 						qlnx_tx_int(ha, fp, fp->txq[tc]);
4841 #ifdef QLNX_TRACE_PERF_DATA
4842 						fp->tx_pkts_compl_intr +=
4843 							(fp->tx_pkts_completed - tx_compl);
4844 						if ((fp->tx_pkts_completed - tx_compl) <= 32)
4845 							fp->tx_comInt[0]++;
4846 						else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4847 							((fp->tx_pkts_completed - tx_compl) <= 64))
4848 							fp->tx_comInt[1]++;
4849 						else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4850 							((fp->tx_pkts_completed - tx_compl) <= 128))
4851 							fp->tx_comInt[2]++;
4852 						else if(((fp->tx_pkts_completed - tx_compl) > 128))
4853 							fp->tx_comInt[3]++;
4854 #endif
4855 						mtx_unlock(&fp->tx_mtx);
4856 					}
4857 				}
4858                         }
4859 
4860                         rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4861                                         lro_enable);
4862 
4863                         if (rx_int) {
4864                                 fp->rx_pkts += rx_int;
4865 #ifdef QLNX_SOFT_LRO
4866                                 total_rx_count += rx_int;
4867 #endif
4868                         }
4869 
4870                 } while (rx_int);
4871 
4872 #ifdef QLNX_SOFT_LRO
4873                 {
4874                         struct lro_ctrl *lro;
4875 
4876                         lro = &fp->rxq->lro;
4877 
4878                         if (lro_enable && total_rx_count) {
4879 
4880 #ifdef QLNX_TRACE_LRO_CNT
4881                                 if (lro->lro_mbuf_count & ~1023)
4882                                         fp->lro_cnt_1024++;
4883                                 else if (lro->lro_mbuf_count & ~511)
4884                                         fp->lro_cnt_512++;
4885                                 else if (lro->lro_mbuf_count & ~255)
4886                                         fp->lro_cnt_256++;
4887                                 else if (lro->lro_mbuf_count & ~127)
4888                                         fp->lro_cnt_128++;
4889                                 else if (lro->lro_mbuf_count & ~63)
4890                                         fp->lro_cnt_64++;
4891 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4892 
4893                                 tcp_lro_flush_all(lro);
4894                         }
4895                 }
4896 #endif /* #ifdef QLNX_SOFT_LRO */
4897 
4898                 ecore_sb_update_sb_idx(fp->sb_info);
4899                 rmb();
4900                 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4901         }
4902 
4903         return;
4904 }
4905 
4906 /*
4907  * slow path interrupt processing function
4908  * can be invoked in polled mode or in interrupt mode via taskqueue.
4909  */
4910 void
4911 qlnx_sp_isr(void *arg)
4912 {
4913 	struct ecore_hwfn	*p_hwfn;
4914 	qlnx_host_t		*ha;
4915 
4916 	p_hwfn = arg;
4917 
4918 	ha = (qlnx_host_t *)p_hwfn->p_dev;
4919 
4920 	ha->sp_interrupts++;
4921 
4922 	QL_DPRINT2(ha, "enter\n");
4923 
4924 	ecore_int_sp_dpc(p_hwfn);
4925 
4926 	QL_DPRINT2(ha, "exit\n");
4927 
4928 	return;
4929 }
4930 
4931 /*****************************************************************************
4932  * Support Functions for DMA'able Memory
4933  *****************************************************************************/
4934 
4935 static void
4936 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4937 {
4938         *((bus_addr_t *)arg) = 0;
4939 
4940         if (error) {
4941                 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4942                 return;
4943         }
4944 
4945         *((bus_addr_t *)arg) = segs[0].ds_addr;
4946 
4947         return;
4948 }
4949 
4950 static int
4951 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4952 {
4953         int             ret = 0;
4954         bus_addr_t      b_addr;
4955 
4956         ret = bus_dma_tag_create(
4957                         ha->parent_tag,/* parent */
4958                         dma_buf->alignment,
4959                         ((bus_size_t)(1ULL << 32)),/* boundary */
4960                         BUS_SPACE_MAXADDR,      /* lowaddr */
4961                         BUS_SPACE_MAXADDR,      /* highaddr */
4962                         NULL, NULL,             /* filter, filterarg */
4963                         dma_buf->size,          /* maxsize */
4964                         1,                      /* nsegments */
4965                         dma_buf->size,          /* maxsegsize */
4966                         0,                      /* flags */
4967                         NULL, NULL,             /* lockfunc, lockarg */
4968                         &dma_buf->dma_tag);
4969 
4970         if (ret) {
4971                 QL_DPRINT1(ha, "could not create dma tag\n");
4972                 goto qlnx_alloc_dmabuf_exit;
4973         }
4974         ret = bus_dmamem_alloc(dma_buf->dma_tag,
4975                         (void **)&dma_buf->dma_b,
4976                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4977                         &dma_buf->dma_map);
4978         if (ret) {
4979                 bus_dma_tag_destroy(dma_buf->dma_tag);
4980                 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4981                 goto qlnx_alloc_dmabuf_exit;
4982         }
4983 
4984         ret = bus_dmamap_load(dma_buf->dma_tag,
4985                         dma_buf->dma_map,
4986                         dma_buf->dma_b,
4987                         dma_buf->size,
4988                         qlnx_dmamap_callback,
4989                         &b_addr, BUS_DMA_NOWAIT);
4990 
4991         if (ret || !b_addr) {
4992                 bus_dma_tag_destroy(dma_buf->dma_tag);
4993                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4994                         dma_buf->dma_map);
4995                 ret = -1;
4996                 goto qlnx_alloc_dmabuf_exit;
4997         }
4998 
4999         dma_buf->dma_addr = b_addr;
5000 
5001 qlnx_alloc_dmabuf_exit:
5002 
5003         return ret;
5004 }
5005 
5006 static void
5007 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5008 {
5009 	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
5010         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
5011         bus_dma_tag_destroy(dma_buf->dma_tag);
5012 	return;
5013 }
5014 
5015 void *
5016 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5017 {
5018 	qlnx_dma_t	dma_buf;
5019 	qlnx_dma_t	*dma_p;
5020 	qlnx_host_t	*ha __unused;
5021 
5022 	ha = (qlnx_host_t *)ecore_dev;
5023 
5024 	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5025 
5026 	memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5027 
5028 	dma_buf.size = size + PAGE_SIZE;
5029 	dma_buf.alignment = 8;
5030 
5031 	if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5032 		return (NULL);
5033 	bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5034 
5035 	*phys = dma_buf.dma_addr;
5036 
5037 	dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5038 
5039 	memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5040 
5041 	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5042 		(void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5043 		dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5044 
5045 	return (dma_buf.dma_b);
5046 }
5047 
5048 void
5049 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5050 	uint32_t size)
5051 {
5052 	qlnx_dma_t dma_buf, *dma_p;
5053 	qlnx_host_t	*ha;
5054 
5055 	ha = (qlnx_host_t *)ecore_dev;
5056 
5057 	if (v_addr == NULL)
5058 		return;
5059 
5060 	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5061 
5062 	dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5063 
5064 	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5065 		(void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5066 		dma_p->dma_b, (void *)dma_p->dma_addr, size);
5067 
5068 	dma_buf = *dma_p;
5069 
5070 	if (!ha->qlnxr_debug)
5071 	qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5072 	return;
5073 }
5074 
5075 static int
5076 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5077 {
5078         int             ret;
5079         device_t        dev;
5080 
5081         dev = ha->pci_dev;
5082 
5083         /*
5084          * Allocate parent DMA Tag
5085          */
5086         ret = bus_dma_tag_create(
5087                         bus_get_dma_tag(dev),   /* parent */
5088                         1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
5089                         BUS_SPACE_MAXADDR,      /* lowaddr */
5090                         BUS_SPACE_MAXADDR,      /* highaddr */
5091                         NULL, NULL,             /* filter, filterarg */
5092                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5093                         0,                      /* nsegments */
5094                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5095                         0,                      /* flags */
5096                         NULL, NULL,             /* lockfunc, lockarg */
5097                         &ha->parent_tag);
5098 
5099         if (ret) {
5100                 QL_DPRINT1(ha, "could not create parent dma tag\n");
5101                 return (-1);
5102         }
5103 
5104         ha->flags.parent_tag = 1;
5105 
5106         return (0);
5107 }
5108 
5109 static void
5110 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5111 {
5112         if (ha->parent_tag != NULL) {
5113                 bus_dma_tag_destroy(ha->parent_tag);
5114 		ha->parent_tag = NULL;
5115         }
5116 	return;
5117 }
5118 
5119 static int
5120 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5121 {
5122         if (bus_dma_tag_create(NULL,    /* parent */
5123                 1, 0,    /* alignment, bounds */
5124                 BUS_SPACE_MAXADDR,       /* lowaddr */
5125                 BUS_SPACE_MAXADDR,       /* highaddr */
5126                 NULL, NULL,      /* filter, filterarg */
5127                 QLNX_MAX_TSO_FRAME_SIZE,     /* maxsize */
5128                 QLNX_MAX_SEGMENTS,        /* nsegments */
5129                 QLNX_MAX_TX_MBUF_SIZE,	  /* maxsegsize */
5130                 0,        /* flags */
5131                 NULL,    /* lockfunc */
5132                 NULL,    /* lockfuncarg */
5133                 &ha->tx_tag)) {
5134                 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5135                 return (-1);
5136         }
5137 
5138 	return (0);
5139 }
5140 
5141 static void
5142 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5143 {
5144         if (ha->tx_tag != NULL) {
5145                 bus_dma_tag_destroy(ha->tx_tag);
5146 		ha->tx_tag = NULL;
5147         }
5148 	return;
5149 }
5150 
5151 static int
5152 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5153 {
5154         if (bus_dma_tag_create(NULL,    /* parent */
5155                         1, 0,    /* alignment, bounds */
5156                         BUS_SPACE_MAXADDR,       /* lowaddr */
5157                         BUS_SPACE_MAXADDR,       /* highaddr */
5158                         NULL, NULL,      /* filter, filterarg */
5159                         MJUM9BYTES,     /* maxsize */
5160                         1,        /* nsegments */
5161                         MJUM9BYTES,        /* maxsegsize */
5162                         0,        /* flags */
5163                         NULL,    /* lockfunc */
5164                         NULL,    /* lockfuncarg */
5165                         &ha->rx_tag)) {
5166                 QL_DPRINT1(ha, " rx_tag alloc failed\n");
5167 
5168                 return (-1);
5169         }
5170 	return (0);
5171 }
5172 
5173 static void
5174 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5175 {
5176         if (ha->rx_tag != NULL) {
5177                 bus_dma_tag_destroy(ha->rx_tag);
5178 		ha->rx_tag = NULL;
5179         }
5180 	return;
5181 }
5182 
5183 /*********************************
5184  * Exported functions
5185  *********************************/
5186 uint32_t
5187 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5188 {
5189 	uint32_t bar_size;
5190 
5191 	bar_id = bar_id * 2;
5192 
5193 	bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5194 				SYS_RES_MEMORY,
5195 				PCIR_BAR(bar_id));
5196 
5197 	return (bar_size);
5198 }
5199 
5200 uint32_t
5201 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5202 {
5203 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5204 				pci_reg, 1);
5205 	return 0;
5206 }
5207 
5208 uint32_t
5209 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5210 	uint16_t *reg_value)
5211 {
5212 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5213 				pci_reg, 2);
5214 	return 0;
5215 }
5216 
5217 uint32_t
5218 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5219 	uint32_t *reg_value)
5220 {
5221 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5222 				pci_reg, 4);
5223 	return 0;
5224 }
5225 
5226 void
5227 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5228 {
5229 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5230 		pci_reg, reg_value, 1);
5231 	return;
5232 }
5233 
5234 void
5235 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5236 	uint16_t reg_value)
5237 {
5238 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5239 		pci_reg, reg_value, 2);
5240 	return;
5241 }
5242 
5243 void
5244 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5245 	uint32_t reg_value)
5246 {
5247 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5248 		pci_reg, reg_value, 4);
5249 	return;
5250 }
5251 
5252 int
5253 qlnx_pci_find_capability(void *ecore_dev, int cap)
5254 {
5255 	int		reg;
5256 	qlnx_host_t	*ha;
5257 
5258 	ha = ecore_dev;
5259 
5260 	if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5261 		return reg;
5262 	else {
5263 		QL_DPRINT1(ha, "failed\n");
5264 		return 0;
5265 	}
5266 }
5267 
5268 int
5269 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5270 {
5271 	int		reg;
5272 	qlnx_host_t	*ha;
5273 
5274 	ha = ecore_dev;
5275 
5276 	if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5277 		return reg;
5278 	else {
5279 		QL_DPRINT1(ha, "failed\n");
5280 		return 0;
5281 	}
5282 }
5283 
5284 uint32_t
5285 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5286 {
5287 	uint32_t		data32;
5288 	struct ecore_hwfn	*p_hwfn;
5289 
5290 	p_hwfn = hwfn;
5291 
5292 	data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5293 			(bus_size_t)(p_hwfn->reg_offset + reg_addr));
5294 
5295 	return (data32);
5296 }
5297 
5298 void
5299 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5300 {
5301 	struct ecore_hwfn	*p_hwfn = hwfn;
5302 
5303 	bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5304 		(bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5305 
5306 	return;
5307 }
5308 
5309 void
5310 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5311 {
5312 	struct ecore_hwfn	*p_hwfn = hwfn;
5313 
5314 	bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5315 		(bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5316 	return;
5317 }
5318 
5319 void
5320 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5321 {
5322 	struct ecore_dev	*cdev;
5323 	struct ecore_hwfn	*p_hwfn;
5324 	uint32_t	offset;
5325 
5326 	p_hwfn = hwfn;
5327 
5328 	cdev = p_hwfn->p_dev;
5329 
5330 	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5331 	bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5332 
5333 	return;
5334 }
5335 
5336 void
5337 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5338 {
5339 	struct ecore_hwfn	*p_hwfn = hwfn;
5340 
5341 	bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5342 		(bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5343 
5344 	return;
5345 }
5346 
5347 uint32_t
5348 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5349 {
5350 	uint32_t		data32;
5351 	bus_size_t		offset;
5352 	struct ecore_dev	*cdev;
5353 
5354 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5355 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5356 
5357 	data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5358 
5359 	return (data32);
5360 }
5361 
5362 void
5363 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5364 {
5365 	bus_size_t		offset;
5366 	struct ecore_dev	*cdev;
5367 
5368 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5369 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5370 
5371 	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5372 
5373 	return;
5374 }
5375 
5376 void
5377 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5378 {
5379 	bus_size_t		offset;
5380 	struct ecore_dev	*cdev;
5381 
5382 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5383 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5384 
5385 	bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5386 	return;
5387 }
5388 
5389 void *
5390 qlnx_zalloc(uint32_t size)
5391 {
5392 	caddr_t	va;
5393 
5394 	va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT);
5395 	bzero(va, size);
5396 	return ((void *)va);
5397 }
5398 
5399 void
5400 qlnx_barrier(void *p_dev)
5401 {
5402 	qlnx_host_t	*ha;
5403 
5404 	ha = ((struct ecore_dev *) p_dev)->ha;
5405 	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_WRITE);
5406 }
5407 
5408 void
5409 qlnx_link_update(void *p_hwfn)
5410 {
5411 	qlnx_host_t	*ha;
5412 	int		prev_link_state;
5413 
5414 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5415 
5416 	qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5417 
5418 	prev_link_state = ha->link_up;
5419 	ha->link_up = ha->if_link.link_up;
5420 
5421         if (prev_link_state !=  ha->link_up) {
5422                 if (ha->link_up) {
5423                         if_link_state_change(ha->ifp, LINK_STATE_UP);
5424                 } else {
5425                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5426                 }
5427         }
5428 #ifndef QLNX_VF
5429 #ifdef CONFIG_ECORE_SRIOV
5430 
5431 	if (qlnx_vf_device(ha) != 0) {
5432 		if (ha->sriov_initialized)
5433 			qlnx_inform_vf_link_state(p_hwfn, ha);
5434 	}
5435 
5436 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5437 #endif /* #ifdef QLNX_VF */
5438 
5439         return;
5440 }
5441 
5442 static void
5443 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5444 	struct ecore_vf_acquire_sw_info *p_sw_info)
5445 {
5446 	p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5447 					(QLNX_VERSION_MINOR << 16) |
5448 					 QLNX_VERSION_BUILD;
5449 	p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5450 
5451 	return;
5452 }
5453 
5454 void
5455 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5456 	void *p_sw_info)
5457 {
5458 	__qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
5459 
5460 	return;
5461 }
5462 
5463 void
5464 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5465 	struct qlnx_link_output *if_link)
5466 {
5467 	struct ecore_mcp_link_params    link_params;
5468 	struct ecore_mcp_link_state     link_state;
5469 	uint8_t				p_change;
5470 	struct ecore_ptt *p_ptt = NULL;
5471 
5472 	memset(if_link, 0, sizeof(*if_link));
5473 	memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5474 	memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5475 
5476 	ha = (qlnx_host_t *)hwfn->p_dev;
5477 
5478 	/* Prepare source inputs */
5479 	/* we only deal with physical functions */
5480 	if (qlnx_vf_device(ha) != 0) {
5481         	p_ptt = ecore_ptt_acquire(hwfn);
5482 
5483 	        if (p_ptt == NULL) {
5484 			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5485 			return;
5486 		}
5487 
5488 		ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5489 		ecore_ptt_release(hwfn, p_ptt);
5490 
5491 		memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5492 			sizeof(link_params));
5493 		memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5494 			sizeof(link_state));
5495 	} else {
5496 		ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5497 		ecore_vf_read_bulletin(hwfn, &p_change);
5498 		ecore_vf_get_link_params(hwfn, &link_params);
5499 		ecore_vf_get_link_state(hwfn, &link_state);
5500 	}
5501 
5502 	/* Set the link parameters to pass to protocol driver */
5503 	if (link_state.link_up) {
5504 		if_link->link_up = true;
5505 		if_link->speed = link_state.speed;
5506 	}
5507 
5508 	if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5509 
5510 	if (link_params.speed.autoneg)
5511 		if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5512 
5513 	if (link_params.pause.autoneg ||
5514 		(link_params.pause.forced_rx && link_params.pause.forced_tx))
5515 		if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5516 
5517 	if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5518 		link_params.pause.forced_tx)
5519 		if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5520 
5521 	if (link_params.speed.advertised_speeds &
5522 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5523 		if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5524                                            QLNX_LINK_CAP_1000baseT_Full;
5525 
5526 	if (link_params.speed.advertised_speeds &
5527 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5528 		if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5529 
5530 	if (link_params.speed.advertised_speeds &
5531 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5532 		if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5533 
5534 	if (link_params.speed.advertised_speeds &
5535 		NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5536 		if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5537 
5538 	if (link_params.speed.advertised_speeds &
5539 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5540 		if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5541 
5542 	if (link_params.speed.advertised_speeds &
5543 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5544 		if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5545 
5546 	if_link->advertised_caps = if_link->supported_caps;
5547 
5548 	if_link->autoneg = link_params.speed.autoneg;
5549 	if_link->duplex = QLNX_LINK_DUPLEX;
5550 
5551 	/* Link partner capabilities */
5552 
5553 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5554 		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5555 
5556 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5557 		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5558 
5559 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5560 		if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5561 
5562 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5563 		if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5564 
5565 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5566 		if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5567 
5568 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5569 		if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5570 
5571 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5572 		if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5573 
5574 	if (link_state.an_complete)
5575 		if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5576 
5577 	if (link_state.partner_adv_pause)
5578 		if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5579 
5580 	if ((link_state.partner_adv_pause ==
5581 		ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5582 		(link_state.partner_adv_pause ==
5583 			ECORE_LINK_PARTNER_BOTH_PAUSE))
5584 		if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5585 
5586 	return;
5587 }
5588 
5589 void
5590 qlnx_schedule_recovery(void *p_hwfn)
5591 {
5592 	qlnx_host_t	*ha;
5593 
5594 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5595 
5596 	if (qlnx_vf_device(ha) != 0) {
5597 		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5598 	}
5599 
5600 	return;
5601 }
5602 
5603 static int
5604 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5605 {
5606         int	rc, i;
5607 
5608         for (i = 0; i < cdev->num_hwfns; i++) {
5609                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5610                 p_hwfn->pf_params = *func_params;
5611 
5612 #ifdef QLNX_ENABLE_IWARP
5613 		if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5614 			p_hwfn->using_ll2 = true;
5615 		}
5616 #endif /* #ifdef QLNX_ENABLE_IWARP */
5617         }
5618 
5619         rc = ecore_resc_alloc(cdev);
5620         if (rc)
5621                 goto qlnx_nic_setup_exit;
5622 
5623         ecore_resc_setup(cdev);
5624 
5625 qlnx_nic_setup_exit:
5626 
5627         return rc;
5628 }
5629 
5630 static int
5631 qlnx_nic_start(struct ecore_dev *cdev)
5632 {
5633         int				rc;
5634 	struct ecore_hw_init_params	params;
5635 
5636 	bzero(&params, sizeof (struct ecore_hw_init_params));
5637 
5638 	params.p_tunn = NULL;
5639 	params.b_hw_start = true;
5640 	params.int_mode = cdev->int_mode;
5641 	params.allow_npar_tx_switch = true;
5642 	params.bin_fw_data = NULL;
5643 
5644         rc = ecore_hw_init(cdev, &params);
5645         if (rc) {
5646                 ecore_resc_free(cdev);
5647                 return rc;
5648         }
5649 
5650         return 0;
5651 }
5652 
5653 static int
5654 qlnx_slowpath_start(qlnx_host_t *ha)
5655 {
5656 	struct ecore_dev	*cdev;
5657 	struct ecore_pf_params	pf_params;
5658 	int			rc;
5659 
5660 	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5661 	pf_params.eth_pf_params.num_cons  =
5662 		(ha->num_rss) * (ha->num_tc + 1);
5663 
5664 #ifdef QLNX_ENABLE_IWARP
5665 	if (qlnx_vf_device(ha) != 0) {
5666 		if(ha->personality == ECORE_PCI_ETH_IWARP) {
5667 			device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5668 			pf_params.rdma_pf_params.num_qps = 1024;
5669 			pf_params.rdma_pf_params.num_srqs = 1024;
5670 			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5671 			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5672 		} else if(ha->personality == ECORE_PCI_ETH_ROCE) {
5673 			device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5674 			pf_params.rdma_pf_params.num_qps = 8192;
5675 			pf_params.rdma_pf_params.num_srqs = 8192;
5676 			//pf_params.rdma_pf_params.min_dpis = 0;
5677 			pf_params.rdma_pf_params.min_dpis = 8;
5678 			pf_params.rdma_pf_params.roce_edpm_mode = 0;
5679 			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5680 			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5681 		}
5682 	}
5683 #endif /* #ifdef QLNX_ENABLE_IWARP */
5684 
5685 	cdev = &ha->cdev;
5686 
5687 	rc = qlnx_nic_setup(cdev, &pf_params);
5688         if (rc)
5689                 goto qlnx_slowpath_start_exit;
5690 
5691         cdev->int_mode = ECORE_INT_MODE_MSIX;
5692         cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5693 
5694 #ifdef QLNX_MAX_COALESCE
5695 	cdev->rx_coalesce_usecs = 255;
5696 	cdev->tx_coalesce_usecs = 255;
5697 #endif
5698 
5699 	rc = qlnx_nic_start(cdev);
5700 
5701 	ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5702 	ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5703 
5704 #ifdef QLNX_USER_LLDP
5705 	(void)qlnx_set_lldp_tlvx(ha, NULL);
5706 #endif /* #ifdef QLNX_USER_LLDP */
5707 
5708 qlnx_slowpath_start_exit:
5709 
5710 	return (rc);
5711 }
5712 
5713 static int
5714 qlnx_slowpath_stop(qlnx_host_t *ha)
5715 {
5716 	struct ecore_dev	*cdev;
5717 	device_t		dev = ha->pci_dev;
5718 	int			i;
5719 
5720 	cdev = &ha->cdev;
5721 
5722 	ecore_hw_stop(cdev);
5723 
5724  	for (i = 0; i < ha->cdev.num_hwfns; i++) {
5725         	if (ha->sp_handle[i])
5726                 	(void)bus_teardown_intr(dev, ha->sp_irq[i],
5727 				ha->sp_handle[i]);
5728 
5729 		ha->sp_handle[i] = NULL;
5730 
5731         	if (ha->sp_irq[i])
5732 			(void) bus_release_resource(dev, SYS_RES_IRQ,
5733 				ha->sp_irq_rid[i], ha->sp_irq[i]);
5734 		ha->sp_irq[i] = NULL;
5735 	}
5736 
5737         ecore_resc_free(cdev);
5738 
5739         return 0;
5740 }
5741 
5742 static void
5743 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5744 	char ver_str[VER_SIZE])
5745 {
5746         int	i;
5747 
5748         memcpy(cdev->name, name, NAME_SIZE);
5749 
5750         for_each_hwfn(cdev, i) {
5751                 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5752         }
5753 
5754         cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5755 
5756 	return ;
5757 }
5758 
5759 void
5760 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5761 {
5762 	enum ecore_mcp_protocol_type	type;
5763 	union ecore_mcp_protocol_stats	*stats;
5764 	struct ecore_eth_stats		eth_stats;
5765 	qlnx_host_t			*ha;
5766 
5767 	ha = cdev;
5768 	stats = proto_stats;
5769 	type = proto_type;
5770 
5771         switch (type) {
5772         case ECORE_MCP_LAN_STATS:
5773                 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5774                 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5775                 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5776                 stats->lan_stats.fcs_err = -1;
5777                 break;
5778 
5779 	default:
5780 		ha->err_get_proto_invalid_type++;
5781 
5782 		QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5783 		break;
5784 	}
5785 	return;
5786 }
5787 
5788 static int
5789 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5790 {
5791 	struct ecore_hwfn	*p_hwfn;
5792 	struct ecore_ptt	*p_ptt;
5793 
5794 	p_hwfn = &ha->cdev.hwfns[0];
5795 	p_ptt = ecore_ptt_acquire(p_hwfn);
5796 
5797 	if (p_ptt ==  NULL) {
5798                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5799                 return (-1);
5800 	}
5801 	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5802 
5803 	ecore_ptt_release(p_hwfn, p_ptt);
5804 
5805 	return (0);
5806 }
5807 
5808 static int
5809 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5810 {
5811 	struct ecore_hwfn	*p_hwfn;
5812 	struct ecore_ptt	*p_ptt;
5813 
5814 	p_hwfn = &ha->cdev.hwfns[0];
5815 	p_ptt = ecore_ptt_acquire(p_hwfn);
5816 
5817 	if (p_ptt ==  NULL) {
5818                 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5819                 return (-1);
5820 	}
5821 	ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5822 
5823 	ecore_ptt_release(p_hwfn, p_ptt);
5824 
5825 	return (0);
5826 }
5827 
5828 static int
5829 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5830 {
5831 	bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5832 	bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5833 	bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5834 
5835         return 0;
5836 }
5837 
5838 static void
5839 qlnx_init_fp(qlnx_host_t *ha)
5840 {
5841 	int rss_id, txq_array_index, tc;
5842 
5843 	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5844 		struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5845 
5846 		fp->rss_id = rss_id;
5847 		fp->edev = ha;
5848 		fp->sb_info = &ha->sb_array[rss_id];
5849 		fp->rxq = &ha->rxq_array[rss_id];
5850 		fp->rxq->rxq_id = rss_id;
5851 
5852 		for (tc = 0; tc < ha->num_tc; tc++) {
5853                         txq_array_index = tc * ha->num_rss + rss_id;
5854                         fp->txq[tc] = &ha->txq_array[txq_array_index];
5855                         fp->txq[tc]->index = txq_array_index;
5856 		}
5857 
5858 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5859 			rss_id);
5860 
5861 		fp->tx_ring_full = 0;
5862 
5863 		/* reset all the statistics counters */
5864 
5865 		fp->tx_pkts_processed = 0;
5866 		fp->tx_pkts_freed = 0;
5867 		fp->tx_pkts_transmitted = 0;
5868 		fp->tx_pkts_completed = 0;
5869 
5870 #ifdef QLNX_TRACE_PERF_DATA
5871 		fp->tx_pkts_trans_ctx = 0;
5872 		fp->tx_pkts_compl_ctx = 0;
5873 		fp->tx_pkts_trans_fp = 0;
5874 		fp->tx_pkts_compl_fp = 0;
5875 		fp->tx_pkts_compl_intr = 0;
5876 #endif
5877 		fp->tx_lso_wnd_min_len = 0;
5878 		fp->tx_defrag = 0;
5879 		fp->tx_nsegs_gt_elem_left = 0;
5880 		fp->tx_tso_max_nsegs = 0;
5881 		fp->tx_tso_min_nsegs = 0;
5882 		fp->err_tx_nsegs_gt_elem_left = 0;
5883 		fp->err_tx_dmamap_create = 0;
5884 		fp->err_tx_defrag_dmamap_load = 0;
5885 		fp->err_tx_non_tso_max_seg = 0;
5886 		fp->err_tx_dmamap_load = 0;
5887 		fp->err_tx_defrag = 0;
5888 		fp->err_tx_free_pkt_null = 0;
5889 		fp->err_tx_cons_idx_conflict = 0;
5890 
5891 		fp->rx_pkts = 0;
5892 		fp->err_m_getcl = 0;
5893 		fp->err_m_getjcl = 0;
5894         }
5895 	return;
5896 }
5897 
5898 void
5899 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5900 {
5901 	struct ecore_dev	*cdev;
5902 
5903 	cdev = &ha->cdev;
5904 
5905         if (sb_info->sb_virt) {
5906                 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5907 			(sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5908 		sb_info->sb_virt = NULL;
5909 	}
5910 }
5911 
5912 static int
5913 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5914 	void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5915 {
5916         struct ecore_hwfn	*p_hwfn;
5917         int			hwfn_index, rc;
5918         u16			rel_sb_id;
5919 
5920         hwfn_index = sb_id % cdev->num_hwfns;
5921         p_hwfn = &cdev->hwfns[hwfn_index];
5922         rel_sb_id = sb_id / cdev->num_hwfns;
5923 
5924         QL_DPRINT2(((qlnx_host_t *)cdev),
5925                 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5926                 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5927                 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5928                 sb_virt_addr, (void *)sb_phy_addr);
5929 
5930         rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5931                              sb_virt_addr, sb_phy_addr, rel_sb_id);
5932 
5933         return rc;
5934 }
5935 
5936 /* This function allocates fast-path status block memory */
5937 int
5938 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5939 {
5940         struct status_block_e4	*sb_virt;
5941         bus_addr_t		sb_phys;
5942         int			rc;
5943 	uint32_t		size;
5944 	struct ecore_dev	*cdev;
5945 
5946 	cdev = &ha->cdev;
5947 
5948 	size = sizeof(*sb_virt);
5949 	sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5950 
5951         if (!sb_virt) {
5952                 QL_DPRINT1(ha, "Status block allocation failed\n");
5953                 return -ENOMEM;
5954         }
5955 
5956         rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5957         if (rc) {
5958                 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5959         }
5960 
5961 	return rc;
5962 }
5963 
5964 static void
5965 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5966 {
5967         int			i;
5968 	struct sw_rx_data	*rx_buf;
5969 
5970         for (i = 0; i < rxq->num_rx_buffers; i++) {
5971                 rx_buf = &rxq->sw_rx_ring[i];
5972 
5973 		if (rx_buf->data != NULL) {
5974 			if (rx_buf->map != NULL) {
5975 				bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5976 				bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5977 				rx_buf->map = NULL;
5978 			}
5979 			m_freem(rx_buf->data);
5980 			rx_buf->data = NULL;
5981 		}
5982         }
5983 	return;
5984 }
5985 
5986 static void
5987 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5988 {
5989 	struct ecore_dev	*cdev;
5990 	int			i;
5991 
5992 	cdev = &ha->cdev;
5993 
5994 	qlnx_free_rx_buffers(ha, rxq);
5995 
5996 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5997 		qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5998 		if (rxq->tpa_info[i].mpf != NULL)
5999 			m_freem(rxq->tpa_info[i].mpf);
6000 	}
6001 
6002 	bzero((void *)&rxq->sw_rx_ring[0],
6003 		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
6004 
6005         /* Free the real RQ ring used by FW */
6006 	if (rxq->rx_bd_ring.p_virt_addr) {
6007                 ecore_chain_free(cdev, &rxq->rx_bd_ring);
6008                 rxq->rx_bd_ring.p_virt_addr = NULL;
6009         }
6010 
6011         /* Free the real completion ring used by FW */
6012         if (rxq->rx_comp_ring.p_virt_addr &&
6013                         rxq->rx_comp_ring.pbl_sp.p_virt_table) {
6014                 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6015                 rxq->rx_comp_ring.p_virt_addr = NULL;
6016                 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6017         }
6018 
6019 #ifdef QLNX_SOFT_LRO
6020 	{
6021 		struct lro_ctrl *lro;
6022 
6023 		lro = &rxq->lro;
6024 		tcp_lro_free(lro);
6025 	}
6026 #endif /* #ifdef QLNX_SOFT_LRO */
6027 
6028 	return;
6029 }
6030 
6031 static int
6032 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6033 {
6034         register struct mbuf	*mp;
6035         uint16_t		rx_buf_size;
6036         struct sw_rx_data	*sw_rx_data;
6037         struct eth_rx_bd	*rx_bd;
6038         dma_addr_t		dma_addr;
6039 	bus_dmamap_t		map;
6040 	bus_dma_segment_t       segs[1];
6041 	int			nsegs;
6042 	int			ret;
6043 
6044         rx_buf_size = rxq->rx_buf_size;
6045 
6046 	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6047 
6048         if (mp == NULL) {
6049                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6050                 return -ENOMEM;
6051         }
6052 
6053 	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6054 
6055 	map = (bus_dmamap_t)0;
6056 
6057 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6058 			BUS_DMA_NOWAIT);
6059 	dma_addr = segs[0].ds_addr;
6060 
6061 	if (ret || !dma_addr || (nsegs != 1)) {
6062 		m_freem(mp);
6063 		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6064                            ret, (long long unsigned int)dma_addr, nsegs);
6065 		return -ENOMEM;
6066 	}
6067 
6068         sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6069         sw_rx_data->data = mp;
6070         sw_rx_data->dma_addr = dma_addr;
6071         sw_rx_data->map = map;
6072 
6073         /* Advance PROD and get BD pointer */
6074         rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6075         rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6076         rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6077 	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6078 
6079         rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6080 
6081         return 0;
6082 }
6083 
6084 static int
6085 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6086 	struct qlnx_agg_info *tpa)
6087 {
6088 	struct mbuf		*mp;
6089         dma_addr_t		dma_addr;
6090 	bus_dmamap_t		map;
6091 	bus_dma_segment_t       segs[1];
6092 	int			nsegs;
6093 	int			ret;
6094         struct sw_rx_data	*rx_buf;
6095 
6096 	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6097 
6098         if (mp == NULL) {
6099                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6100                 return -ENOMEM;
6101         }
6102 
6103 	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6104 
6105 	map = (bus_dmamap_t)0;
6106 
6107 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6108 			BUS_DMA_NOWAIT);
6109 	dma_addr = segs[0].ds_addr;
6110 
6111 	if (ret || !dma_addr || (nsegs != 1)) {
6112 		m_freem(mp);
6113 		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6114 			ret, (long long unsigned int)dma_addr, nsegs);
6115 		return -ENOMEM;
6116 	}
6117 
6118         rx_buf = &tpa->rx_buf;
6119 
6120 	memset(rx_buf, 0, sizeof (struct sw_rx_data));
6121 
6122         rx_buf->data = mp;
6123         rx_buf->dma_addr = dma_addr;
6124         rx_buf->map = map;
6125 
6126 	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6127 
6128 	return (0);
6129 }
6130 
6131 static void
6132 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6133 {
6134         struct sw_rx_data	*rx_buf;
6135 
6136 	rx_buf = &tpa->rx_buf;
6137 
6138 	if (rx_buf->data != NULL) {
6139 		if (rx_buf->map != NULL) {
6140 			bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6141 			bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6142 			rx_buf->map = NULL;
6143 		}
6144 		m_freem(rx_buf->data);
6145 		rx_buf->data = NULL;
6146 	}
6147 	return;
6148 }
6149 
6150 /* This function allocates all memory needed per Rx queue */
6151 static int
6152 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6153 {
6154         int			i, rc, num_allocated;
6155 	struct ecore_dev	 *cdev;
6156 
6157 	cdev = &ha->cdev;
6158 
6159         rxq->num_rx_buffers = RX_RING_SIZE;
6160 
6161 	rxq->rx_buf_size = ha->rx_buf_size;
6162 
6163         /* Allocate the parallel driver ring for Rx buffers */
6164 	bzero((void *)&rxq->sw_rx_ring[0],
6165 		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
6166 
6167         /* Allocate FW Rx ring  */
6168 
6169         rc = ecore_chain_alloc(cdev,
6170 			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6171 			ECORE_CHAIN_MODE_NEXT_PTR,
6172 			ECORE_CHAIN_CNT_TYPE_U16,
6173 			RX_RING_SIZE,
6174 			sizeof(struct eth_rx_bd),
6175 			&rxq->rx_bd_ring, NULL);
6176 
6177         if (rc)
6178                 goto err;
6179 
6180         /* Allocate FW completion ring */
6181         rc = ecore_chain_alloc(cdev,
6182                         ECORE_CHAIN_USE_TO_CONSUME,
6183                         ECORE_CHAIN_MODE_PBL,
6184 			ECORE_CHAIN_CNT_TYPE_U16,
6185                         RX_RING_SIZE,
6186                         sizeof(union eth_rx_cqe),
6187                         &rxq->rx_comp_ring, NULL);
6188 
6189         if (rc)
6190                 goto err;
6191 
6192         /* Allocate buffers for the Rx ring */
6193 
6194 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6195 		rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6196 			&rxq->tpa_info[i]);
6197                 if (rc)
6198                         break;
6199 	}
6200 
6201         for (i = 0; i < rxq->num_rx_buffers; i++) {
6202                 rc = qlnx_alloc_rx_buffer(ha, rxq);
6203                 if (rc)
6204                         break;
6205         }
6206         num_allocated = i;
6207         if (!num_allocated) {
6208 		QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6209                 goto err;
6210         } else if (num_allocated < rxq->num_rx_buffers) {
6211 		QL_DPRINT1(ha, "Allocated less buffers than"
6212 			" desired (%d allocated)\n", num_allocated);
6213         }
6214 
6215 #ifdef QLNX_SOFT_LRO
6216 
6217 	{
6218 		struct lro_ctrl *lro;
6219 
6220 		lro = &rxq->lro;
6221 
6222 		if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
6223 			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6224 				   rxq->rxq_id);
6225 			goto err;
6226 		}
6227 
6228 		lro->ifp = ha->ifp;
6229 	}
6230 #endif /* #ifdef QLNX_SOFT_LRO */
6231         return 0;
6232 
6233 err:
6234         qlnx_free_mem_rxq(ha, rxq);
6235         return -ENOMEM;
6236 }
6237 
6238 static void
6239 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6240 	struct qlnx_tx_queue *txq)
6241 {
6242 	struct ecore_dev	*cdev;
6243 
6244 	cdev = &ha->cdev;
6245 
6246 	bzero((void *)&txq->sw_tx_ring[0],
6247 		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6248 
6249         /* Free the real RQ ring used by FW */
6250         if (txq->tx_pbl.p_virt_addr) {
6251                 ecore_chain_free(cdev, &txq->tx_pbl);
6252                 txq->tx_pbl.p_virt_addr = NULL;
6253         }
6254 	return;
6255 }
6256 
6257 /* This function allocates all memory needed per Tx queue */
6258 static int
6259 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6260 	struct qlnx_tx_queue *txq)
6261 {
6262         int			ret = ECORE_SUCCESS;
6263         union eth_tx_bd_types	*p_virt;
6264 	struct ecore_dev	*cdev;
6265 
6266 	cdev = &ha->cdev;
6267 
6268 	bzero((void *)&txq->sw_tx_ring[0],
6269 		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6270 
6271         /* Allocate the real Tx ring to be used by FW */
6272         ret = ecore_chain_alloc(cdev,
6273                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6274                         ECORE_CHAIN_MODE_PBL,
6275 			ECORE_CHAIN_CNT_TYPE_U16,
6276                         TX_RING_SIZE,
6277                         sizeof(*p_virt),
6278                         &txq->tx_pbl, NULL);
6279 
6280         if (ret != ECORE_SUCCESS) {
6281                 goto err;
6282         }
6283 
6284 	txq->num_tx_buffers = TX_RING_SIZE;
6285 
6286         return 0;
6287 
6288 err:
6289         qlnx_free_mem_txq(ha, fp, txq);
6290         return -ENOMEM;
6291 }
6292 
6293 static void
6294 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6295 {
6296 	struct mbuf	*mp;
6297 	if_t		ifp = ha->ifp;
6298 
6299 	if (mtx_initialized(&fp->tx_mtx)) {
6300 		if (fp->tx_br != NULL) {
6301 			mtx_lock(&fp->tx_mtx);
6302 
6303 			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6304 				fp->tx_pkts_freed++;
6305 				m_freem(mp);
6306 			}
6307 
6308 			mtx_unlock(&fp->tx_mtx);
6309 
6310 			buf_ring_free(fp->tx_br, M_DEVBUF);
6311 			fp->tx_br = NULL;
6312 		}
6313 		mtx_destroy(&fp->tx_mtx);
6314 	}
6315 	return;
6316 }
6317 
6318 static void
6319 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6320 {
6321         int	tc;
6322 
6323         qlnx_free_mem_sb(ha, fp->sb_info);
6324 
6325         qlnx_free_mem_rxq(ha, fp->rxq);
6326 
6327         for (tc = 0; tc < ha->num_tc; tc++)
6328                 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6329 
6330 	return;
6331 }
6332 
6333 static int
6334 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6335 {
6336 	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6337 		"qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6338 
6339 	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6340 
6341         fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6342                                    M_NOWAIT, &fp->tx_mtx);
6343         if (fp->tx_br == NULL) {
6344 		QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6345 			ha->dev_unit, fp->rss_id);
6346 		return -ENOMEM;
6347         }
6348 	return 0;
6349 }
6350 
6351 static int
6352 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6353 {
6354         int	rc, tc;
6355 
6356         rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6357         if (rc)
6358                 goto err;
6359 
6360 	if (ha->rx_jumbo_buf_eq_mtu) {
6361 		if (ha->max_frame_size <= MCLBYTES)
6362 			ha->rx_buf_size = MCLBYTES;
6363 		else if (ha->max_frame_size <= MJUMPAGESIZE)
6364 			ha->rx_buf_size = MJUMPAGESIZE;
6365 		else if (ha->max_frame_size <= MJUM9BYTES)
6366 			ha->rx_buf_size = MJUM9BYTES;
6367 		else if (ha->max_frame_size <= MJUM16BYTES)
6368 			ha->rx_buf_size = MJUM16BYTES;
6369 	} else {
6370 		if (ha->max_frame_size <= MCLBYTES)
6371 			ha->rx_buf_size = MCLBYTES;
6372 		else
6373 			ha->rx_buf_size = MJUMPAGESIZE;
6374 	}
6375 
6376         rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6377         if (rc)
6378                 goto err;
6379 
6380         for (tc = 0; tc < ha->num_tc; tc++) {
6381                 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6382                 if (rc)
6383                         goto err;
6384         }
6385 
6386         return 0;
6387 
6388 err:
6389         qlnx_free_mem_fp(ha, fp);
6390         return -ENOMEM;
6391 }
6392 
6393 static void
6394 qlnx_free_mem_load(qlnx_host_t *ha)
6395 {
6396         int			i;
6397 
6398         for (i = 0; i < ha->num_rss; i++) {
6399                 struct qlnx_fastpath *fp = &ha->fp_array[i];
6400 
6401                 qlnx_free_mem_fp(ha, fp);
6402         }
6403 	return;
6404 }
6405 
6406 static int
6407 qlnx_alloc_mem_load(qlnx_host_t *ha)
6408 {
6409         int	rc = 0, rss_id;
6410 
6411         for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6412                 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6413 
6414                 rc = qlnx_alloc_mem_fp(ha, fp);
6415                 if (rc)
6416                         break;
6417         }
6418 	return (rc);
6419 }
6420 
6421 static int
6422 qlnx_start_vport(struct ecore_dev *cdev,
6423                 u8 vport_id,
6424                 u16 mtu,
6425                 u8 drop_ttl0_flg,
6426                 u8 inner_vlan_removal_en_flg,
6427 		u8 tx_switching,
6428 		u8 hw_lro_enable)
6429 {
6430         int					rc, i;
6431 	struct ecore_sp_vport_start_params	vport_start_params = { 0 };
6432 	qlnx_host_t				*ha __unused;
6433 
6434 	ha = (qlnx_host_t *)cdev;
6435 
6436 	vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6437 	vport_start_params.tx_switching = 0;
6438 	vport_start_params.handle_ptp_pkts = 0;
6439 	vport_start_params.only_untagged = 0;
6440 	vport_start_params.drop_ttl0 = drop_ttl0_flg;
6441 
6442 	vport_start_params.tpa_mode =
6443 		(hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6444 	vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6445 
6446 	vport_start_params.vport_id = vport_id;
6447 	vport_start_params.mtu = mtu;
6448 
6449 	QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6450 
6451         for_each_hwfn(cdev, i) {
6452                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6453 
6454 		vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6455 		vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6456 
6457                 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6458 
6459                 if (rc) {
6460 			QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6461 				" with MTU %d\n" , vport_id, mtu);
6462                         return -ENOMEM;
6463                 }
6464 
6465                 ecore_hw_start_fastpath(p_hwfn);
6466 
6467 		QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6468 			vport_id, mtu);
6469         }
6470         return 0;
6471 }
6472 
6473 static int
6474 qlnx_update_vport(struct ecore_dev *cdev,
6475 	struct qlnx_update_vport_params *params)
6476 {
6477         struct ecore_sp_vport_update_params	sp_params;
6478         int					rc, i, j, fp_index;
6479 	struct ecore_hwfn			*p_hwfn;
6480         struct ecore_rss_params			*rss;
6481 	qlnx_host_t				*ha = (qlnx_host_t *)cdev;
6482         struct qlnx_fastpath			*fp;
6483 
6484         memset(&sp_params, 0, sizeof(sp_params));
6485         /* Translate protocol params into sp params */
6486         sp_params.vport_id = params->vport_id;
6487 
6488         sp_params.update_vport_active_rx_flg =
6489 		params->update_vport_active_rx_flg;
6490         sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6491 
6492         sp_params.update_vport_active_tx_flg =
6493 		params->update_vport_active_tx_flg;
6494         sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6495 
6496         sp_params.update_inner_vlan_removal_flg =
6497                 params->update_inner_vlan_removal_flg;
6498         sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6499 
6500 	sp_params.sge_tpa_params = params->sge_tpa_params;
6501 
6502         /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
6503          * We need to re-fix the rss values per engine for CMT.
6504          */
6505 	if (params->rss_params->update_rss_config)
6506         sp_params.rss_params = params->rss_params;
6507 	else
6508 		sp_params.rss_params =  NULL;
6509 
6510         for_each_hwfn(cdev, i) {
6511 		p_hwfn = &cdev->hwfns[i];
6512 
6513 		if ((cdev->num_hwfns > 1) &&
6514 			params->rss_params->update_rss_config &&
6515 			params->rss_params->rss_enable) {
6516 			rss = params->rss_params;
6517 
6518 			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6519 				fp_index = ((cdev->num_hwfns * j) + i) %
6520 						ha->num_rss;
6521 
6522                 		fp = &ha->fp_array[fp_index];
6523                         	rss->rss_ind_table[j] = fp->rxq->handle;
6524 			}
6525 
6526 			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6527 				QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6528 					rss->rss_ind_table[j],
6529 					rss->rss_ind_table[j+1],
6530 					rss->rss_ind_table[j+2],
6531 					rss->rss_ind_table[j+3],
6532 					rss->rss_ind_table[j+4],
6533 					rss->rss_ind_table[j+5],
6534 					rss->rss_ind_table[j+6],
6535 					rss->rss_ind_table[j+7]);
6536 					j += 8;
6537 			}
6538 		}
6539 
6540                 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6541 
6542 		QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6543 
6544                 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6545                                            ECORE_SPQ_MODE_EBLOCK, NULL);
6546                 if (rc) {
6547 			QL_DPRINT1(ha, "Failed to update VPORT\n");
6548                         return rc;
6549                 }
6550 
6551                 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6552 			rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6553 			params->vport_id, params->vport_active_tx_flg,
6554 			params->vport_active_rx_flg,
6555 			params->update_vport_active_tx_flg,
6556 			params->update_vport_active_rx_flg);
6557         }
6558 
6559         return 0;
6560 }
6561 
6562 static void
6563 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6564 {
6565         struct eth_rx_bd	*rx_bd_cons =
6566 					ecore_chain_consume(&rxq->rx_bd_ring);
6567         struct eth_rx_bd	*rx_bd_prod =
6568 					ecore_chain_produce(&rxq->rx_bd_ring);
6569         struct sw_rx_data	*sw_rx_data_cons =
6570 					&rxq->sw_rx_ring[rxq->sw_rx_cons];
6571         struct sw_rx_data	*sw_rx_data_prod =
6572 					&rxq->sw_rx_ring[rxq->sw_rx_prod];
6573 
6574         sw_rx_data_prod->data = sw_rx_data_cons->data;
6575         memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6576 
6577         rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6578         rxq->sw_rx_prod  = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6579 
6580 	return;
6581 }
6582 
6583 static void
6584 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6585 {
6586 
6587         uint16_t	 	bd_prod;
6588         uint16_t		cqe_prod;
6589 	union {
6590 		struct eth_rx_prod_data rx_prod_data;
6591 		uint32_t		data32;
6592 	} rx_prods;
6593 
6594         bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6595         cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6596 
6597         /* Update producers */
6598         rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6599         rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6600 
6601         /* Make sure that the BD and SGE data is updated before updating the
6602          * producers since FW might read the BD/SGE right after the producer
6603          * is updated.
6604          */
6605 	wmb();
6606 
6607 #ifdef ECORE_CONFIG_DIRECT_HWFN
6608 	internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6609 		sizeof(rx_prods), &rx_prods.data32);
6610 #else
6611 	internal_ram_wr(rxq->hw_rxq_prod_addr,
6612 		sizeof(rx_prods), &rx_prods.data32);
6613 #endif
6614 
6615         /* mmiowb is needed to synchronize doorbell writes from more than one
6616          * processor. It guarantees that the write arrives to the device before
6617          * the napi lock is released and another qlnx_poll is called (possibly
6618          * on another CPU). Without this barrier, the next doorbell can bypass
6619          * this doorbell. This is applicable to IA64/Altix systems.
6620          */
6621         wmb();
6622 
6623 	return;
6624 }
6625 
6626 static uint32_t qlnx_hash_key[] = {
6627                 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6628                 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6629                 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6630                 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6631                 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6632                 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6633                 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6634                 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6635                 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6636                 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6637 
6638 static int
6639 qlnx_start_queues(qlnx_host_t *ha)
6640 {
6641         int				rc, tc, i, vport_id = 0,
6642 					drop_ttl0_flg = 1, vlan_removal_en = 1,
6643 					tx_switching = 0, hw_lro_enable = 0;
6644         struct ecore_dev		*cdev = &ha->cdev;
6645         struct ecore_rss_params		*rss_params = &ha->rss_params;
6646         struct qlnx_update_vport_params	vport_update_params;
6647         if_t				ifp;
6648         struct ecore_hwfn		*p_hwfn;
6649 	struct ecore_sge_tpa_params	tpa_params;
6650 	struct ecore_queue_start_common_params qparams;
6651         struct qlnx_fastpath		*fp;
6652 
6653 	ifp = ha->ifp;
6654 
6655 	QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6656 
6657         if (!ha->num_rss) {
6658 		QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6659 			" are no Rx queues\n");
6660                 return -EINVAL;
6661         }
6662 
6663 #ifndef QLNX_SOFT_LRO
6664         hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO;
6665 #endif /* #ifndef QLNX_SOFT_LRO */
6666 
6667         rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg,
6668 			vlan_removal_en, tx_switching, hw_lro_enable);
6669 
6670         if (rc) {
6671                 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6672                 return rc;
6673         }
6674 
6675 	QL_DPRINT2(ha, "Start vport ramrod passed, "
6676 		"vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6677 		vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en);
6678 
6679         for_each_rss(i) {
6680 		struct ecore_rxq_start_ret_params rx_ret_params;
6681 		struct ecore_txq_start_ret_params tx_ret_params;
6682 
6683                 fp = &ha->fp_array[i];
6684         	p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6685 
6686 		bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6687 		bzero(&rx_ret_params,
6688 			sizeof (struct ecore_rxq_start_ret_params));
6689 
6690 		qparams.queue_id = i ;
6691 		qparams.vport_id = vport_id;
6692 		qparams.stats_id = vport_id;
6693 		qparams.p_sb = fp->sb_info;
6694 		qparams.sb_idx = RX_PI;
6695 
6696 
6697 		rc = ecore_eth_rx_queue_start(p_hwfn,
6698 			p_hwfn->hw_info.opaque_fid,
6699 			&qparams,
6700 			fp->rxq->rx_buf_size,	/* bd_max_bytes */
6701 			/* bd_chain_phys_addr */
6702 			fp->rxq->rx_bd_ring.p_phys_addr,
6703 			/* cqe_pbl_addr */
6704 			ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6705 			/* cqe_pbl_size */
6706 			ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6707 			&rx_ret_params);
6708 
6709                 if (rc) {
6710                 	QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6711                         return rc;
6712                 }
6713 
6714 		fp->rxq->hw_rxq_prod_addr	= rx_ret_params.p_prod;
6715 		fp->rxq->handle			= rx_ret_params.p_handle;
6716                 fp->rxq->hw_cons_ptr		=
6717 				&fp->sb_info->sb_virt->pi_array[RX_PI];
6718 
6719                 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6720 
6721                 for (tc = 0; tc < ha->num_tc; tc++) {
6722                         struct qlnx_tx_queue *txq = fp->txq[tc];
6723 
6724 			bzero(&qparams,
6725 				sizeof(struct ecore_queue_start_common_params));
6726 			bzero(&tx_ret_params,
6727 				sizeof (struct ecore_txq_start_ret_params));
6728 
6729 			qparams.queue_id = txq->index / cdev->num_hwfns ;
6730 			qparams.vport_id = vport_id;
6731 			qparams.stats_id = vport_id;
6732 			qparams.p_sb = fp->sb_info;
6733 			qparams.sb_idx = TX_PI(tc);
6734 
6735 			rc = ecore_eth_tx_queue_start(p_hwfn,
6736 				p_hwfn->hw_info.opaque_fid,
6737 				&qparams, tc,
6738 				/* bd_chain_phys_addr */
6739 				ecore_chain_get_pbl_phys(&txq->tx_pbl),
6740 				ecore_chain_get_page_cnt(&txq->tx_pbl),
6741 				&tx_ret_params);
6742 
6743                         if (rc) {
6744                 		QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6745 					   txq->index, rc);
6746                                 return rc;
6747                         }
6748 
6749 			txq->doorbell_addr = tx_ret_params.p_doorbell;
6750 			txq->handle = tx_ret_params.p_handle;
6751 
6752                         txq->hw_cons_ptr =
6753                                 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6754                         SET_FIELD(txq->tx_db.data.params,
6755                                   ETH_DB_DATA_DEST, DB_DEST_XCM);
6756                         SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6757                                   DB_AGG_CMD_SET);
6758                         SET_FIELD(txq->tx_db.data.params,
6759                                   ETH_DB_DATA_AGG_VAL_SEL,
6760                                   DQ_XCM_ETH_TX_BD_PROD_CMD);
6761 
6762                         txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6763                 }
6764         }
6765 
6766         /* Fill struct with RSS params */
6767         if (ha->num_rss > 1) {
6768                 rss_params->update_rss_config = 1;
6769                 rss_params->rss_enable = 1;
6770                 rss_params->update_rss_capabilities = 1;
6771                 rss_params->update_rss_ind_table = 1;
6772                 rss_params->update_rss_key = 1;
6773                 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6774                                        ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6775                 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6776 
6777                 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6778                 	fp = &ha->fp_array[(i % ha->num_rss)];
6779                         rss_params->rss_ind_table[i] = fp->rxq->handle;
6780 		}
6781 
6782                 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6783 			rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6784 
6785         } else {
6786                 memset(rss_params, 0, sizeof(*rss_params));
6787         }
6788 
6789         /* Prepare and send the vport enable */
6790         memset(&vport_update_params, 0, sizeof(vport_update_params));
6791         vport_update_params.vport_id = vport_id;
6792         vport_update_params.update_vport_active_tx_flg = 1;
6793         vport_update_params.vport_active_tx_flg = 1;
6794         vport_update_params.update_vport_active_rx_flg = 1;
6795         vport_update_params.vport_active_rx_flg = 1;
6796         vport_update_params.rss_params = rss_params;
6797         vport_update_params.update_inner_vlan_removal_flg = 1;
6798         vport_update_params.inner_vlan_removal_flg = 1;
6799 
6800 	if (hw_lro_enable) {
6801 		memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6802 
6803 		tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6804 
6805 		tpa_params.update_tpa_en_flg = 1;
6806 		tpa_params.tpa_ipv4_en_flg = 1;
6807 		tpa_params.tpa_ipv6_en_flg = 1;
6808 
6809 		tpa_params.update_tpa_param_flg = 1;
6810 		tpa_params.tpa_pkt_split_flg = 0;
6811 		tpa_params.tpa_hdr_data_split_flg = 0;
6812 		tpa_params.tpa_gro_consistent_flg = 0;
6813 		tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6814 		tpa_params.tpa_max_size = (uint16_t)(-1);
6815 		tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2;
6816 		tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2;
6817 
6818 		vport_update_params.sge_tpa_params = &tpa_params;
6819 	}
6820 
6821         rc = qlnx_update_vport(cdev, &vport_update_params);
6822         if (rc) {
6823 		QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6824                 return rc;
6825         }
6826 
6827         return 0;
6828 }
6829 
6830 static int
6831 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6832 	struct qlnx_tx_queue *txq)
6833 {
6834 	uint16_t	hw_bd_cons;
6835 	uint16_t	ecore_cons_idx;
6836 
6837 	QL_DPRINT2(ha, "enter\n");
6838 
6839 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6840 
6841 	while (hw_bd_cons !=
6842 		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6843 		mtx_lock(&fp->tx_mtx);
6844 
6845 		(void)qlnx_tx_int(ha, fp, txq);
6846 
6847 		mtx_unlock(&fp->tx_mtx);
6848 
6849 		qlnx_mdelay(__func__, 2);
6850 
6851 		hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6852 	}
6853 
6854 	QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6855 
6856         return 0;
6857 }
6858 
6859 static int
6860 qlnx_stop_queues(qlnx_host_t *ha)
6861 {
6862         struct qlnx_update_vport_params	vport_update_params;
6863         struct ecore_dev		*cdev;
6864         struct qlnx_fastpath		*fp;
6865         int				rc, tc, i;
6866 
6867         cdev = &ha->cdev;
6868 
6869         /* Disable the vport */
6870 
6871         memset(&vport_update_params, 0, sizeof(vport_update_params));
6872 
6873         vport_update_params.vport_id = 0;
6874         vport_update_params.update_vport_active_tx_flg = 1;
6875         vport_update_params.vport_active_tx_flg = 0;
6876         vport_update_params.update_vport_active_rx_flg = 1;
6877         vport_update_params.vport_active_rx_flg = 0;
6878         vport_update_params.rss_params = &ha->rss_params;
6879         vport_update_params.rss_params->update_rss_config = 0;
6880         vport_update_params.rss_params->rss_enable = 0;
6881         vport_update_params.update_inner_vlan_removal_flg = 0;
6882         vport_update_params.inner_vlan_removal_flg = 0;
6883 
6884 	QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6885 
6886         rc = qlnx_update_vport(cdev, &vport_update_params);
6887         if (rc) {
6888 		QL_DPRINT1(ha, "Failed to update vport\n");
6889                 return rc;
6890         }
6891 
6892         /* Flush Tx queues. If needed, request drain from MCP */
6893         for_each_rss(i) {
6894                 fp = &ha->fp_array[i];
6895 
6896                 for (tc = 0; tc < ha->num_tc; tc++) {
6897                         struct qlnx_tx_queue *txq = fp->txq[tc];
6898 
6899                         rc = qlnx_drain_txq(ha, fp, txq);
6900                         if (rc)
6901                                 return rc;
6902                 }
6903         }
6904 
6905         /* Stop all Queues in reverse order*/
6906         for (i = ha->num_rss - 1; i >= 0; i--) {
6907 		struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6908 
6909                 fp = &ha->fp_array[i];
6910 
6911                 /* Stop the Tx Queue(s)*/
6912                 for (tc = 0; tc < ha->num_tc; tc++) {
6913 			int tx_queue_id __unused;
6914 
6915 			tx_queue_id = tc * ha->num_rss + i;
6916 			rc = ecore_eth_tx_queue_stop(p_hwfn,
6917 					fp->txq[tc]->handle);
6918 
6919                         if (rc) {
6920 				QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6921 					   tx_queue_id);
6922                                 return rc;
6923                         }
6924                 }
6925 
6926                 /* Stop the Rx Queue*/
6927 		rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6928 				false);
6929                 if (rc) {
6930                         QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6931                         return rc;
6932                 }
6933         }
6934 
6935         /* Stop the vport */
6936 	for_each_hwfn(cdev, i) {
6937 		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6938 
6939 		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6940 
6941 		if (rc) {
6942                         QL_DPRINT1(ha, "Failed to stop VPORT\n");
6943 			return rc;
6944 		}
6945 	}
6946 
6947         return rc;
6948 }
6949 
6950 static int
6951 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6952 	enum ecore_filter_opcode opcode,
6953 	unsigned char mac[ETH_ALEN])
6954 {
6955 	struct ecore_filter_ucast	ucast;
6956 	struct ecore_dev		*cdev;
6957 	int				rc;
6958 
6959 	cdev = &ha->cdev;
6960 
6961 	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6962 
6963         ucast.opcode = opcode;
6964         ucast.type = ECORE_FILTER_MAC;
6965         ucast.is_rx_filter = 1;
6966         ucast.vport_to_add_to = 0;
6967         memcpy(&ucast.mac[0], mac, ETH_ALEN);
6968 
6969 	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6970 
6971         return (rc);
6972 }
6973 
6974 static int
6975 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6976 {
6977 	struct ecore_filter_ucast	ucast;
6978 	struct ecore_dev		*cdev;
6979 	int				rc;
6980 
6981 	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6982 
6983 	ucast.opcode = ECORE_FILTER_REPLACE;
6984 	ucast.type = ECORE_FILTER_MAC;
6985 	ucast.is_rx_filter = 1;
6986 
6987 	cdev = &ha->cdev;
6988 
6989 	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6990 
6991 	return (rc);
6992 }
6993 
6994 static int
6995 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6996 {
6997 	struct ecore_filter_mcast	*mcast;
6998 	struct ecore_dev		*cdev;
6999 	int				rc, i;
7000 
7001 	cdev = &ha->cdev;
7002 
7003 	mcast = &ha->ecore_mcast;
7004 	bzero(mcast, sizeof(struct ecore_filter_mcast));
7005 
7006 	mcast->opcode = ECORE_FILTER_REMOVE;
7007 
7008 	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
7009 		if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
7010 			ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
7011 			ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
7012 			memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
7013 			mcast->num_mc_addrs++;
7014 		}
7015 	}
7016 	mcast = &ha->ecore_mcast;
7017 
7018 	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7019 
7020 	bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7021 	ha->nmcast = 0;
7022 
7023 	return (rc);
7024 }
7025 
7026 static int
7027 qlnx_clean_filters(qlnx_host_t *ha)
7028 {
7029         int	rc = 0;
7030 
7031 	/* Remove all unicast macs */
7032 	rc = qlnx_remove_all_ucast_mac(ha);
7033 	if (rc)
7034 		return rc;
7035 
7036 	/* Remove all multicast macs */
7037 	rc = qlnx_remove_all_mcast_mac(ha);
7038 	if (rc)
7039 		return rc;
7040 
7041         rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7042 
7043         return (rc);
7044 }
7045 
7046 static int
7047 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7048 {
7049 	struct ecore_filter_accept_flags	accept;
7050 	int					rc = 0;
7051 	struct ecore_dev			*cdev;
7052 
7053 	cdev = &ha->cdev;
7054 
7055 	bzero(&accept, sizeof(struct ecore_filter_accept_flags));
7056 
7057 	accept.update_rx_mode_config = 1;
7058 	accept.rx_accept_filter = filter;
7059 
7060 	accept.update_tx_mode_config = 1;
7061 	accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
7062 		ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
7063 
7064 	rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7065 			ECORE_SPQ_MODE_CB, NULL);
7066 
7067 	return (rc);
7068 }
7069 
7070 static int
7071 qlnx_set_rx_mode(qlnx_host_t *ha)
7072 {
7073 	int	rc = 0;
7074 	uint8_t	filter;
7075 
7076 	rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7077         if (rc)
7078                 return rc;
7079 
7080 	rc = qlnx_remove_all_mcast_mac(ha);
7081         if (rc)
7082                 return rc;
7083 
7084 	filter = ECORE_ACCEPT_UCAST_MATCHED |
7085 			ECORE_ACCEPT_MCAST_MATCHED |
7086 			ECORE_ACCEPT_BCAST;
7087 
7088 	if (qlnx_vf_device(ha) == 0 || (ha->ifp->if_flags & IFF_PROMISC)) {
7089 		filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
7090 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7091 	} else if (ha->ifp->if_flags & IFF_ALLMULTI) {
7092 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7093 	}
7094 	ha->filter = filter;
7095 
7096 	rc = qlnx_set_rx_accept_filter(ha, filter);
7097 
7098 	return (rc);
7099 }
7100 
7101 static int
7102 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7103 {
7104         int			i, rc = 0;
7105 	struct ecore_dev	*cdev;
7106 	struct ecore_hwfn	*hwfn;
7107 	struct ecore_ptt	*ptt;
7108 
7109 	if (qlnx_vf_device(ha) == 0)
7110 		return (0);
7111 
7112 	cdev = &ha->cdev;
7113 
7114         for_each_hwfn(cdev, i) {
7115                 hwfn = &cdev->hwfns[i];
7116 
7117                 ptt = ecore_ptt_acquire(hwfn);
7118        	        if (!ptt)
7119                         return -EBUSY;
7120 
7121                 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7122 
7123                 ecore_ptt_release(hwfn, ptt);
7124 
7125                 if (rc)
7126                         return rc;
7127         }
7128         return (rc);
7129 }
7130 
7131 static uint64_t
7132 qlnx_get_counter(if_t ifp, ift_counter cnt)
7133 {
7134 	qlnx_host_t *ha;
7135 	uint64_t count;
7136 
7137         ha = (qlnx_host_t *)if_getsoftc(ifp);
7138 
7139         switch (cnt) {
7140         case IFCOUNTER_IPACKETS:
7141 		count = ha->hw_stats.common.rx_ucast_pkts +
7142 			ha->hw_stats.common.rx_mcast_pkts +
7143 			ha->hw_stats.common.rx_bcast_pkts;
7144 		break;
7145 
7146         case IFCOUNTER_IERRORS:
7147 		count = ha->hw_stats.common.rx_crc_errors +
7148 			ha->hw_stats.common.rx_align_errors +
7149 			ha->hw_stats.common.rx_oversize_packets +
7150 			ha->hw_stats.common.rx_undersize_packets;
7151 		break;
7152 
7153         case IFCOUNTER_OPACKETS:
7154 		count = ha->hw_stats.common.tx_ucast_pkts +
7155 			ha->hw_stats.common.tx_mcast_pkts +
7156 			ha->hw_stats.common.tx_bcast_pkts;
7157 		break;
7158 
7159         case IFCOUNTER_OERRORS:
7160                 count = ha->hw_stats.common.tx_err_drop_pkts;
7161 		break;
7162 
7163         case IFCOUNTER_COLLISIONS:
7164                 return (0);
7165 
7166         case IFCOUNTER_IBYTES:
7167 		count = ha->hw_stats.common.rx_ucast_bytes +
7168 			ha->hw_stats.common.rx_mcast_bytes +
7169 			ha->hw_stats.common.rx_bcast_bytes;
7170 		break;
7171 
7172         case IFCOUNTER_OBYTES:
7173 		count = ha->hw_stats.common.tx_ucast_bytes +
7174 			ha->hw_stats.common.tx_mcast_bytes +
7175 			ha->hw_stats.common.tx_bcast_bytes;
7176 		break;
7177 
7178         case IFCOUNTER_IMCASTS:
7179 		count = ha->hw_stats.common.rx_mcast_bytes;
7180 		break;
7181 
7182         case IFCOUNTER_OMCASTS:
7183 		count = ha->hw_stats.common.tx_mcast_bytes;
7184 		break;
7185 
7186         case IFCOUNTER_IQDROPS:
7187         case IFCOUNTER_OQDROPS:
7188         case IFCOUNTER_NOPROTO:
7189 
7190         default:
7191                 return (if_get_counter_default(ifp, cnt));
7192         }
7193 	return (count);
7194 }
7195 
7196 static void
7197 qlnx_timer(void *arg)
7198 {
7199 	qlnx_host_t	*ha;
7200 
7201 	ha = (qlnx_host_t *)arg;
7202 
7203 	if (ha->error_recovery) {
7204 		ha->error_recovery = 0;
7205 		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7206 		return;
7207 	}
7208 
7209        	ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7210 
7211 	if (ha->storm_stats_gather)
7212 		qlnx_sample_storm_stats(ha);
7213 
7214 	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7215 
7216 	return;
7217 }
7218 
7219 static int
7220 qlnx_load(qlnx_host_t *ha)
7221 {
7222 	int			i;
7223 	int			rc = 0;
7224         device_t		dev;
7225 
7226         dev = ha->pci_dev;
7227 
7228 	QL_DPRINT2(ha, "enter\n");
7229 
7230         rc = qlnx_alloc_mem_arrays(ha);
7231         if (rc)
7232                 goto qlnx_load_exit0;
7233 
7234         qlnx_init_fp(ha);
7235 
7236         rc = qlnx_alloc_mem_load(ha);
7237         if (rc)
7238                 goto qlnx_load_exit1;
7239 
7240         QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7241 		   ha->num_rss, ha->num_tc);
7242 
7243 	for (i = 0; i < ha->num_rss; i++) {
7244 		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7245                         (INTR_TYPE_NET | INTR_MPSAFE),
7246                         NULL, qlnx_fp_isr, &ha->irq_vec[i],
7247                         &ha->irq_vec[i].handle))) {
7248                         QL_DPRINT1(ha, "could not setup interrupt\n");
7249                         goto qlnx_load_exit2;
7250 		}
7251 
7252 		QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
7253 			 irq %p handle %p\n", i,
7254 			ha->irq_vec[i].irq_rid,
7255 			ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7256 
7257 		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7258 	}
7259 
7260         rc = qlnx_start_queues(ha);
7261         if (rc)
7262                 goto qlnx_load_exit2;
7263 
7264         QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7265 
7266         /* Add primary mac and set Rx filters */
7267         rc = qlnx_set_rx_mode(ha);
7268         if (rc)
7269                 goto qlnx_load_exit2;
7270 
7271         /* Ask for link-up using current configuration */
7272 	qlnx_set_link(ha, true);
7273 
7274 	if (qlnx_vf_device(ha) == 0)
7275 		qlnx_link_update(&ha->cdev.hwfns[0]);
7276 
7277         ha->state = QLNX_STATE_OPEN;
7278 
7279 	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7280 
7281 	if (ha->flags.callout_init)
7282         	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7283 
7284         goto qlnx_load_exit0;
7285 
7286 qlnx_load_exit2:
7287         qlnx_free_mem_load(ha);
7288 
7289 qlnx_load_exit1:
7290         ha->num_rss = 0;
7291 
7292 qlnx_load_exit0:
7293 	QL_DPRINT2(ha, "exit [%d]\n", rc);
7294         return rc;
7295 }
7296 
7297 static void
7298 qlnx_drain_soft_lro(qlnx_host_t *ha)
7299 {
7300 #ifdef QLNX_SOFT_LRO
7301 
7302 	if_t		ifp;
7303 	int		i;
7304 
7305 	ifp = ha->ifp;
7306 
7307 	if (if_getcapenable(ifp) & IFCAP_LRO) {
7308 	        for (i = 0; i < ha->num_rss; i++) {
7309 			struct qlnx_fastpath *fp = &ha->fp_array[i];
7310 			struct lro_ctrl *lro;
7311 
7312 			lro = &fp->rxq->lro;
7313 
7314 			tcp_lro_flush_all(lro);
7315                 }
7316 	}
7317 
7318 #endif /* #ifdef QLNX_SOFT_LRO */
7319 
7320 	return;
7321 }
7322 
7323 static void
7324 qlnx_unload(qlnx_host_t *ha)
7325 {
7326 	struct ecore_dev	*cdev;
7327         device_t		dev;
7328 	int			i;
7329 
7330 	cdev = &ha->cdev;
7331         dev = ha->pci_dev;
7332 
7333 	QL_DPRINT2(ha, "enter\n");
7334         QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
7335 
7336 	if (ha->state == QLNX_STATE_OPEN) {
7337 		qlnx_set_link(ha, false);
7338 		qlnx_clean_filters(ha);
7339 		qlnx_stop_queues(ha);
7340 		ecore_hw_stop_fastpath(cdev);
7341 
7342 		for (i = 0; i < ha->num_rss; i++) {
7343 			if (ha->irq_vec[i].handle) {
7344 				(void)bus_teardown_intr(dev,
7345 					ha->irq_vec[i].irq,
7346 					ha->irq_vec[i].handle);
7347 				ha->irq_vec[i].handle = NULL;
7348 			}
7349 		}
7350 
7351 		qlnx_drain_fp_taskqueues(ha);
7352 		qlnx_drain_soft_lro(ha);
7353         	qlnx_free_mem_load(ha);
7354 	}
7355 
7356 	if (ha->flags.callout_init)
7357 		callout_drain(&ha->qlnx_callout);
7358 
7359 	qlnx_mdelay(__func__, 1000);
7360 
7361         ha->state = QLNX_STATE_CLOSED;
7362 
7363 	QL_DPRINT2(ha, "exit\n");
7364 	return;
7365 }
7366 
7367 static int
7368 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7369 {
7370 	int			rval = -1;
7371 	struct ecore_hwfn	*p_hwfn;
7372 	struct ecore_ptt	*p_ptt;
7373 
7374 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7375 
7376 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7377 	p_ptt = ecore_ptt_acquire(p_hwfn);
7378 
7379         if (!p_ptt) {
7380 		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7381                 return (rval);
7382         }
7383 
7384         rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7385 
7386 	if (rval == DBG_STATUS_OK)
7387                 rval = 0;
7388         else {
7389 		QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7390 			"[0x%x]\n", rval);
7391 	}
7392 
7393         ecore_ptt_release(p_hwfn, p_ptt);
7394 
7395         return (rval);
7396 }
7397 
7398 static int
7399 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7400 {
7401 	int			rval = -1;
7402 	struct ecore_hwfn	*p_hwfn;
7403 	struct ecore_ptt	*p_ptt;
7404 
7405 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7406 
7407 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7408 	p_ptt = ecore_ptt_acquire(p_hwfn);
7409 
7410         if (!p_ptt) {
7411 		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7412                 return (rval);
7413         }
7414 
7415         rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7416 
7417 	if (rval == DBG_STATUS_OK)
7418                 rval = 0;
7419         else {
7420 		QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7421 			" [0x%x]\n", rval);
7422 	}
7423 
7424         ecore_ptt_release(p_hwfn, p_ptt);
7425 
7426         return (rval);
7427 }
7428 
7429 static void
7430 qlnx_sample_storm_stats(qlnx_host_t *ha)
7431 {
7432         int			i, index;
7433         struct ecore_dev	*cdev;
7434 	qlnx_storm_stats_t	*s_stats;
7435 	uint32_t		reg;
7436         struct ecore_ptt	*p_ptt;
7437         struct ecore_hwfn	*hwfn;
7438 
7439 	if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7440 		ha->storm_stats_gather = 0;
7441 		return;
7442 	}
7443 
7444         cdev = &ha->cdev;
7445 
7446         for_each_hwfn(cdev, i) {
7447                 hwfn = &cdev->hwfns[i];
7448 
7449                 p_ptt = ecore_ptt_acquire(hwfn);
7450                 if (!p_ptt)
7451                         return;
7452 
7453 		index = ha->storm_stats_index +
7454 				(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7455 
7456 		s_stats = &ha->storm_stats[index];
7457 
7458 		/* XSTORM */
7459 		reg = XSEM_REG_FAST_MEMORY +
7460 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7461 		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7462 
7463 		reg = XSEM_REG_FAST_MEMORY +
7464 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7465 		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7466 
7467 		reg = XSEM_REG_FAST_MEMORY +
7468 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7469 		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7470 
7471 		reg = XSEM_REG_FAST_MEMORY +
7472 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7473 		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7474 
7475 		/* YSTORM */
7476 		reg = YSEM_REG_FAST_MEMORY +
7477 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7478 		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7479 
7480 		reg = YSEM_REG_FAST_MEMORY +
7481 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7482 		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7483 
7484 		reg = YSEM_REG_FAST_MEMORY +
7485 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7486 		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7487 
7488 		reg = YSEM_REG_FAST_MEMORY +
7489 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7490 		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7491 
7492 		/* PSTORM */
7493 		reg = PSEM_REG_FAST_MEMORY +
7494 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7495 		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7496 
7497 		reg = PSEM_REG_FAST_MEMORY +
7498 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7499 		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7500 
7501 		reg = PSEM_REG_FAST_MEMORY +
7502 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7503 		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7504 
7505 		reg = PSEM_REG_FAST_MEMORY +
7506 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7507 		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7508 
7509 		/* TSTORM */
7510 		reg = TSEM_REG_FAST_MEMORY +
7511 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7512 		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7513 
7514 		reg = TSEM_REG_FAST_MEMORY +
7515 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7516 		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7517 
7518 		reg = TSEM_REG_FAST_MEMORY +
7519 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7520 		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7521 
7522 		reg = TSEM_REG_FAST_MEMORY +
7523 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7524 		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7525 
7526 		/* MSTORM */
7527 		reg = MSEM_REG_FAST_MEMORY +
7528 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7529 		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7530 
7531 		reg = MSEM_REG_FAST_MEMORY +
7532 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7533 		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7534 
7535 		reg = MSEM_REG_FAST_MEMORY +
7536 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7537 		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7538 
7539 		reg = MSEM_REG_FAST_MEMORY +
7540 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7541 		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7542 
7543 		/* USTORM */
7544 		reg = USEM_REG_FAST_MEMORY +
7545 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7546 		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7547 
7548 		reg = USEM_REG_FAST_MEMORY +
7549 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7550 		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7551 
7552 		reg = USEM_REG_FAST_MEMORY +
7553 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7554 		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7555 
7556 		reg = USEM_REG_FAST_MEMORY +
7557 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7558 		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7559 
7560                 ecore_ptt_release(hwfn, p_ptt);
7561         }
7562 
7563 	ha->storm_stats_index++;
7564 
7565         return;
7566 }
7567 
7568 /*
7569  * Name: qlnx_dump_buf8
7570  * Function: dumps a buffer as bytes
7571  */
7572 static void
7573 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
7574 {
7575         device_t	dev;
7576         uint32_t	i = 0;
7577         uint8_t		*buf;
7578 
7579         dev = ha->pci_dev;
7580         buf = dbuf;
7581 
7582         device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
7583 
7584         while (len >= 16) {
7585                 device_printf(dev,"0x%08x:"
7586                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7587                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7588                         buf[0], buf[1], buf[2], buf[3],
7589                         buf[4], buf[5], buf[6], buf[7],
7590                         buf[8], buf[9], buf[10], buf[11],
7591                         buf[12], buf[13], buf[14], buf[15]);
7592                 i += 16;
7593                 len -= 16;
7594                 buf += 16;
7595         }
7596         switch (len) {
7597         case 1:
7598                 device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
7599                 break;
7600         case 2:
7601                 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
7602                 break;
7603         case 3:
7604                 device_printf(dev,"0x%08x: %02x %02x %02x\n",
7605                         i, buf[0], buf[1], buf[2]);
7606                 break;
7607         case 4:
7608                 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
7609                         buf[0], buf[1], buf[2], buf[3]);
7610                 break;
7611         case 5:
7612                 device_printf(dev,"0x%08x:"
7613                         " %02x %02x %02x %02x %02x\n", i,
7614                         buf[0], buf[1], buf[2], buf[3], buf[4]);
7615                 break;
7616         case 6:
7617                 device_printf(dev,"0x%08x:"
7618                         " %02x %02x %02x %02x %02x %02x\n", i,
7619                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
7620                 break;
7621         case 7:
7622                 device_printf(dev,"0x%08x:"
7623                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
7624                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
7625                 break;
7626         case 8:
7627                 device_printf(dev,"0x%08x:"
7628                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7629                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7630                         buf[7]);
7631                 break;
7632         case 9:
7633                 device_printf(dev,"0x%08x:"
7634                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7635                         " %02x\n", i,
7636                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7637                         buf[7], buf[8]);
7638                 break;
7639         case 10:
7640                 device_printf(dev,"0x%08x:"
7641                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7642                         " %02x %02x\n", i,
7643                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7644                         buf[7], buf[8], buf[9]);
7645                 break;
7646         case 11:
7647                 device_printf(dev,"0x%08x:"
7648                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7649                         " %02x %02x %02x\n", i,
7650                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7651                         buf[7], buf[8], buf[9], buf[10]);
7652                 break;
7653         case 12:
7654                 device_printf(dev,"0x%08x:"
7655                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7656                         " %02x %02x %02x %02x\n", i,
7657                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7658                         buf[7], buf[8], buf[9], buf[10], buf[11]);
7659                 break;
7660         case 13:
7661                 device_printf(dev,"0x%08x:"
7662                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7663                         " %02x %02x %02x %02x %02x\n", i,
7664                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7665                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
7666                 break;
7667         case 14:
7668                 device_printf(dev,"0x%08x:"
7669                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7670                         " %02x %02x %02x %02x %02x %02x\n", i,
7671                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7672                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7673                         buf[13]);
7674                 break;
7675         case 15:
7676                 device_printf(dev,"0x%08x:"
7677                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7678                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
7679                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7680                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7681                         buf[13], buf[14]);
7682                 break;
7683         default:
7684                 break;
7685         }
7686 
7687         device_printf(dev, "%s: %s dump end\n", __func__, msg);
7688 
7689         return;
7690 }
7691 
7692 #ifdef CONFIG_ECORE_SRIOV
7693 
7694 static void
7695 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
7696 {
7697         struct ecore_public_vf_info *vf_info;
7698 
7699         vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);
7700 
7701         if (!vf_info)
7702                 return;
7703 
7704         /* Clear the VF mac */
7705         memset(vf_info->forced_mac, 0, ETH_ALEN);
7706 
7707         vf_info->forced_vlan = 0;
7708 
7709 	return;
7710 }
7711 
7712 void
7713 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
7714 {
7715 	__qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);
7716 	return;
7717 }
7718 
7719 static int
7720 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
7721 	struct ecore_filter_ucast *params)
7722 {
7723         struct ecore_public_vf_info *vf;
7724 
7725 	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
7726 		QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
7727 			"VF[%d] vport not initialized\n", vfid);
7728 		return ECORE_INVAL;
7729 	}
7730 
7731         vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
7732         if (!vf)
7733                 return -EINVAL;
7734 
7735         /* No real decision to make; Store the configured MAC */
7736         if (params->type == ECORE_FILTER_MAC ||
7737             params->type == ECORE_FILTER_MAC_VLAN)
7738                 memcpy(params->mac, vf->forced_mac, ETH_ALEN);
7739 
7740         return 0;
7741 }
7742 
7743 int
7744 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
7745 {
7746 	return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
7747 }
7748 
7749 static int
7750 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
7751         struct ecore_sp_vport_update_params *params, uint16_t * tlvs)
7752 {
7753 	if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
7754 		QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
7755 			"VF[%d] vport not initialized\n", vfid);
7756 		return ECORE_INVAL;
7757 	}
7758 
7759         /* Untrusted VFs can't even be trusted to know that fact.
7760          * Simply indicate everything is configured fine, and trace
7761          * configuration 'behind their back'.
7762          */
7763         if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
7764                 return 0;
7765 
7766         return 0;
7767 
7768 }
7769 int
7770 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
7771 {
7772 	return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
7773 }
7774 
7775 static int
7776 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
7777 {
7778 	int			i;
7779 	struct ecore_dev	*cdev;
7780 
7781 	cdev = p_hwfn->p_dev;
7782 
7783 	for (i = 0; i < cdev->num_hwfns; i++) {
7784 		if (&cdev->hwfns[i] == p_hwfn)
7785 			break;
7786 	}
7787 
7788 	if (i >= cdev->num_hwfns)
7789 		return (-1);
7790 
7791 	return (i);
7792 }
7793 
7794 static int
7795 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
7796 {
7797 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7798 	int i;
7799 
7800 	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
7801 		ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
7802 
7803 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7804 		return (-1);
7805 
7806 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7807 		atomic_testandset_32(&ha->sriov_task[i].flags,
7808 			QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);
7809 
7810 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7811 			&ha->sriov_task[i].pf_task);
7812 	}
7813 
7814 	return (ECORE_SUCCESS);
7815 }
7816 
7817 int
7818 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
7819 {
7820 	return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
7821 }
7822 
7823 static void
7824 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
7825 {
7826 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7827 	int i;
7828 
7829 	if (!ha->sriov_initialized)
7830 		return;
7831 
7832 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p \n",
7833 		ha, p_hwfn->p_dev, p_hwfn);
7834 
7835 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7836 		return;
7837 
7838 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7839 		atomic_testandset_32(&ha->sriov_task[i].flags,
7840 			QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);
7841 
7842 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7843 			&ha->sriov_task[i].pf_task);
7844 	}
7845 
7846 	return;
7847 }
7848 
7849 void
7850 qlnx_vf_flr_update(void *p_hwfn)
7851 {
7852 	__qlnx_vf_flr_update(p_hwfn);
7853 
7854 	return;
7855 }
7856 
7857 #ifndef QLNX_VF
7858 
7859 static void
7860 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
7861 {
7862 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7863 	int i;
7864 
7865 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p \n",
7866 		ha, p_hwfn->p_dev, p_hwfn);
7867 
7868 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7869 		return;
7870 
7871 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p i = %d\n",
7872 		ha, p_hwfn->p_dev, p_hwfn, i);
7873 
7874 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7875 		atomic_testandset_32(&ha->sriov_task[i].flags,
7876 			QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);
7877 
7878 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7879 			&ha->sriov_task[i].pf_task);
7880 	}
7881 }
7882 
7883 static void
7884 qlnx_initialize_sriov(qlnx_host_t *ha)
7885 {
7886 	device_t	dev;
7887 	nvlist_t	*pf_schema, *vf_schema;
7888 	int		iov_error;
7889 
7890 	dev = ha->pci_dev;
7891 
7892 	pf_schema = pci_iov_schema_alloc_node();
7893 	vf_schema = pci_iov_schema_alloc_node();
7894 
7895 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
7896 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
7897 		IOV_SCHEMA_HASDEFAULT, FALSE);
7898 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
7899 		IOV_SCHEMA_HASDEFAULT, FALSE);
7900 	pci_iov_schema_add_uint16(vf_schema, "num-queues",
7901 		IOV_SCHEMA_HASDEFAULT, 1);
7902 
7903 	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
7904 
7905 	if (iov_error != 0) {
7906 		ha->sriov_initialized = 0;
7907 	} else {
7908 		device_printf(dev, "SRIOV initialized\n");
7909 		ha->sriov_initialized = 1;
7910 	}
7911 
7912 	return;
7913 }
7914 
7915 static void
7916 qlnx_sriov_disable(qlnx_host_t *ha)
7917 {
7918 	struct ecore_dev *cdev;
7919 	int i, j;
7920 
7921 	cdev = &ha->cdev;
7922 
7923 	ecore_iov_set_vfs_to_disable(cdev, true);
7924 
7925 	for_each_hwfn(cdev, i) {
7926 		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
7927 		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
7928 
7929 		if (!ptt) {
7930 			QL_DPRINT1(ha, "Failed to acquire ptt\n");
7931 			return;
7932 		}
7933 		/* Clean WFQ db and configure equal weight for all vports */
7934 		ecore_clean_wfq_db(hwfn, ptt);
7935 
7936 		ecore_for_each_vf(hwfn, j) {
7937 			int k = 0;
7938 
7939 			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
7940 				continue;
7941 
7942 			if (ecore_iov_is_vf_started(hwfn, j)) {
7943 				/* Wait until VF is disabled before releasing */
7944 
7945 				for (k = 0; k < 100; k++) {
7946 					if (!ecore_iov_is_vf_stopped(hwfn, j)) {
7947 						qlnx_mdelay(__func__, 10);
7948 					} else
7949 						break;
7950 				}
7951 			}
7952 
7953 			if (k < 100)
7954 				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
7955                                                           ptt, j);
7956 			else {
7957 				QL_DPRINT1(ha,
7958 					"Timeout waiting for VF's FLR to end\n");
7959 			}
7960 		}
7961 		ecore_ptt_release(hwfn, ptt);
7962 	}
7963 
7964 	ecore_iov_set_vfs_to_disable(cdev, false);
7965 
7966 	return;
7967 }
7968 
7969 static void
7970 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
7971 	struct ecore_iov_vf_init_params *params)
7972 {
7973         u16 base, i;
7974 
7975         /* Since we have an equal resource distribution per-VF, and we assume
7976          * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
7977          * sequentially from there.
7978          */
7979         base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;
7980 
7981         params->rel_vf_id = vfid;
7982 
7983         for (i = 0; i < params->num_queues; i++) {
7984                 params->req_rx_queue[i] = base + i;
7985                 params->req_tx_queue[i] = base + i;
7986         }
7987 
7988         /* PF uses indices 0 for itself; Set vport/RSS afterwards */
7989         params->vport_id = vfid + 1;
7990         params->rss_eng_id = vfid + 1;
7991 
7992 	return;
7993 }
7994 
7995 static int
7996 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
7997 {
7998 	qlnx_host_t		*ha;
7999 	struct ecore_dev	*cdev;
8000 	struct ecore_iov_vf_init_params params;
8001 	int ret, j, i;
8002 	uint32_t max_vfs;
8003 
8004 	if ((ha = device_get_softc(dev)) == NULL) {
8005 		device_printf(dev, "%s: cannot get softc\n", __func__);
8006 		return (-1);
8007 	}
8008 
8009 	if (qlnx_create_pf_taskqueues(ha) != 0)
8010 		goto qlnx_iov_init_err0;
8011 
8012 	cdev = &ha->cdev;
8013 
8014 	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);
8015 
8016 	QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
8017 		dev, num_vfs, max_vfs);
8018 
8019         if (num_vfs >= max_vfs) {
8020                 QL_DPRINT1(ha, "Can start at most %d VFs\n",
8021                           (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
8022 		goto qlnx_iov_init_err0;
8023         }
8024 
8025 	ha->vf_attr =  malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
8026 				M_NOWAIT);
8027 
8028 	if (ha->vf_attr == NULL)
8029 		goto qlnx_iov_init_err0;
8030 
8031         memset(&params, 0, sizeof(params));
8032 
8033         /* Initialize HW for VF access */
8034         for_each_hwfn(cdev, j) {
8035                 struct ecore_hwfn *hwfn = &cdev->hwfns[j];
8036                 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
8037 
8038                 /* Make sure not to use more than 16 queues per VF */
8039                 params.num_queues = min_t(int,
8040                                           (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
8041                                           16);
8042 
8043                 if (!ptt) {
8044                         QL_DPRINT1(ha, "Failed to acquire ptt\n");
8045                         goto qlnx_iov_init_err1;
8046                 }
8047 
8048                 for (i = 0; i < num_vfs; i++) {
8049                         if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
8050                                 continue;
8051 
8052                         qlnx_sriov_enable_qid_config(hwfn, i, &params);
8053 
8054                         ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);
8055 
8056                         if (ret) {
8057                                 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
8058                                 ecore_ptt_release(hwfn, ptt);
8059                                 goto qlnx_iov_init_err1;
8060                         }
8061                 }
8062 
8063                 ecore_ptt_release(hwfn, ptt);
8064         }
8065 
8066 	ha->num_vfs = num_vfs;
8067 	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);
8068 
8069 	QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);
8070 
8071 	return (0);
8072 
8073 qlnx_iov_init_err1:
8074 	qlnx_sriov_disable(ha);
8075 
8076 qlnx_iov_init_err0:
8077 	qlnx_destroy_pf_taskqueues(ha);
8078 	ha->num_vfs = 0;
8079 
8080 	return (-1);
8081 }
8082 
8083 static void
8084 qlnx_iov_uninit(device_t dev)
8085 {
8086 	qlnx_host_t	*ha;
8087 
8088 	if ((ha = device_get_softc(dev)) == NULL) {
8089 		device_printf(dev, "%s: cannot get softc\n", __func__);
8090 		return;
8091 	}
8092 
8093 	QL_DPRINT2(ha," dev = %p enter\n", dev);
8094 
8095 	qlnx_sriov_disable(ha);
8096 	qlnx_destroy_pf_taskqueues(ha);
8097 
8098 	free(ha->vf_attr, M_QLNXBUF);
8099 	ha->vf_attr = NULL;
8100 
8101 	ha->num_vfs = 0;
8102 
8103 	QL_DPRINT2(ha," dev = %p exit\n", dev);
8104 	return;
8105 }
8106 
8107 static int
8108 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
8109 {
8110 	qlnx_host_t	*ha;
8111 	qlnx_vf_attr_t	*vf_attr;
8112 	unsigned const char *mac;
8113 	size_t size;
8114 	struct ecore_hwfn *p_hwfn;
8115 
8116 	if ((ha = device_get_softc(dev)) == NULL) {
8117 		device_printf(dev, "%s: cannot get softc\n", __func__);
8118 		return (-1);
8119 	}
8120 
8121 	QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);
8122 
8123 	if (vfnum > (ha->num_vfs - 1)) {
8124 		QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
8125 			vfnum, (ha->num_vfs - 1));
8126 	}
8127 
8128 	vf_attr = &ha->vf_attr[vfnum];
8129 
8130         if (nvlist_exists_binary(params, "mac-addr")) {
8131                 mac = nvlist_get_binary(params, "mac-addr", &size);
8132                 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
8133 		device_printf(dev,
8134 			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
8135 			__func__, vf_attr->mac_addr[0],
8136 			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
8137 			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
8138 			vf_attr->mac_addr[5]);
8139 		p_hwfn = &ha->cdev.hwfns[0];
8140 		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
8141 			vfnum);
8142 	}
8143 
8144 	QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);
8145 	return (0);
8146 }
8147 
8148 static void
8149 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8150 {
8151         uint64_t events[ECORE_VF_ARRAY_LENGTH];
8152         struct ecore_ptt *ptt;
8153         int i;
8154 
8155         ptt = ecore_ptt_acquire(p_hwfn);
8156         if (!ptt) {
8157                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8158 		__qlnx_pf_vf_msg(p_hwfn, 0);
8159                 return;
8160         }
8161 
8162         ecore_iov_pf_get_pending_events(p_hwfn, events);
8163 
8164         QL_DPRINT2(ha, "Event mask of VF events:"
8165 		"0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n",
8166                    events[0], events[1], events[2]);
8167 
8168         ecore_for_each_vf(p_hwfn, i) {
8169                 /* Skip VFs with no pending messages */
8170                 if (!(events[i / 64] & (1ULL << (i % 64))))
8171                         continue;
8172 
8173 		QL_DPRINT2(ha,
8174                            "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
8175                            i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
8176 
8177                 /* Copy VF's message to PF's request buffer for that VF */
8178                 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
8179                         continue;
8180 
8181                 ecore_iov_process_mbx_req(p_hwfn, ptt, i);
8182         }
8183 
8184         ecore_ptt_release(p_hwfn, ptt);
8185 
8186 	return;
8187 }
8188 
8189 static void
8190 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8191 {
8192         struct ecore_ptt *ptt;
8193 	int ret;
8194 
8195 	ptt = ecore_ptt_acquire(p_hwfn);
8196 
8197 	if (!ptt) {
8198                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8199 		__qlnx_vf_flr_update(p_hwfn);
8200                 return;
8201 	}
8202 
8203 	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);
8204 
8205 	if (ret) {
8206                 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
8207 	}
8208 
8209 	ecore_ptt_release(p_hwfn, ptt);
8210 
8211 	return;
8212 }
8213 
8214 static void
8215 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8216 {
8217         struct ecore_ptt *ptt;
8218 	int i;
8219 
8220 	ptt = ecore_ptt_acquire(p_hwfn);
8221 
8222 	if (!ptt) {
8223                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8224 		qlnx_vf_bulleting_update(p_hwfn);
8225                 return;
8226 	}
8227 
8228 	ecore_for_each_vf(p_hwfn, i) {
8229 		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
8230 			p_hwfn, i);
8231 		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
8232 	}
8233 
8234 	ecore_ptt_release(p_hwfn, ptt);
8235 
8236 	return;
8237 }
8238 
8239 static void
8240 qlnx_pf_taskqueue(void *context, int pending)
8241 {
8242 	struct ecore_hwfn	*p_hwfn;
8243 	qlnx_host_t		*ha;
8244 	int			i;
8245 
8246 	p_hwfn = context;
8247 
8248 	if (p_hwfn == NULL)
8249 		return;
8250 
8251 	ha = (qlnx_host_t *)(p_hwfn->p_dev);
8252 
8253 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
8254 		return;
8255 
8256 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8257 		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
8258 		qlnx_handle_vf_msg(ha, p_hwfn);
8259 
8260 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8261 		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
8262 		qlnx_handle_vf_flr_update(ha, p_hwfn);
8263 
8264 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8265 		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
8266 		qlnx_handle_bulletin_update(ha, p_hwfn);
8267 
8268 	return;
8269 }
8270 
8271 static int
8272 qlnx_create_pf_taskqueues(qlnx_host_t *ha)
8273 {
8274 	int	i;
8275 	uint8_t	tq_name[32];
8276 
8277 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
8278                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
8279 
8280 		bzero(tq_name, sizeof (tq_name));
8281 		snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);
8282 
8283 		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);
8284 
8285 		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
8286 			 taskqueue_thread_enqueue,
8287 			&ha->sriov_task[i].pf_taskqueue);
8288 
8289 		if (ha->sriov_task[i].pf_taskqueue == NULL)
8290 			return (-1);
8291 
8292 		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
8293 			PI_NET, "%s", tq_name);
8294 
8295 		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
8296 	}
8297 
8298 	return (0);
8299 }
8300 
8301 static void
8302 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
8303 {
8304 	int	i;
8305 
8306 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
8307 		if (ha->sriov_task[i].pf_taskqueue != NULL) {
8308 			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
8309 				&ha->sriov_task[i].pf_task);
8310 			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
8311 			ha->sriov_task[i].pf_taskqueue = NULL;
8312 		}
8313 	}
8314 	return;
8315 }
8316 
8317 static void
8318 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
8319 {
8320 	struct ecore_mcp_link_capabilities caps;
8321 	struct ecore_mcp_link_params params;
8322 	struct ecore_mcp_link_state link;
8323 	int i;
8324 
8325 	if (!p_hwfn->pf_iov_info)
8326 		return;
8327 
8328 	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
8329 	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
8330 	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));
8331 
8332 	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
8333         memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
8334         memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
8335 
8336 	QL_DPRINT2(ha, "called\n");
8337 
8338         /* Update bulletin of all future possible VFs with link configuration */
8339         for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
8340                 /* Modify link according to the VF's configured link state */
8341 
8342                 link.link_up = false;
8343 
8344                 if (ha->link_up) {
8345                         link.link_up = true;
8346                         /* Set speed according to maximum supported by HW.
8347                          * that is 40G for regular devices and 100G for CMT
8348                          * mode devices.
8349                          */
8350                         link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
8351 						100000 : link.speed;
8352 		}
8353 		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);
8354                 ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
8355         }
8356 
8357 	qlnx_vf_bulleting_update(p_hwfn);
8358 
8359 	return;
8360 }
8361 #endif /* #ifndef QLNX_VF */
8362 #endif /* #ifdef CONFIG_ECORE_SRIOV */
8363