1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File: qlnx_os.c
30  * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
31  */
32 
33 #include "opt_inet.h"
34 
35 #include <sys/cdefs.h>
36 #include "qlnx_os.h"
37 #include "bcm_osal.h"
38 #include "reg_addr.h"
39 #include "ecore_gtt_reg_addr.h"
40 #include "ecore.h"
41 #include "ecore_chain.h"
42 #include "ecore_status.h"
43 #include "ecore_hw.h"
44 #include "ecore_rt_defs.h"
45 #include "ecore_init_ops.h"
46 #include "ecore_int.h"
47 #include "ecore_cxt.h"
48 #include "ecore_spq.h"
49 #include "ecore_init_fw_funcs.h"
50 #include "ecore_sp_commands.h"
51 #include "ecore_dev_api.h"
52 #include "ecore_l2_api.h"
53 #include "ecore_mcp.h"
54 #include "ecore_hw_defs.h"
55 #include "mcp_public.h"
56 #include "ecore_iro.h"
57 #include "nvm_cfg.h"
58 #include "ecore_dbg_fw_funcs.h"
59 #include "ecore_iov_api.h"
60 #include "ecore_vf_api.h"
61 
62 #include "qlnx_ioctl.h"
63 #include "qlnx_def.h"
64 #include "qlnx_ver.h"
65 
66 #ifdef QLNX_ENABLE_IWARP
67 #include "qlnx_rdma.h"
68 #endif /* #ifdef QLNX_ENABLE_IWARP */
69 
70 #ifdef CONFIG_ECORE_SRIOV
71 #include <sys/nv.h>
72 #include <sys/iov_schema.h>
73 #include <dev/pci/pci_iov.h>
74 #endif /* #ifdef CONFIG_ECORE_SRIOV */
75 
76 #include <sys/smp.h>
77 
78 /*
79  * static functions
80  */
81 /*
82  * ioctl related functions
83  */
84 static void qlnx_add_sysctls(qlnx_host_t *ha);
85 
86 /*
87  * main driver
88  */
89 static void qlnx_release(qlnx_host_t *ha);
90 static void qlnx_fp_isr(void *arg);
91 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
92 static void qlnx_init(void *arg);
93 static void qlnx_init_locked(qlnx_host_t *ha);
94 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
95 static int qlnx_set_promisc(qlnx_host_t *ha, int enabled);
96 static int qlnx_set_allmulti(qlnx_host_t *ha, int enabled);
97 static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data);
98 static int qlnx_media_change(if_t ifp);
99 static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr);
100 static void qlnx_stop(qlnx_host_t *ha);
101 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
102 		struct mbuf **m_headp);
103 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
104 static uint32_t qlnx_get_optics(qlnx_host_t *ha,
105 			struct qlnx_link_output *if_link);
106 static int qlnx_transmit(if_t ifp, struct mbuf  *mp);
107 static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp,
108 		struct mbuf *mp);
109 static void qlnx_qflush(if_t ifp);
110 
111 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
112 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
113 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
114 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
115 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
116 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
117 
118 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
119 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
120 
121 static int qlnx_nic_setup(struct ecore_dev *cdev,
122 		struct ecore_pf_params *func_params);
123 static int qlnx_nic_start(struct ecore_dev *cdev);
124 static int qlnx_slowpath_start(qlnx_host_t *ha);
125 static int qlnx_slowpath_stop(qlnx_host_t *ha);
126 static int qlnx_init_hw(qlnx_host_t *ha);
127 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
128 		char ver_str[VER_SIZE]);
129 static void qlnx_unload(qlnx_host_t *ha);
130 static int qlnx_load(qlnx_host_t *ha);
131 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
132 		uint32_t add_mac);
133 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
134 		uint32_t len);
135 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
136 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
137 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
138 		struct qlnx_rx_queue *rxq);
139 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
140 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
141 		int hwfn_index);
142 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
143 		int hwfn_index);
144 static void qlnx_timer(void *arg);
145 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
146 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
147 static void qlnx_trigger_dump(qlnx_host_t *ha);
148 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
149 			struct qlnx_tx_queue *txq);
150 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
151 		struct qlnx_tx_queue *txq);
152 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
153 		int lro_enable);
154 static void qlnx_fp_taskqueue(void *context, int pending);
155 static void qlnx_sample_storm_stats(qlnx_host_t *ha);
156 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
157 		struct qlnx_agg_info *tpa);
158 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
159 
160 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
161 
162 /*
163  * Hooks to the Operating Systems
164  */
165 static int qlnx_pci_probe (device_t);
166 static int qlnx_pci_attach (device_t);
167 static int qlnx_pci_detach (device_t);
168 
169 #ifndef QLNX_VF
170 
171 #ifdef CONFIG_ECORE_SRIOV
172 
173 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
174 static void qlnx_iov_uninit(device_t dev);
175 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
176 static void qlnx_initialize_sriov(qlnx_host_t *ha);
177 static void qlnx_pf_taskqueue(void *context, int pending);
178 static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
179 static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
180 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);
181 
182 #endif /* #ifdef CONFIG_ECORE_SRIOV */
183 
184 static device_method_t qlnx_pci_methods[] = {
185 	/* Device interface */
186 	DEVMETHOD(device_probe, qlnx_pci_probe),
187 	DEVMETHOD(device_attach, qlnx_pci_attach),
188 	DEVMETHOD(device_detach, qlnx_pci_detach),
189 
190 #ifdef CONFIG_ECORE_SRIOV
191 	DEVMETHOD(pci_iov_init, qlnx_iov_init),
192 	DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
193 	DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
194 #endif /* #ifdef CONFIG_ECORE_SRIOV */
195 	{ 0, 0 }
196 };
197 
198 static driver_t qlnx_pci_driver = {
199 	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
200 };
201 
202 MODULE_VERSION(if_qlnxe, 1);
203 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0);
204 
205 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
206 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
207 
208 #else
209 
210 static device_method_t qlnxv_pci_methods[] = {
211 	/* Device interface */
212 	DEVMETHOD(device_probe, qlnx_pci_probe),
213 	DEVMETHOD(device_attach, qlnx_pci_attach),
214 	DEVMETHOD(device_detach, qlnx_pci_detach),
215 	{ 0, 0 }
216 };
217 
218 static driver_t qlnxv_pci_driver = {
219 	"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
220 };
221 
222 MODULE_VERSION(if_qlnxev, 1);
223 DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0);
224 
225 MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
226 MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);
227 
228 #endif /* #ifndef QLNX_VF */
229 
230 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
231 
232 char qlnx_ver_str[VER_SIZE];
233 char qlnx_name_str[NAME_SIZE];
234 
235 /*
236  * Some PCI Configuration Space Related Defines
237  */
238 
239 #ifndef PCI_VENDOR_QLOGIC
240 #define PCI_VENDOR_QLOGIC		0x1077
241 #endif
242 
243 /* 40G Adapter QLE45xxx*/
244 #ifndef QLOGIC_PCI_DEVICE_ID_1634
245 #define QLOGIC_PCI_DEVICE_ID_1634	0x1634
246 #endif
247 
248 /* 100G Adapter QLE45xxx*/
249 #ifndef QLOGIC_PCI_DEVICE_ID_1644
250 #define QLOGIC_PCI_DEVICE_ID_1644	0x1644
251 #endif
252 
253 /* 25G Adapter QLE45xxx*/
254 #ifndef QLOGIC_PCI_DEVICE_ID_1656
255 #define QLOGIC_PCI_DEVICE_ID_1656	0x1656
256 #endif
257 
258 /* 50G Adapter QLE45xxx*/
259 #ifndef QLOGIC_PCI_DEVICE_ID_1654
260 #define QLOGIC_PCI_DEVICE_ID_1654	0x1654
261 #endif
262 
263 /* 10G/25G/40G Adapter QLE41xxx*/
264 #ifndef QLOGIC_PCI_DEVICE_ID_8070
265 #define QLOGIC_PCI_DEVICE_ID_8070	0x8070
266 #endif
267 
268 /* SRIOV Device (All Speeds) Adapter QLE41xxx*/
269 #ifndef QLOGIC_PCI_DEVICE_ID_8090
270 #define QLOGIC_PCI_DEVICE_ID_8090	0x8090
271 #endif
272 
273 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
274     "qlnxe driver parameters");
275 
276 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
277 static int qlnxe_queue_count = QLNX_DEFAULT_RSS;
278 
279 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
280 		&qlnxe_queue_count, 0, "Multi-Queue queue count");
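/*
 * Since queue_count is CTLFLAG_RDTUN, it can only be set as a boot-time
 * tunable, for example in /boot/loader.conf (illustrative value):
 *
 *	hw.qlnxe.queue_count="8"
 *
 * A value of 0 selects the driver default (QLNX_DEFAULT_RSS queues,
 * further limited by the CPU and MSI-X vector counts at attach time).
 */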
281 
282 /*
283  * Note on RDMA personality setting
284  *
285  * Read the personality configured in NVRAM
286  * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
287  * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT,
288  * use the personality in NVRAM.
289  *
290  * Otherwise use the personality configured in sysctl.
291  *
292  */
293 #define QLNX_PERSONALITY_DEFAULT	0x0  /* use personality in NVRAM */
294 #define QLNX_PERSONALITY_ETH_ONLY	0x1  /* Override with ETH_ONLY */
295 #define QLNX_PERSONALITY_ETH_IWARP	0x2  /* Override with ETH_IWARP */
296 #define QLNX_PERSONALITY_ETH_ROCE	0x3  /* Override with ETH_ROCE */
297 #define QLNX_PERSONALITY_BITS_PER_FUNC	4
298 #define QLNX_PERSONALITY_MASK		0xF
299 
300 /* RDMA configuration; the 64-bit field holds a 4-bit setting for each of 16 physical functions */
301 static uint64_t qlnxe_rdma_configuration = 0x22222222;
302 
303 SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
304                 &qlnxe_rdma_configuration, 0, "RDMA Configuration");
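/*
 * Each PCI function owns a 4-bit nibble of qlnxe_rdma_configuration
 * (see qlnx_get_personality() below). Example decode with the default
 * value 0x22222222: function 3 gets ((0x22222222 >> (3 * 4)) & 0xF) ==
 * 0x2, i.e. QLNX_PERSONALITY_ETH_IWARP. Being CTLFLAG_RDTUN, the value
 * is set from /boot/loader.conf, e.g. hw.qlnxe.rdma_configuration=0
 * (illustrative) to defer to the NVRAM personality on every function.
 */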
305 
306 int
307 qlnx_vf_device(qlnx_host_t *ha)
308 {
309         uint16_t	device_id;
310 
311         device_id = ha->device_id;
312 
313         if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
314                 return 0;
315 
316         return -1;
317 }
318 
319 static int
320 qlnx_valid_device(qlnx_host_t *ha)
321 {
322         uint16_t device_id;
323 
324         device_id = ha->device_id;
325 
326 #ifndef QLNX_VF
327         if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
328                 (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
329                 (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
330                 (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
331                 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
332                 return 0;
333 #else
334         if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
335 		return 0;
336 
337 #endif /* #ifndef QLNX_VF */
338         return -1;
339 }
340 
341 #ifdef QLNX_ENABLE_IWARP
342 static int
343 qlnx_rdma_supported(struct qlnx_host *ha)
344 {
345 	uint16_t device_id;
346 
347 	device_id = pci_get_device(ha->pci_dev);
348 
349 	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
350 		(device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
351 		(device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
352 		(device_id == QLOGIC_PCI_DEVICE_ID_8070))
353 		return (0);
354 
355 	return (-1);
356 }
357 #endif /* #ifdef QLNX_ENABLE_IWARP */
358 
359 /*
360  * Name:	qlnx_pci_probe
361  * Function:	Validate that the PCI device is a supported QLogic
362  *		QLE45xxx/QLE41xxx Ethernet function
362  */
363 static int
364 qlnx_pci_probe(device_t dev)
365 {
366 	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
367 		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
368 	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");
369 
370 	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
371                 return (ENXIO);
372 	}
373 
374         switch (pci_get_device(dev)) {
375 #ifndef QLNX_VF
376 
377         case QLOGIC_PCI_DEVICE_ID_1644:
378 		device_set_descf(dev, "%s v%d.%d.%d",
379 			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
380 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
381 			QLNX_VERSION_BUILD);
382                 break;
383 
384         case QLOGIC_PCI_DEVICE_ID_1634:
385 		device_set_descf(dev, "%s v%d.%d.%d",
386 			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
387 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
388 			QLNX_VERSION_BUILD);
389                 break;
390 
391         case QLOGIC_PCI_DEVICE_ID_1656:
392 		device_set_descf(dev, "%s v%d.%d.%d",
393 			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
394 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
395 			QLNX_VERSION_BUILD);
396                 break;
397 
398         case QLOGIC_PCI_DEVICE_ID_1654:
399 		device_set_descf(dev, "%s v%d.%d.%d",
400 			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
401 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
402 			QLNX_VERSION_BUILD);
403                 break;
404 
405 	case QLOGIC_PCI_DEVICE_ID_8070:
406 		device_set_descf(dev, "%s v%d.%d.%d",
407 			"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
408 			" Adapter-Ethernet Function",
409 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
410 			QLNX_VERSION_BUILD);
411 		break;
412 
413 #else
414 	case QLOGIC_PCI_DEVICE_ID_8090:
415 		device_set_descf(dev, "%s v%d.%d.%d",
416 			"Qlogic SRIOV PCI CNA (AH) "
417 			"Adapter-Ethernet Function",
418 			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
419 			QLNX_VERSION_BUILD);
420 		break;
421 
422 #endif /* #ifndef QLNX_VF */
423 
424         default:
425                 return (ENXIO);
426         }
427 
428 #ifdef QLNX_ENABLE_IWARP
429 	qlnx_rdma_init();
430 #endif /* #ifdef QLNX_ENABLE_IWARP */
431 
432         return (BUS_PROBE_DEFAULT);
433 }
434 
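/*
 * Name:	qlnx_num_tx_compl
 * Function:	Returns the number of transmit completions outstanding
 *		on txq. Both indices are u16, so the unsigned subtraction
 *		below still yields the correct count after the hardware
 *		consumer index wraps past 0xFFFF.
 */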
435 static uint16_t
436 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
437 	struct qlnx_tx_queue *txq)
438 {
439 	u16 hw_bd_cons;
440 	u16 ecore_cons_idx;
441 
442 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
443 
444 	ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
445 
446 	return (hw_bd_cons - ecore_cons_idx);
447 }
448 
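/*
 * Name:	qlnx_sp_intr
 * Function:	Slow path interrupt handler. Matches the interrupting
 *		hwfn to its index and defers servicing (qlnx_sp_isr() via
 *		qlnx_sp_taskqueue()) to that hwfn's slow path taskqueue.
 */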
449 static void
450 qlnx_sp_intr(void *arg)
451 {
452 	struct ecore_hwfn	*p_hwfn;
453 	qlnx_host_t		*ha;
454 	int			i;
455 
456 	p_hwfn = arg;
457 
458 	if (p_hwfn == NULL) {
459 		printf("%s: spurious slowpath intr\n", __func__);
460 		return;
461 	}
462 
463 	ha = (qlnx_host_t *)p_hwfn->p_dev;
464 
465 	QL_DPRINT2(ha, "enter\n");
466 
467 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
468 		if (&ha->cdev.hwfns[i] == p_hwfn) {
469 			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
470 			break;
471 		}
472 	}
473 	QL_DPRINT2(ha, "exit\n");
474 
475 	return;
476 }
477 
478 static void
479 qlnx_sp_taskqueue(void *context, int pending)
480 {
481 	struct ecore_hwfn	*p_hwfn;
482 
483 	p_hwfn = context;
484 
485 	if (p_hwfn != NULL) {
486 		qlnx_sp_isr(p_hwfn);
487 	}
488 	return;
489 }
490 
491 static int
492 qlnx_create_sp_taskqueues(qlnx_host_t *ha)
493 {
494 	int	i;
495 	uint8_t	tq_name[32];
496 
497 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
498                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
499 
500 		bzero(tq_name, sizeof (tq_name));
501 		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);
502 
503 		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
504 
505 		ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
506 			 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
507 
508 		if (ha->sp_taskqueue[i] == NULL)
509 			return (-1);
510 
511 		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
512 			tq_name);
513 
514 		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
515 	}
516 
517 	return (0);
518 }
519 
520 static void
521 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
522 {
523 	int	i;
524 
525 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
526 		if (ha->sp_taskqueue[i] != NULL) {
527 			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
528 			taskqueue_free(ha->sp_taskqueue[i]);
529 		}
530 	}
531 	return;
532 }
533 
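/*
 * Name:	qlnx_fp_taskqueue
 * Function:	Fast path transmit task. If the interface is running and
 *		the buf_ring holds frames, it takes the tx mutex with
 *		mtx_trylock() (a contended lock means another context is
 *		already draining) and flushes the ring through
 *		qlnx_transmit_locked().
 */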
534 static void
535 qlnx_fp_taskqueue(void *context, int pending)
536 {
537         struct qlnx_fastpath	*fp;
538         qlnx_host_t		*ha;
539         if_t			ifp;
540 
541         fp = context;
542 
543         if (fp == NULL)
544                 return;
545 
546 	ha = (qlnx_host_t *)fp->edev;
547 
548 	ifp = ha->ifp;
549 
550         if(if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
551                 if (!drbr_empty(ifp, fp->tx_br)) {
552                         if(mtx_trylock(&fp->tx_mtx)) {
553 #ifdef QLNX_TRACE_PERF_DATA
554                                 tx_pkts = fp->tx_pkts_transmitted;
555                                 tx_compl = fp->tx_pkts_completed;
556 #endif
557 
558                                 qlnx_transmit_locked(ifp, fp, NULL);
559 
560 #ifdef QLNX_TRACE_PERF_DATA
561                                 fp->tx_pkts_trans_fp +=
562 					(fp->tx_pkts_transmitted - tx_pkts);
563                                 fp->tx_pkts_compl_fp +=
564 					(fp->tx_pkts_completed - tx_compl);
565 #endif
566                                 mtx_unlock(&fp->tx_mtx);
567                         }
568                 }
569         }
570 
571         QL_DPRINT2(ha, "exit \n");
572         return;
573 }
574 
575 static int
576 qlnx_create_fp_taskqueues(qlnx_host_t *ha)
577 {
578 	int	i;
579 	uint8_t	tq_name[32];
580 	struct qlnx_fastpath *fp;
581 
582 	for (i = 0; i < ha->num_rss; i++) {
583                 fp = &ha->fp_array[i];
584 
585 		bzero(tq_name, sizeof (tq_name));
586 		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
587 
588 		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);
589 
590 		fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
591 					taskqueue_thread_enqueue,
592 					&fp->fp_taskqueue);
593 
594 		if (fp->fp_taskqueue == NULL)
595 			return (-1);
596 
597 		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
598 			tq_name);
599 
600 		QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
601 	}
602 
603 	return (0);
604 }
605 
606 static void
607 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
608 {
609 	int			i;
610 	struct qlnx_fastpath	*fp;
611 
612 	for (i = 0; i < ha->num_rss; i++) {
613                 fp = &ha->fp_array[i];
614 
615 		if (fp->fp_taskqueue != NULL) {
616 			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
617 			taskqueue_free(fp->fp_taskqueue);
618 			fp->fp_taskqueue = NULL;
619 		}
620 	}
621 	return;
622 }
623 
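/*
 * Name:	qlnx_drain_fp_taskqueues
 * Function:	Waits for all fast path tasks to complete. The caller
 *		holds QLNX_LOCK; it is dropped around taskqueue_drain(),
 *		presumably so a task needing the lock can still finish,
 *		and reacquired before returning.
 */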
624 static void
625 qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
626 {
627 	int			i;
628 	struct qlnx_fastpath	*fp;
629 
630 	for (i = 0; i < ha->num_rss; i++) {
631                 fp = &ha->fp_array[i];
632 
633 		if (fp->fp_taskqueue != NULL) {
634 			QLNX_UNLOCK(ha);
635 			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
636 			QLNX_LOCK(ha);
637 		}
638 	}
639 	return;
640 }
641 
642 static void
643 qlnx_get_params(qlnx_host_t *ha)
644 {
645 	if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
646 		device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
647 			qlnxe_queue_count);
648 		qlnxe_queue_count = 0;
649 	}
650 	return;
651 }
652 
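/*
 * Name:	qlnx_error_recovery_taskqueue
 * Function:	Error recovery task: stops the interface, restarts the
 *		slow path (removing and re-adding the RDMA device when
 *		iWARP is enabled), reinitializes the interface and rearms
 *		the periodic timer.
 */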
653 static void
654 qlnx_error_recovery_taskqueue(void *context, int pending)
655 {
656         qlnx_host_t *ha;
657 
658         ha = context;
659 
660         QL_DPRINT2(ha, "enter\n");
661 
662         QLNX_LOCK(ha);
663         qlnx_stop(ha);
664         QLNX_UNLOCK(ha);
665 
666 #ifdef QLNX_ENABLE_IWARP
667 	qlnx_rdma_dev_remove(ha);
668 #endif /* #ifdef QLNX_ENABLE_IWARP */
669 
670         qlnx_slowpath_stop(ha);
671         qlnx_slowpath_start(ha);
672 
673 #ifdef QLNX_ENABLE_IWARP
674 	qlnx_rdma_dev_add(ha);
675 #endif /* #ifdef QLNX_ENABLE_IWARP */
676 
677         qlnx_init(ha);
678 
679         callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
680 
681         QL_DPRINT2(ha, "exit\n");
682 
683         return;
684 }
685 
686 static int
687 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
688 {
689         uint8_t tq_name[32];
690 
691         bzero(tq_name, sizeof (tq_name));
692         snprintf(tq_name, sizeof (tq_name), "ql_err_tq");
693 
694         TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);
695 
696         ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
697                                 taskqueue_thread_enqueue, &ha->err_taskqueue);
698 
699         if (ha->err_taskqueue == NULL)
700                 return (-1);
701 
702         taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);
703 
704         QL_DPRINT1(ha, "%p\n",ha->err_taskqueue);
705 
706         return (0);
707 }
708 
709 static void
710 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
711 {
712         if (ha->err_taskqueue != NULL) {
713                 taskqueue_drain(ha->err_taskqueue, &ha->err_task);
714                 taskqueue_free(ha->err_taskqueue);
715         }
716 
717         ha->err_taskqueue = NULL;
718 
719         return;
720 }
721 
722 /*
723  * Name:	qlnx_pci_attach
724  * Function:	attaches the device to the operating system
725  */
726 static int
727 qlnx_pci_attach(device_t dev)
728 {
729 	qlnx_host_t	*ha = NULL;
730 	uint32_t	rsrc_len_reg __unused = 0;
731 	uint32_t	rsrc_len_dbells = 0;
732 	uint32_t	rsrc_len_msix __unused = 0;
733 	int		i;
734 	uint32_t	mfw_ver;
735 	uint32_t	num_sp_msix = 0;
736 	uint32_t	num_rdma_irqs = 0;
737 
738         if ((ha = device_get_softc(dev)) == NULL) {
739                 device_printf(dev, "cannot get softc\n");
740                 return (ENOMEM);
741         }
742 
743         memset(ha, 0, sizeof (qlnx_host_t));
744 
745         ha->device_id = pci_get_device(dev);
746 
747         if (qlnx_valid_device(ha) != 0) {
748                 device_printf(dev, "device is not a valid device\n");
749                 return (ENXIO);
750 	}
751         ha->pci_func = pci_get_function(dev);
752 
753         ha->pci_dev = dev;
754 
755 	sx_init(&ha->hw_lock, "qlnx_hw_lock");
756 
757         ha->flags.lock_init = 1;
758 
759         pci_enable_busmaster(dev);
760 
761 	/*
762 	 * map the PCI BARs
763 	 */
764 
765         ha->reg_rid = PCIR_BAR(0);
766         ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
767                                 RF_ACTIVE);
768 
769         if (ha->pci_reg == NULL) {
770                 device_printf(dev, "unable to map BAR0\n");
771                 goto qlnx_pci_attach_err;
772         }
773 
774         rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
775                                         ha->reg_rid);
776 
777 	ha->dbells_rid = PCIR_BAR(2);
778 	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
779 					SYS_RES_MEMORY,
780 					ha->dbells_rid);
781 	if (rsrc_len_dbells) {
782 		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
783 					&ha->dbells_rid, RF_ACTIVE);
784 
785 		if (ha->pci_dbells == NULL) {
786 			device_printf(dev, "unable to map BAR1\n");
787 			goto qlnx_pci_attach_err;
788 		}
789 		ha->dbells_phys_addr = (uint64_t)
790 			bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
791 
792 		ha->dbells_size = rsrc_len_dbells;
793 	} else {
794 		if (qlnx_vf_device(ha) != 0) {
795 			device_printf(dev, " BAR1 size is zero\n");
796 			goto qlnx_pci_attach_err;
797 		}
798 	}
799 
800         ha->msix_rid = PCIR_BAR(4);
801         ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
802                         &ha->msix_rid, RF_ACTIVE);
803 
804         if (ha->msix_bar == NULL) {
805                 device_printf(dev, "unable to map BAR2\n");
806                 goto qlnx_pci_attach_err;
807 	}
808 
809         rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
810                                         ha->msix_rid);
811 
812 	ha->dbg_level = 0x0000;
813 
814 	QL_DPRINT1(ha, "\n\t\t\t"
815 		"pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
816 		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
817 		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
818 		" msix_avail = 0x%x "
819 		"\n\t\t\t[ncpus = %d]\n",
820 		ha->pci_dev, ha->pci_reg, rsrc_len_reg,
821 		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
822 		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
823 		mp_ncpus);
824 	/*
825 	 * allocate dma tags
826 	 */
827 
828 	if (qlnx_alloc_parent_dma_tag(ha))
829                 goto qlnx_pci_attach_err;
830 
831 	if (qlnx_alloc_tx_dma_tag(ha))
832                 goto qlnx_pci_attach_err;
833 
834 	if (qlnx_alloc_rx_dma_tag(ha))
835                 goto qlnx_pci_attach_err;
836 
837 
838 	if (qlnx_init_hw(ha) != 0)
839 		goto qlnx_pci_attach_err;
840 
841         ha->flags.hw_init = 1;
842 
843 	qlnx_get_params(ha);
844 
845 	if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
846 		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
847 		qlnxe_queue_count = QLNX_MAX_RSS;
848 	}
849 
850 	/*
851 	 * Allocate MSI-x vectors
852 	 */
853 	if (qlnx_vf_device(ha) != 0) {
854 		if (qlnxe_queue_count == 0)
855 			ha->num_rss = QLNX_DEFAULT_RSS;
856 		else
857 			ha->num_rss = qlnxe_queue_count;
858 
859 		num_sp_msix = ha->cdev.num_hwfns;
860 	} else {
861 		uint8_t max_rxq;
862 		uint8_t max_txq;
863 
864 		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
865 		ecore_vf_get_num_txqs(&ha->cdev.hwfns[0], &max_txq);
866 
867 		if (max_rxq < max_txq)
868 			ha->num_rss = max_rxq;
869 		else
870 			ha->num_rss = max_txq;
871 
872 		if (ha->num_rss > QLNX_MAX_VF_RSS)
873 			ha->num_rss = QLNX_MAX_VF_RSS;
874 
875 		num_sp_msix = 0;
876 	}
877 
878 	if (ha->num_rss > mp_ncpus)
879 		ha->num_rss = mp_ncpus;
880 
881 	ha->num_tc = QLNX_MAX_TC;
882 
883         ha->msix_count = pci_msix_count(dev);
884 
885 #ifdef QLNX_ENABLE_IWARP
886 
887 	num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);
888 
889 #endif /* #ifdef QLNX_ENABLE_IWARP */
890 
891         if (!ha->msix_count ||
892 		(ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
893                 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
894                         ha->msix_count);
895                 goto qlnx_pci_attach_err;
896         }
897 
898 	if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
899 		ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
900 	else
901 		ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);
902 
903 	QL_DPRINT1(ha, "\n\t\t\t"
904 		"pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
905 		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
906 		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
907 		" msix_avail = 0x%x msix_alloc = 0x%x"
908 		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
909 		 ha->pci_reg, rsrc_len_reg,
910 		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
911 		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
912 		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
913 
914         if (pci_alloc_msix(dev, &ha->msix_count)) {
915                 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
916                         ha->msix_count);
917                 ha->msix_count = 0;
918                 goto qlnx_pci_attach_err;
919         }
920 
921 	/*
922 	 * Initialize slow path interrupt and task queue
923 	 */
924 
925 	if (num_sp_msix) {
926 		if (qlnx_create_sp_taskqueues(ha) != 0)
927 			goto qlnx_pci_attach_err;
928 
929 		for (i = 0; i < ha->cdev.num_hwfns; i++) {
930 			struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
931 
932 			ha->sp_irq_rid[i] = i + 1;
933 			ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
934 						&ha->sp_irq_rid[i],
935 						(RF_ACTIVE | RF_SHAREABLE));
936 			if (ha->sp_irq[i] == NULL) {
937                 		device_printf(dev,
938 					"could not allocate slow path interrupt\n");
939 				goto qlnx_pci_attach_err;
940 			}
941 
942 			if (bus_setup_intr(dev, ha->sp_irq[i],
943 				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
944 				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
945 				device_printf(dev,
946 					"could not setup slow path interrupt\n");
947 				goto qlnx_pci_attach_err;
948 			}
949 
950 			QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
951 				" sp_irq %p sp_handle %p\n", p_hwfn,
952 				ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
953 		}
954 	}
955 
956 	/*
957 	 * initialize fast path interrupt
958 	 */
959 	if (qlnx_create_fp_taskqueues(ha) != 0)
960 		goto qlnx_pci_attach_err;
961 
962         for (i = 0; i < ha->num_rss; i++) {
963                 ha->irq_vec[i].rss_idx = i;
964                 ha->irq_vec[i].ha = ha;
965                 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;
966 
967                 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
968                                 &ha->irq_vec[i].irq_rid,
969                                 (RF_ACTIVE | RF_SHAREABLE));
970 
971                 if (ha->irq_vec[i].irq == NULL) {
972                         device_printf(dev,
973 				"could not allocate interrupt[%d] irq_rid = %d\n",
974 				i, ha->irq_vec[i].irq_rid);
975                         goto qlnx_pci_attach_err;
976                 }
977 
978 		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
979                         device_printf(dev, "could not allocate tx_br[%d]\n", i);
980                         goto qlnx_pci_attach_err;
981 		}
982 	}
983 
984 	if (qlnx_vf_device(ha) != 0) {
985 		callout_init(&ha->qlnx_callout, 1);
986 		ha->flags.callout_init = 1;
987 
988 		for (i = 0; i < ha->cdev.num_hwfns; i++) {
989 			if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
990 				goto qlnx_pci_attach_err;
991 			if (ha->grcdump_size[i] == 0)
992 				goto qlnx_pci_attach_err;
993 
994 			ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
995 			QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
996 				i, ha->grcdump_size[i]);
997 
998 			ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
999 			if (ha->grcdump[i] == NULL) {
1000 				device_printf(dev, "grcdump alloc[%d] failed\n", i);
1001 				goto qlnx_pci_attach_err;
1002 			}
1003 
1004 			if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
1005 				goto qlnx_pci_attach_err;
1006 			if (ha->idle_chk_size[i] == 0)
1007 				goto qlnx_pci_attach_err;
1008 
1009 			ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
1010 			QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
1011 				i, ha->idle_chk_size[i]);
1012 
1013 			ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
1014 
1015 			if (ha->idle_chk[i] == NULL) {
1016 				device_printf(dev, "idle_chk alloc failed\n");
1017 				goto qlnx_pci_attach_err;
1018 			}
1019 		}
1020 
1021 		if (qlnx_create_error_recovery_taskqueue(ha) != 0)
1022 			goto qlnx_pci_attach_err;
1023 	}
1024 
1025 	if (qlnx_slowpath_start(ha) != 0)
1026 		goto qlnx_pci_attach_err;
1027 	else
1028 		ha->flags.slowpath_start = 1;
1029 
1030 	if (qlnx_vf_device(ha) != 0) {
1031 		if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
1032 			qlnx_mdelay(__func__, 1000);
1033 			qlnx_trigger_dump(ha);
1034 
1035 			goto qlnx_pci_attach_err0;
1036 		}
1037 
1038 		if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
1039 			qlnx_mdelay(__func__, 1000);
1040 			qlnx_trigger_dump(ha);
1041 
1042 			goto qlnx_pci_attach_err0;
1043 		}
1044 	} else {
1045 		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
1046 		ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
1047 	}
1048 
1049 	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
1050 		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
1051 		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
1052 	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
1053 		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
1054 		FW_ENGINEERING_VERSION);
1055 
1056 	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
1057 		 ha->stormfw_ver, ha->mfw_ver);
1058 
1059 	qlnx_init_ifnet(dev, ha);
1060 
1061 	/*
1062 	 * add sysctls
1063 	 */
1064 	qlnx_add_sysctls(ha);
1065 
1066 qlnx_pci_attach_err0:
1067         /*
1068 	 * create ioctl device interface
1069 	 */
1070 	if (qlnx_vf_device(ha) != 0) {
1071 		if (qlnx_make_cdev(ha)) {
1072 			device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
1073 			goto qlnx_pci_attach_err;
1074 		}
1075 
1076 #ifdef QLNX_ENABLE_IWARP
1077 		qlnx_rdma_dev_add(ha);
1078 #endif /* #ifdef QLNX_ENABLE_IWARP */
1079 	}
1080 
1081 #ifndef QLNX_VF
1082 #ifdef CONFIG_ECORE_SRIOV
1083 
1084 	if (qlnx_vf_device(ha) != 0)
1085 		qlnx_initialize_sriov(ha);
1086 
1087 #endif /* #ifdef CONFIG_ECORE_SRIOV */
1088 #endif /* #ifndef QLNX_VF */
1089 
1090 	QL_DPRINT2(ha, "success\n");
1091 
1092         return (0);
1093 
1094 qlnx_pci_attach_err:
1095 
1096 	qlnx_release(ha);
1097 
1098 	return (ENXIO);
1099 }
1100 
1101 /*
1102  * Name:	qlnx_pci_detach
1103  * Function:	Unhooks the device from the operating system
1104  */
1105 static int
1106 qlnx_pci_detach(device_t dev)
1107 {
1108 	qlnx_host_t	*ha = NULL;
1109 
1110         if ((ha = device_get_softc(dev)) == NULL) {
1111                 device_printf(dev, "%s: cannot get softc\n", __func__);
1112                 return (ENOMEM);
1113         }
1114 
1115 	if (qlnx_vf_device(ha) != 0) {
1116 #ifdef CONFIG_ECORE_SRIOV
1117 		int ret;
1118 
1119 		ret = pci_iov_detach(dev);
1120 		if (ret) {
1121                 	device_printf(dev, "%s: SRIOV in use\n", __func__);
1122 			return (ret);
1123 		}
1124 
1125 #endif /* #ifdef CONFIG_ECORE_SRIOV */
1126 
1127 #ifdef QLNX_ENABLE_IWARP
1128 		if (qlnx_rdma_dev_remove(ha) != 0)
1129 			return (EBUSY);
1130 #endif /* #ifdef QLNX_ENABLE_IWARP */
1131 	}
1132 
1133 	QLNX_LOCK(ha);
1134 	qlnx_stop(ha);
1135 	QLNX_UNLOCK(ha);
1136 
1137 	qlnx_release(ha);
1138 
1139         return (0);
1140 }
1141 
1142 #ifdef QLNX_ENABLE_IWARP
1143 
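/*
 * Name:	qlnx_get_personality
 * Function:	Extracts the 4-bit RDMA personality for the given PCI
 *		function from the qlnxe_rdma_configuration bit field (see
 *		the decode example near its definition above).
 */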
1144 static uint8_t
1145 qlnx_get_personality(uint8_t pci_func)
1146 {
1147 	uint8_t personality;
1148 
1149 	personality = (qlnxe_rdma_configuration >>
1150 				(pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
1151 				QLNX_PERSONALITY_MASK;
1152 	return (personality);
1153 }
1154 
1155 static void
1156 qlnx_set_personality(qlnx_host_t *ha)
1157 {
1158 	uint8_t personality;
1159 
1160 	personality = qlnx_get_personality(ha->pci_func);
1161 
1162 	switch (personality) {
1163 	case QLNX_PERSONALITY_DEFAULT:
1164                	device_printf(ha->pci_dev, "%s: DEFAULT\n",
1165 			__func__);
1166 		ha->personality = ECORE_PCI_DEFAULT;
1167 		break;
1168 
1169 	case QLNX_PERSONALITY_ETH_ONLY:
1170                	device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
1171 			__func__);
1172 		ha->personality = ECORE_PCI_ETH;
1173 		break;
1174 
1175 	case QLNX_PERSONALITY_ETH_IWARP:
1176                	device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
1177 			__func__);
1178 		ha->personality = ECORE_PCI_ETH_IWARP;
1179 		break;
1180 
1181 	case QLNX_PERSONALITY_ETH_ROCE:
1182                	device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
1183 			__func__);
1184 		ha->personality = ECORE_PCI_ETH_ROCE;
1185 		break;
1186 	}
1187 
1188 	return;
1189 }
1190 
1191 #endif /* #ifdef QLNX_ENABLE_IWARP */
1192 
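/*
 * Name:	qlnx_init_hw
 * Function:	First-stage hardware init: sets the ecore debug level,
 *		register and doorbell views, selects the PCI personality
 *		and calls ecore_hw_prepare().
 */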
1193 static int
1194 qlnx_init_hw(qlnx_host_t *ha)
1195 {
1196 	int				rval = 0;
1197 	struct ecore_hw_prepare_params	params;
1198 
1199         ha->cdev.ha = ha;
1200 	ecore_init_struct(&ha->cdev);
1201 
1202 	/* ha->dp_module = ECORE_MSG_PROBE |
1203 				ECORE_MSG_INTR |
1204 				ECORE_MSG_SP |
1205 				ECORE_MSG_LINK |
1206 				ECORE_MSG_SPQ |
1207 				ECORE_MSG_RDMA;
1208 	ha->dp_level = ECORE_LEVEL_VERBOSE;*/
1209 	//ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
1210 	ha->dp_level = ECORE_LEVEL_NOTICE;
1211 	//ha->dp_level = ECORE_LEVEL_VERBOSE;
1212 
1213 	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
1214 
1215 	ha->cdev.regview = ha->pci_reg;
1216 
1217 	ha->personality = ECORE_PCI_DEFAULT;
1218 
1219 	if (qlnx_vf_device(ha) == 0) {
1220 		ha->cdev.b_is_vf = true;
1221 
1222 		if (ha->pci_dbells != NULL) {
1223 			ha->cdev.doorbells = ha->pci_dbells;
1224 			ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1225 			ha->cdev.db_size = ha->dbells_size;
1226 		} else {
1227 			ha->pci_dbells = ha->pci_reg;
1228 		}
1229 	} else {
1230 		ha->cdev.doorbells = ha->pci_dbells;
1231 		ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1232 		ha->cdev.db_size = ha->dbells_size;
1233 
1234 #ifdef QLNX_ENABLE_IWARP
1235 
1236 		if (qlnx_rdma_supported(ha) == 0)
1237 			qlnx_set_personality(ha);
1238 
1239 #endif /* #ifdef QLNX_ENABLE_IWARP */
1240 	}
1241 	QL_DPRINT2(ha, "%s: %s\n", __func__,
1242 		(ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));
1243 
1244 	bzero(&params, sizeof (struct ecore_hw_prepare_params));
1245 
1246 	params.personality = ha->personality;
1247 
1248 	params.drv_resc_alloc = false;
1249 	params.chk_reg_fifo = false;
1250 	params.initiate_pf_flr = true;
1251 	params.epoch = 0;
1252 
1253 	ecore_hw_prepare(&ha->cdev, &params);
1254 
1255 	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
1256 
1257 	QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
1258 		ha, &ha->cdev, &ha->cdev.hwfns[0]);
1259 
1260 	return (rval);
1261 }
1262 
1263 static void
1264 qlnx_release(qlnx_host_t *ha)
1265 {
1266         device_t	dev;
1267         int		i;
1268 
1269         dev = ha->pci_dev;
1270 
1271 	QL_DPRINT2(ha, "enter\n");
1272 
1273 	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
1274 		if (ha->idle_chk[i] != NULL) {
1275 			free(ha->idle_chk[i], M_QLNXBUF);
1276 			ha->idle_chk[i] = NULL;
1277 		}
1278 
1279 		if (ha->grcdump[i] != NULL) {
1280 			free(ha->grcdump[i], M_QLNXBUF);
1281 			ha->grcdump[i] = NULL;
1282 		}
1283 	}
1284 
1285         if (ha->flags.callout_init)
1286                 callout_drain(&ha->qlnx_callout);
1287 
1288 	if (ha->flags.slowpath_start) {
1289 		qlnx_slowpath_stop(ha);
1290 	}
1291 
1292         if (ha->flags.hw_init)
1293 		ecore_hw_remove(&ha->cdev);
1294 
1295         qlnx_del_cdev(ha);
1296 
1297         if (ha->ifp != NULL)
1298                 ether_ifdetach(ha->ifp);
1299 
1300 	qlnx_free_tx_dma_tag(ha);
1301 
1302 	qlnx_free_rx_dma_tag(ha);
1303 
1304 	qlnx_free_parent_dma_tag(ha);
1305 
1306 	if (qlnx_vf_device(ha) != 0) {
1307 		qlnx_destroy_error_recovery_taskqueue(ha);
1308 	}
1309 
1310         for (i = 0; i < ha->num_rss; i++) {
1311 		struct qlnx_fastpath *fp = &ha->fp_array[i];
1312 
1313                 if (ha->irq_vec[i].handle) {
1314                         (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
1315                                         ha->irq_vec[i].handle);
1316                 }
1317 
1318                 if (ha->irq_vec[i].irq) {
1319                         (void)bus_release_resource(dev, SYS_RES_IRQ,
1320                                 ha->irq_vec[i].irq_rid,
1321                                 ha->irq_vec[i].irq);
1322                 }
1323 
1324 		qlnx_free_tx_br(ha, fp);
1325         }
1326 	qlnx_destroy_fp_taskqueues(ha);
1327 
1328  	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1329         	if (ha->sp_handle[i])
1330                 	(void)bus_teardown_intr(dev, ha->sp_irq[i],
1331 				ha->sp_handle[i]);
1332 
1333         	if (ha->sp_irq[i])
1334 			(void) bus_release_resource(dev, SYS_RES_IRQ,
1335 				ha->sp_irq_rid[i], ha->sp_irq[i]);
1336 	}
1337 
1338 	qlnx_destroy_sp_taskqueues(ha);
1339 
1340         if (ha->msix_count)
1341                 pci_release_msi(dev);
1342 
1343         if (ha->flags.lock_init) {
1344                 sx_destroy(&ha->hw_lock);
1345         }
1346 
1347         if (ha->pci_reg)
1348                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
1349                                 ha->pci_reg);
1350 
1351         if (ha->dbells_size && ha->pci_dbells)
1352                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1353                                 ha->pci_dbells);
1354 
1355         if (ha->msix_bar)
1356                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1357                                 ha->msix_bar);
1358 
1359 	QL_DPRINT2(ha, "exit\n");
1360 	return;
1361 }
1362 
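/*
 * Name:	qlnx_trigger_dump
 * Function:	Marks the interface down, flags error recovery and
 *		collects a GRC dump and idle check for every hw function.
 *		Skipped on VF devices.
 */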
1363 static void
1364 qlnx_trigger_dump(qlnx_host_t *ha)
1365 {
1366 	int	i;
1367 
1368 	if (ha->ifp != NULL)
1369 		if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
1370 
1371 	QL_DPRINT2(ha, "enter\n");
1372 
1373 	if (qlnx_vf_device(ha) == 0)
1374 		return;
1375 
1376 	ha->error_recovery = 1;
1377 
1378 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
1379 		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1380 		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1381 	}
1382 
1383 	QL_DPRINT2(ha, "exit\n");
1384 
1385 	return;
1386 }
1387 
1388 static int
1389 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1390 {
1391         int		err, ret = 0;
1392         qlnx_host_t	*ha;
1393 
1394         err = sysctl_handle_int(oidp, &ret, 0, req);
1395 
1396         if (err || !req->newptr)
1397                 return (err);
1398 
1399         if (ret == 1) {
1400                 ha = (qlnx_host_t *)arg1;
1401                 qlnx_trigger_dump(ha);
1402         }
1403         return (err);
1404 }
1405 
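/*
 * Name:	qlnx_set_tx_coalesce
 * Function:	Sysctl handler for the transmit interrupt coalescing
 *		interval. Accepts 1-255 microseconds and applies the
 *		value to txq[0] of every RSS queue via
 *		ecore_set_queue_coalesce(). Not supported on VF devices.
 */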
1406 static int
1407 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
1408 {
1409         int			err, i, ret = 0, usecs = 0;
1410         qlnx_host_t		*ha;
1411 	struct ecore_hwfn	*p_hwfn;
1412 	struct qlnx_fastpath	*fp;
1413 
1414         err = sysctl_handle_int(oidp, &usecs, 0, req);
1415 
1416         if (err || !req->newptr || !usecs || (usecs > 255))
1417                 return (err);
1418 
1419         ha = (qlnx_host_t *)arg1;
1420 
1421 	if (qlnx_vf_device(ha) == 0)
1422 		return (-1);
1423 
1424 	for (i = 0; i < ha->num_rss; i++) {
1425 		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1426 
1427         	fp = &ha->fp_array[i];
1428 
1429 		if (fp->txq[0]->handle != NULL) {
1430 			ret = ecore_set_queue_coalesce(p_hwfn, 0,
1431 					(uint16_t)usecs, fp->txq[0]->handle);
1432 		}
1433         }
1434 
1435 	if (!ret)
1436 		ha->tx_coalesce_usecs = (uint8_t)usecs;
1437 
1438         return (err);
1439 }
1440 
1441 static int
1442 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
1443 {
1444         int			err, i, ret = 0, usecs = 0;
1445         qlnx_host_t		*ha;
1446 	struct ecore_hwfn	*p_hwfn;
1447 	struct qlnx_fastpath	*fp;
1448 
1449         err = sysctl_handle_int(oidp, &usecs, 0, req);
1450 
1451         if (err || !req->newptr || !usecs || (usecs > 255))
1452                 return (err);
1453 
1454         ha = (qlnx_host_t *)arg1;
1455 
1456 	if (qlnx_vf_device(ha) == 0)
1457 		return (-1);
1458 
1459 	for (i = 0; i < ha->num_rss; i++) {
1460 		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1461 
1462         	fp = &ha->fp_array[i];
1463 
1464 		if (fp->rxq->handle != NULL) {
1465 			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
1466 					 0, fp->rxq->handle);
1467 		}
1468 	}
1469 
1470 	if (!ret)
1471 		ha->rx_coalesce_usecs = (uint8_t)usecs;
1472 
1473         return (err);
1474 }
1475 
1476 static void
1477 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
1478 {
1479         struct sysctl_ctx_list	*ctx;
1480         struct sysctl_oid_list	*children;
1481 	struct sysctl_oid	*ctx_oid;
1482 
1483         ctx = device_get_sysctl_ctx(ha->pci_dev);
1484 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1485 
1486 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
1487 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");
1488         children = SYSCTL_CHILDREN(ctx_oid);
1489 
1490 	SYSCTL_ADD_QUAD(ctx, children,
1491                 OID_AUTO, "sp_interrupts",
1492                 CTLFLAG_RD, &ha->sp_interrupts,
1493                 "No. of slowpath interrupts");
1494 
1495 	return;
1496 }
1497 
1498 static void
1499 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
1500 {
1501         struct sysctl_ctx_list	*ctx;
1502         struct sysctl_oid_list	*children;
1503         struct sysctl_oid_list	*node_children;
1504 	struct sysctl_oid	*ctx_oid;
1505 	int			i, j;
1506 	uint8_t			name_str[16];
1507 
1508         ctx = device_get_sysctl_ctx(ha->pci_dev);
1509 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1510 
1511 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
1512 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
1513 	children = SYSCTL_CHILDREN(ctx_oid);
1514 
1515 	for (i = 0; i < ha->num_rss; i++) {
1516 		bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1517 		snprintf(name_str, sizeof(name_str), "%d", i);
1518 
1519 		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
1520 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
1521 		node_children = SYSCTL_CHILDREN(ctx_oid);
1522 
1523 		/* Tx Related */
1524 
1525 		SYSCTL_ADD_QUAD(ctx, node_children,
1526 			OID_AUTO, "tx_pkts_processed",
1527 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1528 			"No. of packets processed for transmission");
1529 
1530 		SYSCTL_ADD_QUAD(ctx, node_children,
1531 			OID_AUTO, "tx_pkts_freed",
1532 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1533 			"No. of freed packets");
1534 
1535 		SYSCTL_ADD_QUAD(ctx, node_children,
1536 			OID_AUTO, "tx_pkts_transmitted",
1537 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1538 			"No. of transmitted packets");
1539 
1540 		SYSCTL_ADD_QUAD(ctx, node_children,
1541 			OID_AUTO, "tx_pkts_completed",
1542 			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1543 			"No. of transmit completions");
1544 
1545                 SYSCTL_ADD_QUAD(ctx, node_children,
1546                         OID_AUTO, "tx_non_tso_pkts",
1547                         CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
1548                         "No. of non-LSO transmitted packets");
1549 
1550 #ifdef QLNX_TRACE_PERF_DATA
1551 
1552                 SYSCTL_ADD_QUAD(ctx, node_children,
1553                         OID_AUTO, "tx_pkts_trans_ctx",
1554                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
1555                         "No. of transmitted packets in transmit context");
1556 
1557                 SYSCTL_ADD_QUAD(ctx, node_children,
1558                         OID_AUTO, "tx_pkts_compl_ctx",
1559                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
1560                         "No. of transmit completions in transmit context");
1561 
1562                 SYSCTL_ADD_QUAD(ctx, node_children,
1563                         OID_AUTO, "tx_pkts_trans_fp",
1564                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
1565                         "No. of transmitted packets in taskqueue");
1566 
1567                 SYSCTL_ADD_QUAD(ctx, node_children,
1568                         OID_AUTO, "tx_pkts_compl_fp",
1569                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
1570                         "No. of transmit completions in taskqueue");
1571 
1572                 SYSCTL_ADD_QUAD(ctx, node_children,
1573                         OID_AUTO, "tx_pkts_compl_intr",
1574                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
1575                         "No. of transmit completions in interrupt ctx");
1576 #endif
1577 
1578                 SYSCTL_ADD_QUAD(ctx, node_children,
1579                         OID_AUTO, "tx_tso_pkts",
1580                         CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
1581                         "No. of LSO transmitted packets");
1582 
1583 		SYSCTL_ADD_QUAD(ctx, node_children,
1584 			OID_AUTO, "tx_lso_wnd_min_len",
1585 			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1586 			"tx_lso_wnd_min_len");
1587 
1588 		SYSCTL_ADD_QUAD(ctx, node_children,
1589 			OID_AUTO, "tx_defrag",
1590 			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1591 			"tx_defrag");
1592 
1593 		SYSCTL_ADD_QUAD(ctx, node_children,
1594 			OID_AUTO, "tx_nsegs_gt_elem_left",
1595 			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1596 			"tx_nsegs_gt_elem_left");
1597 
1598 		SYSCTL_ADD_UINT(ctx, node_children,
1599 			OID_AUTO, "tx_tso_max_nsegs",
1600 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1601 			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1602 
1603 		SYSCTL_ADD_UINT(ctx, node_children,
1604 			OID_AUTO, "tx_tso_min_nsegs",
1605 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1606 			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1607 
1608 		SYSCTL_ADD_UINT(ctx, node_children,
1609 			OID_AUTO, "tx_tso_max_pkt_len",
1610 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1611 			ha->fp_array[i].tx_tso_max_pkt_len,
1612 			"tx_tso_max_pkt_len");
1613 
1614 		SYSCTL_ADD_UINT(ctx, node_children,
1615 			OID_AUTO, "tx_tso_min_pkt_len",
1616 			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1617 			ha->fp_array[i].tx_tso_min_pkt_len,
1618 			"tx_tso_min_pkt_len");
1619 
1620 		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
1621 			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1622 			snprintf(name_str, sizeof(name_str),
1623 				"tx_pkts_nseg_%02d", (j+1));
1624 
1625 			SYSCTL_ADD_QUAD(ctx, node_children,
1626 				OID_AUTO, name_str, CTLFLAG_RD,
1627 				&ha->fp_array[i].tx_pkts[j], name_str);
1628 		}
1629 
1630 #ifdef QLNX_TRACE_PERF_DATA
1631                 for (j = 0; j < 18; j++) {
1632                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1633                         snprintf(name_str, sizeof(name_str),
1634                                 "tx_pkts_hist_%02d", (j+1));
1635 
1636                         SYSCTL_ADD_QUAD(ctx, node_children,
1637                                 OID_AUTO, name_str, CTLFLAG_RD,
1638                                 &ha->fp_array[i].tx_pkts_hist[j], name_str);
1639                 }
1640                 for (j = 0; j < 5; j++) {
1641                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1642                         snprintf(name_str, sizeof(name_str),
1643                                 "tx_comInt_%02d", (j+1));
1644 
1645                         SYSCTL_ADD_QUAD(ctx, node_children,
1646                                 OID_AUTO, name_str, CTLFLAG_RD,
1647                                 &ha->fp_array[i].tx_comInt[j], name_str);
1648                 }
1649                 for (j = 0; j < 18; j++) {
1650                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1651                         snprintf(name_str, sizeof(name_str),
1652                                 "tx_pkts_q_%02d", (j+1));
1653 
1654                         SYSCTL_ADD_QUAD(ctx, node_children,
1655                                 OID_AUTO, name_str, CTLFLAG_RD,
1656                                 &ha->fp_array[i].tx_pkts_q[j], name_str);
1657                 }
1658 #endif
1659 
1660 		SYSCTL_ADD_QUAD(ctx, node_children,
1661 			OID_AUTO, "err_tx_nsegs_gt_elem_left",
1662 			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1663 			"err_tx_nsegs_gt_elem_left");
1664 
1665 		SYSCTL_ADD_QUAD(ctx, node_children,
1666 			OID_AUTO, "err_tx_dmamap_create",
1667 			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1668 			"err_tx_dmamap_create");
1669 
1670 		SYSCTL_ADD_QUAD(ctx, node_children,
1671 			OID_AUTO, "err_tx_defrag_dmamap_load",
1672 			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1673 			"err_tx_defrag_dmamap_load");
1674 
1675 		SYSCTL_ADD_QUAD(ctx, node_children,
1676 			OID_AUTO, "err_tx_non_tso_max_seg",
1677 			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1678 			"err_tx_non_tso_max_seg");
1679 
1680 		SYSCTL_ADD_QUAD(ctx, node_children,
1681 			OID_AUTO, "err_tx_dmamap_load",
1682 			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1683 			"err_tx_dmamap_load");
1684 
1685 		SYSCTL_ADD_QUAD(ctx, node_children,
1686 			OID_AUTO, "err_tx_defrag",
1687 			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1688 			"err_tx_defrag");
1689 
1690 		SYSCTL_ADD_QUAD(ctx, node_children,
1691 			OID_AUTO, "err_tx_free_pkt_null",
1692 			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1693 			"err_tx_free_pkt_null");
1694 
1695 		SYSCTL_ADD_QUAD(ctx, node_children,
1696 			OID_AUTO, "err_tx_cons_idx_conflict",
1697 			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1698 			"err_tx_cons_idx_conflict");
1699 
1700 		SYSCTL_ADD_QUAD(ctx, node_children,
1701 			OID_AUTO, "lro_cnt_64",
1702 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1703 			"lro_cnt_64");
1704 
1705 		SYSCTL_ADD_QUAD(ctx, node_children,
1706 			OID_AUTO, "lro_cnt_128",
1707 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1708 			"lro_cnt_128");
1709 
1710 		SYSCTL_ADD_QUAD(ctx, node_children,
1711 			OID_AUTO, "lro_cnt_256",
1712 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1713 			"lro_cnt_256");
1714 
1715 		SYSCTL_ADD_QUAD(ctx, node_children,
1716 			OID_AUTO, "lro_cnt_512",
1717 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1718 			"lro_cnt_512");
1719 
1720 		SYSCTL_ADD_QUAD(ctx, node_children,
1721 			OID_AUTO, "lro_cnt_1024",
1722 			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1723 			"lro_cnt_1024");
1724 
1725 		/* Rx Related */
1726 
1727 		SYSCTL_ADD_QUAD(ctx, node_children,
1728 			OID_AUTO, "rx_pkts",
1729 			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1730 			"No. of received packets");
1731 
1732 		SYSCTL_ADD_QUAD(ctx, node_children,
1733 			OID_AUTO, "tpa_start",
1734 			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1735 			"No. of tpa_start packets");
1736 
1737 		SYSCTL_ADD_QUAD(ctx, node_children,
1738 			OID_AUTO, "tpa_cont",
1739 			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1740 			"No. of tpa_cont packets");
1741 
1742 		SYSCTL_ADD_QUAD(ctx, node_children,
1743 			OID_AUTO, "tpa_end",
1744 			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1745 			"No. of tpa_end packets");
1746 
1747 		SYSCTL_ADD_QUAD(ctx, node_children,
1748 			OID_AUTO, "err_m_getcl",
1749 			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1750 			"err_m_getcl");
1751 
1752 		SYSCTL_ADD_QUAD(ctx, node_children,
1753 			OID_AUTO, "err_m_getjcl",
1754 			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1755 			"err_m_getjcl");
1756 
1757 		SYSCTL_ADD_QUAD(ctx, node_children,
1758 			OID_AUTO, "err_rx_hw_errors",
1759 			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1760 			"err_rx_hw_errors");
1761 
1762 		SYSCTL_ADD_QUAD(ctx, node_children,
1763 			OID_AUTO, "err_rx_alloc_errors",
1764 			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1765 			"err_rx_alloc_errors");
1766 	}
1767 
1768 	return;
1769 }
1770 
1771 static void
1772 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
1773 {
1774         struct sysctl_ctx_list	*ctx;
1775         struct sysctl_oid_list	*children;
1776 	struct sysctl_oid	*ctx_oid;
1777 
1778         ctx = device_get_sysctl_ctx(ha->pci_dev);
1779 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1780 
1781 	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
1782 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
1783         children = SYSCTL_CHILDREN(ctx_oid);
1784 
1785 	SYSCTL_ADD_QUAD(ctx, children,
1786                 OID_AUTO, "no_buff_discards",
1787                 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
1788                 "No. of packets discarded due to lack of buffer");
1789 
1790 	SYSCTL_ADD_QUAD(ctx, children,
1791                 OID_AUTO, "packet_too_big_discard",
1792                 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
1793                 "No. of packets discarded because packet was too big");
1794 
1795 	SYSCTL_ADD_QUAD(ctx, children,
1796                 OID_AUTO, "ttl0_discard",
1797                 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
1798                 "ttl0_discard");
1799 
1800 	SYSCTL_ADD_QUAD(ctx, children,
1801                 OID_AUTO, "rx_ucast_bytes",
1802                 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
1803                 "rx_ucast_bytes");
1804 
1805 	SYSCTL_ADD_QUAD(ctx, children,
1806                 OID_AUTO, "rx_mcast_bytes",
1807                 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
1808                 "rx_mcast_bytes");
1809 
1810 	SYSCTL_ADD_QUAD(ctx, children,
1811                 OID_AUTO, "rx_bcast_bytes",
1812                 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
1813                 "rx_bcast_bytes");
1814 
1815 	SYSCTL_ADD_QUAD(ctx, children,
1816                 OID_AUTO, "rx_ucast_pkts",
1817                 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
1818                 "rx_ucast_pkts");
1819 
1820 	SYSCTL_ADD_QUAD(ctx, children,
1821                 OID_AUTO, "rx_mcast_pkts",
1822                 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
1823                 "rx_mcast_pkts");
1824 
1825 	SYSCTL_ADD_QUAD(ctx, children,
1826                 OID_AUTO, "rx_bcast_pkts",
1827                 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
1828                 "rx_bcast_pkts");
1829 
1830 	SYSCTL_ADD_QUAD(ctx, children,
1831                 OID_AUTO, "mftag_filter_discards",
1832                 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
1833                 "mftag_filter_discards");
1834 
1835 	SYSCTL_ADD_QUAD(ctx, children,
1836                 OID_AUTO, "mac_filter_discards",
1837                 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
1838                 "mac_filter_discards");
1839 
1840 	SYSCTL_ADD_QUAD(ctx, children,
1841                 OID_AUTO, "tx_ucast_bytes",
1842                 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
1843                 "tx_ucast_bytes");
1844 
1845 	SYSCTL_ADD_QUAD(ctx, children,
1846                 OID_AUTO, "tx_mcast_bytes",
1847                 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
1848                 "tx_mcast_bytes");
1849 
1850 	SYSCTL_ADD_QUAD(ctx, children,
1851                 OID_AUTO, "tx_bcast_bytes",
1852                 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
1853                 "tx_bcast_bytes");
1854 
1855 	SYSCTL_ADD_QUAD(ctx, children,
1856                 OID_AUTO, "tx_ucast_pkts",
1857                 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
1858                 "tx_ucast_pkts");
1859 
1860 	SYSCTL_ADD_QUAD(ctx, children,
1861                 OID_AUTO, "tx_mcast_pkts",
1862                 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1863                 "tx_mcast_pkts");
1864 
1865 	SYSCTL_ADD_QUAD(ctx, children,
1866                 OID_AUTO, "tx_bcast_pkts",
1867                 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1868                 "tx_bcast_pkts");
1869 
1870 	SYSCTL_ADD_QUAD(ctx, children,
1871                 OID_AUTO, "tx_err_drop_pkts",
1872                 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1873                 "tx_err_drop_pkts");
1874 
1875 	SYSCTL_ADD_QUAD(ctx, children,
1876                 OID_AUTO, "tpa_coalesced_pkts",
1877                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1878                 "tpa_coalesced_pkts");
1879 
1880 	SYSCTL_ADD_QUAD(ctx, children,
1881                 OID_AUTO, "tpa_coalesced_events",
1882                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1883                 "tpa_coalesced_events");
1884 
1885 	SYSCTL_ADD_QUAD(ctx, children,
1886                 OID_AUTO, "tpa_aborts_num",
1887                 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1888                 "tpa_aborts_num");
1889 
1890 	SYSCTL_ADD_QUAD(ctx, children,
1891                 OID_AUTO, "tpa_not_coalesced_pkts",
1892                 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1893                 "tpa_not_coalesced_pkts");
1894 
1895 	SYSCTL_ADD_QUAD(ctx, children,
1896                 OID_AUTO, "tpa_coalesced_bytes",
1897                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1898                 "tpa_coalesced_bytes");
1899 
1900 	SYSCTL_ADD_QUAD(ctx, children,
1901                 OID_AUTO, "rx_64_byte_packets",
1902                 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1903                 "rx_64_byte_packets");
1904 
1905 	SYSCTL_ADD_QUAD(ctx, children,
1906                 OID_AUTO, "rx_65_to_127_byte_packets",
1907                 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1908                 "rx_65_to_127_byte_packets");
1909 
1910 	SYSCTL_ADD_QUAD(ctx, children,
1911                 OID_AUTO, "rx_128_to_255_byte_packets",
1912                 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1913                 "rx_128_to_255_byte_packets");
1914 
1915 	SYSCTL_ADD_QUAD(ctx, children,
1916                 OID_AUTO, "rx_256_to_511_byte_packets",
1917                 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1918                 "rx_256_to_511_byte_packets");
1919 
1920 	SYSCTL_ADD_QUAD(ctx, children,
1921                 OID_AUTO, "rx_512_to_1023_byte_packets",
1922                 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1923                 "rx_512_to_1023_byte_packets");
1924 
1925 	SYSCTL_ADD_QUAD(ctx, children,
1926                 OID_AUTO, "rx_1024_to_1518_byte_packets",
1927                 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1928                 "rx_1024_to_1518_byte_packets");
1929 
1930 	SYSCTL_ADD_QUAD(ctx, children,
1931                 OID_AUTO, "rx_1519_to_1522_byte_packets",
1932                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1933                 "rx_1519_to_1522_byte_packets");
1934 
1935 	SYSCTL_ADD_QUAD(ctx, children,
1936                 OID_AUTO, "rx_1523_to_2047_byte_packets",
1937                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1938                 "rx_1523_to_2047_byte_packets");
1939 
1940 	SYSCTL_ADD_QUAD(ctx, children,
1941                 OID_AUTO, "rx_2048_to_4095_byte_packets",
1942                 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1943                 "rx_2048_to_4095_byte_packets");
1944 
1945 	SYSCTL_ADD_QUAD(ctx, children,
1946                 OID_AUTO, "rx_4096_to_9216_byte_packets",
1947                 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1948                 "rx_4096_to_9216_byte_packets");
1949 
1950 	SYSCTL_ADD_QUAD(ctx, children,
1951                 OID_AUTO, "rx_9217_to_16383_byte_packets",
1952                 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1953                 "rx_9217_to_16383_byte_packets");
1954 
1955 	SYSCTL_ADD_QUAD(ctx, children,
1956                 OID_AUTO, "rx_crc_errors",
1957                 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1958                 "rx_crc_errors");
1959 
1960 	SYSCTL_ADD_QUAD(ctx, children,
1961                 OID_AUTO, "rx_mac_crtl_frames",
1962                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1963                 "rx_mac_crtl_frames");
1964 
1965 	SYSCTL_ADD_QUAD(ctx, children,
1966                 OID_AUTO, "rx_pause_frames",
1967                 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
1968                 "rx_pause_frames");
1969 
1970 	SYSCTL_ADD_QUAD(ctx, children,
1971                 OID_AUTO, "rx_pfc_frames",
1972                 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
1973                 "rx_pfc_frames");
1974 
1975 	SYSCTL_ADD_QUAD(ctx, children,
1976                 OID_AUTO, "rx_align_errors",
1977                 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
1978                 "rx_align_errors");
1979 
1980 	SYSCTL_ADD_QUAD(ctx, children,
1981                 OID_AUTO, "rx_carrier_errors",
1982                 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
1983                 "rx_carrier_errors");
1984 
1985 	SYSCTL_ADD_QUAD(ctx, children,
1986                 OID_AUTO, "rx_oversize_packets",
1987                 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
1988                 "rx_oversize_packets");
1989 
1990 	SYSCTL_ADD_QUAD(ctx, children,
1991                 OID_AUTO, "rx_jabbers",
1992                 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
1993                 "rx_jabbers");
1994 
1995 	SYSCTL_ADD_QUAD(ctx, children,
1996                 OID_AUTO, "rx_undersize_packets",
1997                 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
1998                 "rx_undersize_packets");
1999 
2000 	SYSCTL_ADD_QUAD(ctx, children,
2001                 OID_AUTO, "rx_fragments",
2002                 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
2003                 "rx_fragments");
2004 
2005 	SYSCTL_ADD_QUAD(ctx, children,
2006                 OID_AUTO, "tx_64_byte_packets",
2007                 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
2008                 "tx_64_byte_packets");
2009 
2010 	SYSCTL_ADD_QUAD(ctx, children,
2011                 OID_AUTO, "tx_65_to_127_byte_packets",
2012                 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
2013                 "tx_65_to_127_byte_packets");
2014 
2015 	SYSCTL_ADD_QUAD(ctx, children,
2016                 OID_AUTO, "tx_128_to_255_byte_packets",
2017                 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
2018                 "tx_128_to_255_byte_packets");
2019 
2020 	SYSCTL_ADD_QUAD(ctx, children,
2021                 OID_AUTO, "tx_256_to_511_byte_packets",
2022                 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
2023                 "tx_256_to_511_byte_packets");
2024 
2025 	SYSCTL_ADD_QUAD(ctx, children,
2026                 OID_AUTO, "tx_512_to_1023_byte_packets",
2027                 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
2028                 "tx_512_to_1023_byte_packets");
2029 
2030 	SYSCTL_ADD_QUAD(ctx, children,
2031                 OID_AUTO, "tx_1024_to_1518_byte_packets",
2032                 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
2033                 "tx_1024_to_1518_byte_packets");
2034 
2035 	SYSCTL_ADD_QUAD(ctx, children,
2036                 OID_AUTO, "tx_1519_to_2047_byte_packets",
2037                 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
2038                 "tx_1519_to_2047_byte_packets");
2039 
2040 	SYSCTL_ADD_QUAD(ctx, children,
2041                 OID_AUTO, "tx_2048_to_4095_byte_packets",
2042                 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
2043                 "tx_2048_to_4095_byte_packets");
2044 
2045 	SYSCTL_ADD_QUAD(ctx, children,
2046                 OID_AUTO, "tx_4096_to_9216_byte_packets",
2047                 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
2048                 "tx_4096_to_9216_byte_packets");
2049 
2050 	SYSCTL_ADD_QUAD(ctx, children,
2051                 OID_AUTO, "tx_9217_to_16383_byte_packets",
2052                 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
2053                 "tx_9217_to_16383_byte_packets");
2054 
2055 	SYSCTL_ADD_QUAD(ctx, children,
2056                 OID_AUTO, "tx_pause_frames",
2057                 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
2058                 "tx_pause_frames");
2059 
2060 	SYSCTL_ADD_QUAD(ctx, children,
2061                 OID_AUTO, "tx_pfc_frames",
2062                 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
2063                 "tx_pfc_frames");
2064 
2065 	SYSCTL_ADD_QUAD(ctx, children,
2066                 OID_AUTO, "tx_lpi_entry_count",
2067                 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
2068                 "tx_lpi_entry_count");
2069 
2070 	SYSCTL_ADD_QUAD(ctx, children,
2071                 OID_AUTO, "tx_total_collisions",
2072                 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
2073                 "tx_total_collisions");
2074 
2075 	SYSCTL_ADD_QUAD(ctx, children,
2076                 OID_AUTO, "brb_truncates",
2077                 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
2078                 "brb_truncates");
2079 
2080 	SYSCTL_ADD_QUAD(ctx, children,
2081                 OID_AUTO, "brb_discards",
2082                 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
2083                 "brb_discards");
2084 
2085 	SYSCTL_ADD_QUAD(ctx, children,
2086                 OID_AUTO, "rx_mac_bytes",
2087                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
2088                 "rx_mac_bytes");
2089 
2090 	SYSCTL_ADD_QUAD(ctx, children,
2091                 OID_AUTO, "rx_mac_uc_packets",
2092                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
2093                 "rx_mac_uc_packets");
2094 
2095 	SYSCTL_ADD_QUAD(ctx, children,
2096                 OID_AUTO, "rx_mac_mc_packets",
2097                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
2098                 "rx_mac_mc_packets");
2099 
2100 	SYSCTL_ADD_QUAD(ctx, children,
2101                 OID_AUTO, "rx_mac_bc_packets",
2102                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
2103                 "rx_mac_bc_packets");
2104 
2105 	SYSCTL_ADD_QUAD(ctx, children,
2106                 OID_AUTO, "rx_mac_frames_ok",
2107                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
2108                 "rx_mac_frames_ok");
2109 
2110 	SYSCTL_ADD_QUAD(ctx, children,
2111                 OID_AUTO, "tx_mac_bytes",
2112                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
2113                 "tx_mac_bytes");
2114 
2115 	SYSCTL_ADD_QUAD(ctx, children,
2116                 OID_AUTO, "tx_mac_uc_packets",
2117                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
2118                 "tx_mac_uc_packets");
2119 
2120 	SYSCTL_ADD_QUAD(ctx, children,
2121                 OID_AUTO, "tx_mac_mc_packets",
2122                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
2123                 "tx_mac_mc_packets");
2124 
2125 	SYSCTL_ADD_QUAD(ctx, children,
2126                 OID_AUTO, "tx_mac_bc_packets",
2127                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
2128                 "tx_mac_bc_packets");
2129 
2130 	SYSCTL_ADD_QUAD(ctx, children,
2131                 OID_AUTO, "tx_mac_ctrl_frames",
2132                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
2133                 "tx_mac_ctrl_frames");
2134 	return;
2135 }
2136 
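/*
 * The registrations above all follow one shape: the leaf name, the backing
 * counter field, and the description string share the same identifier. A
 * wrapper macro of the following form (a sketch only, not part of this
 * driver) would express each common-block stat in one line:
 *
 *   #define QLNX_HW_CMN_STAT(name)                                \
 *           SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, #name,       \
 *               CTLFLAG_RD, &ha->hw_stats.common.name, #name)
 *
 *   QLNX_HW_CMN_STAT(rx_ucast_bytes);
 *   QLNX_HW_CMN_STAT(tx_pause_frames);
 */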
2137 static void
2138 qlnx_add_sysctls(qlnx_host_t *ha)
2139 {
2140         device_t		dev = ha->pci_dev;
2141 	struct sysctl_ctx_list	*ctx;
2142 	struct sysctl_oid_list	*children;
2143 
2144 	ctx = device_get_sysctl_ctx(dev);
2145 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2146 
2147 	qlnx_add_fp_stats_sysctls(ha);
2148 	qlnx_add_sp_stats_sysctls(ha);
2149 
2150 	if (qlnx_vf_device(ha) != 0)
2151 		qlnx_add_hw_stats_sysctls(ha);
2152 
2153 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
2154 		CTLFLAG_RD, qlnx_ver_str, 0,
2155 		"Driver Version");
2156 
2157 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
2158 		CTLFLAG_RD, ha->stormfw_ver, 0,
2159 		"STORM Firmware Version");
2160 
2161 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
2162 		CTLFLAG_RD, ha->mfw_ver, 0,
2163 		"Management Firmware Version");
2164 
2165         SYSCTL_ADD_UINT(ctx, children,
2166                 OID_AUTO, "personality", CTLFLAG_RD,
2167                 &ha->personality, ha->personality,
2168 		"\tpersonality = 0 => Ethernet Only\n"
2169 		"\tpersonality = 3 => Ethernet and RoCE\n"
2170 		"\tpersonality = 4 => Ethernet and iWARP\n"
2171 		"\tpersonality = 6 => Default in Shared Memory\n");
2172 
2173         ha->dbg_level = 0;
2174         SYSCTL_ADD_UINT(ctx, children,
2175                 OID_AUTO, "debug", CTLFLAG_RW,
2176                 &ha->dbg_level, ha->dbg_level, "Debug Level");
2177 
2178         ha->dp_level = 0x01;
2179         SYSCTL_ADD_UINT(ctx, children,
2180                 OID_AUTO, "dp_level", CTLFLAG_RW,
2181                 &ha->dp_level, ha->dp_level, "DP Level");
2182 
2183         ha->dbg_trace_lro_cnt = 0;
2184         SYSCTL_ADD_UINT(ctx, children,
2185                 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
2186                 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2187 		"Trace LRO Counts");
2188 
2189         ha->dbg_trace_tso_pkt_len = 0;
2190         SYSCTL_ADD_UINT(ctx, children,
2191                 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
2192                 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2193 		"Trace TSO packet lengths");
2194 
2195         ha->dp_module = 0;
2196         SYSCTL_ADD_UINT(ctx, children,
2197                 OID_AUTO, "dp_module", CTLFLAG_RW,
2198                 &ha->dp_module, ha->dp_module, "DP Module");
2199 
2200         ha->err_inject = 0;
2201 
2202         SYSCTL_ADD_UINT(ctx, children,
2203                 OID_AUTO, "err_inject", CTLFLAG_RW,
2204                 &ha->err_inject, ha->err_inject, "Error Inject");
2205 
2206 	ha->storm_stats_enable = 0;
2207 
2208 	SYSCTL_ADD_UINT(ctx, children,
2209 		OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
2210 		&ha->storm_stats_enable, ha->storm_stats_enable,
2211 		"Enable Storm Statistics Gathering");
2212 
2213 	ha->storm_stats_index = 0;
2214 
2215 	SYSCTL_ADD_UINT(ctx, children,
2216 		OID_AUTO, "storm_stats_index", CTLFLAG_RD,
2217 		&ha->storm_stats_index, ha->storm_stats_index,
2218 		"Storm Statistics Current Index");
2219 
2220 	ha->grcdump_taken = 0;
2221 	SYSCTL_ADD_UINT(ctx, children,
2222 		OID_AUTO, "grcdump_taken", CTLFLAG_RD,
2223 		&ha->grcdump_taken, ha->grcdump_taken,
2224 		"grcdump_taken");
2225 
2226 	ha->idle_chk_taken = 0;
2227 	SYSCTL_ADD_UINT(ctx, children,
2228 		OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
2229 		&ha->idle_chk_taken, ha->idle_chk_taken,
2230 		"idle_chk_taken");
2231 
2232 	SYSCTL_ADD_UINT(ctx, children,
2233 		OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
2234 		&ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2235 		"rx_coalesce_usecs");
2236 
2237 	SYSCTL_ADD_UINT(ctx, children,
2238 		OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
2239 		&ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2240 		"tx_coalesce_usecs");
2241 
2242 	SYSCTL_ADD_PROC(ctx, children,
2243 	    OID_AUTO, "trigger_dump",
2244 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2245 	    (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2246 
2247 	SYSCTL_ADD_PROC(ctx, children,
2248 	    OID_AUTO, "set_rx_coalesce_usecs",
2249 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2250 	    (void *)ha, 0, qlnx_set_rx_coalesce, "I",
2251 	    "rx interrupt coalesce period microseconds");
2252 
2253 	SYSCTL_ADD_PROC(ctx, children,
2254 	    OID_AUTO, "set_tx_coalesce_usecs",
2255 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2256 	    (void *)ha, 0, qlnx_set_tx_coalesce, "I",
2257 	    "tx interrupt coalesce period microseconds");
2258 
2259 	ha->rx_pkt_threshold = 128;
2260         SYSCTL_ADD_UINT(ctx, children,
2261                 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
2262                 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2263 		"No. of Rx Pkts to process at a time");
2264 
2265 	ha->rx_jumbo_buf_eq_mtu = 0;
2266         SYSCTL_ADD_UINT(ctx, children,
2267                 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
2268                 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2269 		"== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
2270 		"otherwise Rx Jumbo buffers are set to >= MTU size\n");
2271 
2272 	SYSCTL_ADD_QUAD(ctx, children,
2273                 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
2274 		&ha->err_illegal_intr, "err_illegal_intr");
2275 
2276 	SYSCTL_ADD_QUAD(ctx, children,
2277                 OID_AUTO, "err_fp_null", CTLFLAG_RD,
2278 		&ha->err_fp_null, "err_fp_null");
2279 
2280 	SYSCTL_ADD_QUAD(ctx, children,
2281                 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
2282 		&ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2283 	return;
2284 }
2285 
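/*
 * The read/write leaves registered above are the driver's runtime tuning
 * knobs. Illustrative usage from userland, assuming the device attaches
 * as qlnxe unit 0 (device name and unit are assumptions):
 *
 *   # sysctl dev.qlnxe.0.debug=1                 # raise driver debug level
 *   # sysctl dev.qlnxe.0.rx_pkt_threshold=256    # Rx packets per pass
 *   # sysctl dev.qlnxe.0.set_rx_coalesce_usecs=64
 *   # sysctl dev.qlnxe.0.trigger_dump=1          # force a grcdump
 */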
2286 /*****************************************************************************
2287  * Operating System Network Interface Functions
2288  *****************************************************************************/
2289 
2290 static void
2291 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2292 {
2293 	uint16_t	device_id;
2294         if_t		ifp;
2295 
2296         ifp = ha->ifp = if_alloc(IFT_ETHER);
2297         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2298 
2299 	device_id = pci_get_device(ha->pci_dev);
2300 
2301         if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2302 		if_setbaudrate(ifp, IF_Gbps(40));
2303         else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2304 			(device_id == QLOGIC_PCI_DEVICE_ID_8070))
2305 		if_setbaudrate(ifp, IF_Gbps(25));
2306         else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2307 		if_setbaudrate(ifp, IF_Gbps(50));
2308         else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2309 		if_setbaudrate(ifp, IF_Gbps(100));
2310 
2311         if_setinitfn(ifp, qlnx_init);
2312         if_setsoftc(ifp, ha);
2313         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2314         if_setioctlfn(ifp, qlnx_ioctl);
2315         if_settransmitfn(ifp, qlnx_transmit);
2316         if_setqflushfn(ifp, qlnx_qflush);
2317 
2318         if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha));
2319         if_setsendqready(ifp);
2320 
2321 	if_setgetcounterfn(ifp, qlnx_get_counter);
2322 
2323         ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2324 
2325         memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2326 
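	/*
	 * If the NVRAM-provided MAC address is all zeroes, synthesize a
	 * fallback: a fixed OUI (00:0e:1e) with the low three octets drawn
	 * from arc4random(), so the attach still yields a usable unicast
	 * address.
	 */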
2327 	if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2328 		!ha->primary_mac[2] && !ha->primary_mac[3] &&
2329 		!ha->primary_mac[4] && !ha->primary_mac[5]) {
2330 		uint32_t rnd;
2331 
2332 		rnd = arc4random();
2333 
2334 		ha->primary_mac[0] = 0x00;
2335 		ha->primary_mac[1] = 0x0e;
2336 		ha->primary_mac[2] = 0x1e;
2337 		ha->primary_mac[3] = rnd & 0xFF;
2338 		ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2339 		ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2340 	}
2341 
2342 	if_setcapabilities(ifp, IFCAP_HWCSUM);
2343 	if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
2344 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
2345 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
2346 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
2347 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
2348 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
2349 	if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
2350 	if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
2351 	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
2352 	if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);
2353 	if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
2354 
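	/*
	 * TSO sizing: the maximum offloaded frame leaves room for an
	 * 802.1Q-tagged Ethernet header, one descriptor is reserved for
	 * that header, and no single DMA segment may exceed the Tx mbuf
	 * buffer size.
	 */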
2355 	if_sethwtsomax(ifp,  QLNX_MAX_TSO_FRAME_SIZE -
2356 				(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2357 	if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */
2358 	if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE);
2359 
2360         if_setcapenable(ifp, if_getcapabilities(ifp));
2361 
2362 	if_sethwassist(ifp, CSUM_IP);
2363 	if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
2364 	if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
2365 	if_sethwassistbits(ifp, CSUM_TSO, 0);
2366 
2367 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2368 
2369         ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
2370 		qlnx_media_status);
2371 
2372         if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2373 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2374 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2375 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2376         } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2377 			(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2378 		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2379 		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2380         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2381 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2382 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2383         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2384 		ifmedia_add(&ha->media,
2385 			(IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2386 		ifmedia_add(&ha->media,
2387 			(IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2388 		ifmedia_add(&ha->media,
2389 			(IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2390 	}
2391 
2392         ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2393         ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2394 
2395         ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2396 
2397 	ether_ifattach(ifp, ha->primary_mac);
2398 	bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2399 
2400         QL_DPRINT2(ha, "exit\n");
2401 
2402         return;
2403 }
2404 
2405 static void
2406 qlnx_init_locked(qlnx_host_t *ha)
2407 {
2408 	if_t		ifp = ha->ifp;
2409 
2410 	QL_DPRINT1(ha, "Driver Initialization start \n");
2411 
2412 	qlnx_stop(ha);
2413 
2414 	if (qlnx_load(ha) == 0) {
2415 		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2416 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2417 
2418 #ifdef QLNX_ENABLE_IWARP
2419 		if (qlnx_vf_device(ha) != 0) {
2420 			qlnx_rdma_dev_open(ha);
2421 		}
2422 #endif /* #ifdef QLNX_ENABLE_IWARP */
2423 	}
2424 
2425 	return;
2426 }
2427 
2428 static void
2429 qlnx_init(void *arg)
2430 {
2431 	qlnx_host_t	*ha;
2432 
2433 	ha = (qlnx_host_t *)arg;
2434 
2435 	QL_DPRINT2(ha, "enter\n");
2436 
2437 	QLNX_LOCK(ha);
2438 	qlnx_init_locked(ha);
2439 	QLNX_UNLOCK(ha);
2440 
2441 	QL_DPRINT2(ha, "exit\n");
2442 
2443 	return;
2444 }
2445 
2446 static int
2447 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2448 {
2449 	struct ecore_filter_mcast	*mcast;
2450 	struct ecore_dev		*cdev;
2451 	int				rc;
2452 
2453 	cdev = &ha->cdev;
2454 
2455 	mcast = &ha->ecore_mcast;
2456 	bzero(mcast, sizeof(struct ecore_filter_mcast));
2457 
2458 	if (add_mac)
2459 		mcast->opcode = ECORE_FILTER_ADD;
2460 	else
2461 		mcast->opcode = ECORE_FILTER_REMOVE;
2462 
2463 	mcast->num_mc_addrs = 1;
2464 	memcpy(mcast->mac, mac_addr, ETH_ALEN);
2465 
2466 	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2467 
2468 	return (rc);
2469 }
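/*
 * Illustrative call sequence for the helper above, using the all-hosts
 * multicast group address as a hypothetical example:
 *
 *   uint8_t mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *
 *   qlnx_config_mcast_mac_addr(ha, mac, 1);   // program the filter
 *   qlnx_config_mcast_mac_addr(ha, mac, 0);   // remove it again
 */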
2470 
2471 static int
2472 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2473 {
2474         int	i;
2475 
2476         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2477                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2478                         return 0; /* it's already been added */
2479         }
2480 
2481         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2482                 if ((ha->mcast[i].addr[0] == 0) &&
2483                         (ha->mcast[i].addr[1] == 0) &&
2484                         (ha->mcast[i].addr[2] == 0) &&
2485                         (ha->mcast[i].addr[3] == 0) &&
2486                         (ha->mcast[i].addr[4] == 0) &&
2487                         (ha->mcast[i].addr[5] == 0)) {
2488                         if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2489                                 return (-1);
2490 
2491                         bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2492                         ha->nmcast++;
2493 
2494                         return 0;
2495                 }
2496         }
2497         return 0;
2498 }
2499 
2500 static int
2501 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2502 {
2503         int	i;
2504 
2505         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2506                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2507                         if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2508                                 return (-1);
2509 
2510                         ha->mcast[i].addr[0] = 0;
2511                         ha->mcast[i].addr[1] = 0;
2512                         ha->mcast[i].addr[2] = 0;
2513                         ha->mcast[i].addr[3] = 0;
2514                         ha->mcast[i].addr[4] = 0;
2515                         ha->mcast[i].addr[5] = 0;
2516 
2517                         ha->nmcast--;
2518 
2519                         return 0;
2520                 }
2521         }
2522         return 0;
2523 }
2524 
2525 /*
2526  * Name: qlnx_hw_set_multi
2527  * Function: Sets the Multicast Addresses provided by the host O.S. into the
2528  *      hardware (for the given interface)
2529  */
2530 static void
2531 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2532 	uint32_t add_mac)
2533 {
2534         int	i;
2535 
2536         for (i = 0; i < mcnt; i++) {
2537                 if (add_mac) {
2538                         if (qlnx_hw_add_mcast(ha, mta))
2539                                 break;
2540                 } else {
2541                         if (qlnx_hw_del_mcast(ha, mta))
2542                                 break;
2543                 }
2544 
2545                 mta += ETHER_HDR_LEN;
2546         }
2547         return;
2548 }
2549 
2550 static u_int
2551 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
2552 {
2553 	uint8_t *mta = arg;
2554 
2555 	if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2556 		return (0);
2557 
2558 	bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2559 
2560 	return (1);
2561 }
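/*
 * qlnx_copy_maddr() is the if_foreach_llmaddr() callback used below: it is
 * invoked once per link-level multicast address with the running count in
 * mcnt, returning 1 to count an entry and 0 once the table limit is
 * reached (further addresses are ignored). Note the table stride is
 * ETHER_HDR_LEN (14) rather than ETHER_ADDR_LEN (6), matching
 * qlnx_hw_set_multi()'s walk of the same buffer.
 */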
2562 
2563 static int
2564 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2565 {
2566 	uint8_t		mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
2567 	if_t		ifp = ha->ifp;
2568 	u_int		mcnt;
2569 
2570 	if (qlnx_vf_device(ha) == 0)
2571 		return (0);
2572 
2573 	mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta);
2574 
2575 	QLNX_LOCK(ha);
2576 	qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2577 	QLNX_UNLOCK(ha);
2578 
2579 	return (0);
2580 }
2581 
2582 static int
2583 qlnx_set_promisc(qlnx_host_t *ha, int enabled)
2584 {
2585 	int	rc = 0;
2586 	uint8_t	filter;
2587 
2588 	if (qlnx_vf_device(ha) == 0)
2589 		return (0);
2590 
2591 	filter = ha->filter;
2592 	if (enabled) {
2593 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2594 		filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2595 	} else {
2596 		filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
2597 		filter &= ~ECORE_ACCEPT_UCAST_UNMATCHED;
2598 	}
2599 
2600 	rc = qlnx_set_rx_accept_filter(ha, filter);
2601 	return (rc);
2602 }
2603 
2604 static int
2605 qlnx_set_allmulti(qlnx_host_t *ha, int enabled)
2606 {
2607 	int	rc = 0;
2608 	uint8_t	filter;
2609 
2610 	if (qlnx_vf_device(ha) == 0)
2611 		return (0);
2612 
2613 	filter = ha->filter;
2614 	if (enabled) {
2615 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2616 	} else {
2617 		filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
2618 	}
2619 	rc = qlnx_set_rx_accept_filter(ha, filter);
2620 
2621 	return (rc);
2622 }
2623 
2624 static int
2625 qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
2626 {
2627 	int		ret = 0, mask;
2628 	int		flags;
2629 	struct ifreq	*ifr = (struct ifreq *)data;
2630 #ifdef INET
2631 	struct ifaddr	*ifa = (struct ifaddr *)data;
2632 #endif
2633 	qlnx_host_t	*ha;
2634 
2635 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2636 
2637 	switch (cmd) {
2638 	case SIOCSIFADDR:
2639 		QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2640 
2641 #ifdef INET
2642 		if (ifa->ifa_addr->sa_family == AF_INET) {
2643 			if_setflagbits(ifp, IFF_UP, 0);
2644 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
2645 				QLNX_LOCK(ha);
2646 				qlnx_init_locked(ha);
2647 				QLNX_UNLOCK(ha);
2648 			}
2649 			QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2650 				   cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2651 
2652 			arp_ifinit(ifp, ifa);
2653 			break;
2654 		}
2655 #endif
2656 		ether_ioctl(ifp, cmd, data);
2657 		break;
2658 
2659 	case SIOCSIFMTU:
2660 		QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2661 
2662 		if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2663 			ret = EINVAL;
2664 		} else {
2665 			QLNX_LOCK(ha);
2666 			if_setmtu(ifp, ifr->ifr_mtu);
2667 			ha->max_frame_size =
2668 				if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2669 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2670 				qlnx_init_locked(ha);
2671 			}
2672 
2673 			QLNX_UNLOCK(ha);
2674 		}
2675 
2676 		break;
2677 
2678 	case SIOCSIFFLAGS:
2679 		QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2680 
2681 		QLNX_LOCK(ha);
2682 		flags = if_getflags(ifp);
2683 
2684 		if (flags & IFF_UP) {
2685 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2686 				if ((flags ^ ha->if_flags) &
2687 					IFF_PROMISC) {
2688 					ret = qlnx_set_promisc(ha, flags & IFF_PROMISC);
2689 				} else if ((flags ^ ha->if_flags) &
2690 					IFF_ALLMULTI) {
2691 					ret = qlnx_set_allmulti(ha, flags & IFF_ALLMULTI);
2692 				}
2693 			} else {
2694 				ha->max_frame_size = if_getmtu(ifp) +
2695 					ETHER_HDR_LEN + ETHER_CRC_LEN;
2696 				qlnx_init_locked(ha);
2697 			}
2698 		} else {
2699 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2700 				qlnx_stop(ha);
2701 		}
2702 
2703 		ha->if_flags = if_getflags(ifp);
2704 		QLNX_UNLOCK(ha);
2705 		break;
2706 
2707 	case SIOCADDMULTI:
2708 		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2709 
2710 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2711 			if (qlnx_set_multi(ha, 1))
2712 				ret = EINVAL;
2713 		}
2714 		break;
2715 
2716 	case SIOCDELMULTI:
2717 		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2718 
2719 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2720 			if (qlnx_set_multi(ha, 0))
2721 				ret = EINVAL;
2722 		}
2723 		break;
2724 
2725 	case SIOCSIFMEDIA:
2726 	case SIOCGIFMEDIA:
2727 		QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2728 
2729 		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2730 		break;
2731 
2732 	case SIOCSIFCAP:
2733 
2734 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2735 
2736 		QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2737 
2738 		if (mask & IFCAP_HWCSUM)
2739 			if_togglecapenable(ifp, IFCAP_HWCSUM);
2740 		if (mask & IFCAP_TSO4)
2741 			if_togglecapenable(ifp, IFCAP_TSO4);
2742 		if (mask & IFCAP_TSO6)
2743 			if_togglecapenable(ifp, IFCAP_TSO6);
2744 		if (mask & IFCAP_VLAN_HWTAGGING)
2745 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2746 		if (mask & IFCAP_VLAN_HWTSO)
2747 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2748 		if (mask & IFCAP_LRO)
2749 			if_togglecapenable(ifp, IFCAP_LRO);
2750 
2751 		QLNX_LOCK(ha);
2752 
2753 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2754 			qlnx_init_locked(ha);
2755 
2756 		QLNX_UNLOCK(ha);
2757 
2758 		VLAN_CAPABILITIES(ifp);
2759 		break;
2760 
2761 	case SIOCGI2C:
2762 	{
2763 		struct ifi2creq i2c;
2764 		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2765 		struct ecore_ptt *p_ptt;
2766 
2767 		ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2768 
2769 		if (ret)
2770 			break;
2771 
2772 		if ((i2c.len > sizeof (i2c.data)) ||
2773 			(i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2774 			ret = EINVAL;
2775 			break;
2776 		}
2777 
2778 		p_ptt = ecore_ptt_acquire(p_hwfn);
2779 
2780 		if (!p_ptt) {
2781 			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2782 			ret = ERESTART;
2783 			break;
2784 		}
2785 
2786 		ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2787 			(ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2788 			i2c.len, &i2c.data[0]);
2789 
2790 		ecore_ptt_release(p_hwfn, p_ptt);
2791 
2792 		if (ret) {
2793 			ret = ENODEV;
2794 			break;
2795 		}
2796 
2797 		ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2798 
2799 		QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2800 			 len = %d addr = 0x%02x offset = 0x%04x \
2801 			 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2802 			 0x%02x 0x%02x 0x%02x\n",
2803 			ret, i2c.len, i2c.dev_addr, i2c.offset,
2804 			i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2805 			i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2806 		break;
2807 	}
2808 
2809 	default:
2810 		QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2811 		ret = ether_ioctl(ifp, cmd, data);
2812 		break;
2813 	}
2814 
2815 	return (ret);
2816 }
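/*
 * The SIOCGI2C case above services SFP/QSFP module EEPROM reads, as used
 * by ifconfig(8) for transceiver information. A minimal userland sketch,
 * with the interface name assumed and error handling omitted:
 *
 *   struct ifi2creq i2c = { .dev_addr = 0xA0, .offset = 0, .len = 8 };
 *   struct ifreq ifr;
 *
 *   memset(&ifr, 0, sizeof(ifr));
 *   strlcpy(ifr.ifr_name, "qlnxe0", sizeof(ifr.ifr_name));
 *   ifr.ifr_data = (caddr_t)&i2c;
 *   if (ioctl(sock, SIOCGI2C, &ifr) == 0)    // sock: any AF_INET socket
 *           process(i2c.data, i2c.len);      // hypothetical consumer
 */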
2817 
2818 static int
2819 qlnx_media_change(if_t ifp)
2820 {
2821 	qlnx_host_t	*ha;
2822 	struct ifmedia	*ifm;
2823 	int		ret = 0;
2824 
2825 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2826 
2827 	QL_DPRINT2(ha, "enter\n");
2828 
2829 	ifm = &ha->media;
2830 
2831 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2832 		ret = EINVAL;
2833 
2834 	QL_DPRINT2(ha, "exit\n");
2835 
2836 	return (ret);
2837 }
2838 
2839 static void
2840 qlnx_media_status(if_t ifp, struct ifmediareq *ifmr)
2841 {
2842 	qlnx_host_t		*ha;
2843 
2844 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2845 
2846 	QL_DPRINT2(ha, "enter\n");
2847 
2848 	ifmr->ifm_status = IFM_AVALID;
2849 	ifmr->ifm_active = IFM_ETHER;
2850 
2851 	if (ha->link_up) {
2852 		ifmr->ifm_status |= IFM_ACTIVE;
2853 		ifmr->ifm_active |=
2854 			(IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2855 
2856 		if (ha->if_link.link_partner_caps &
2857 			(QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2858 			ifmr->ifm_active |=
2859 				(IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2860 	}
2861 
2862 	QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2863 
2864 	return;
2865 }
2866 
2867 static void
2868 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2869 	struct qlnx_tx_queue *txq)
2870 {
2871 	u16			idx;
2872 	struct mbuf		*mp;
2873 	bus_dmamap_t		map;
2874 	int			i;
2875 //	struct eth_tx_bd	*tx_data_bd;
2876 	struct eth_tx_1st_bd	*first_bd;
2877 	int			nbds = 0;
2878 
2879 	idx = txq->sw_tx_cons;
2880 	mp = txq->sw_tx_ring[idx].mp;
2881 	map = txq->sw_tx_ring[idx].map;
2882 
2883 	if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2884 		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2885 
2886 		QL_DPRINT1(ha, "(mp == NULL) "
2887 			" tx_idx = 0x%x"
2888 			" ecore_prod_idx = 0x%x"
2889 			" ecore_cons_idx = 0x%x"
2890 			" hw_bd_cons = 0x%x"
2891 			" txq_db_last = 0x%x"
2892 			" elem_left = 0x%x\n",
2893 			fp->rss_id,
2894 			ecore_chain_get_prod_idx(&txq->tx_pbl),
2895 			ecore_chain_get_cons_idx(&txq->tx_pbl),
2896 			le16toh(*txq->hw_cons_ptr),
2897 			txq->tx_db.raw,
2898 			ecore_chain_get_elem_left(&txq->tx_pbl));
2899 
2900 		fp->err_tx_free_pkt_null++;
2901 
2902 		//DEBUG
2903 		qlnx_trigger_dump(ha);
2904 
2905 		return;
2906 	} else {
2907 		QLNX_INC_OPACKETS((ha->ifp));
2908 		QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2909 
2910 		bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2911 		bus_dmamap_unload(ha->tx_tag, map);
2912 
2913 		fp->tx_pkts_freed++;
2914 		fp->tx_pkts_completed++;
2915 
2916 		m_freem(mp);
2917 	}
2918 
2919 	first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2920 	nbds = first_bd->data.nbds;
2921 
2922 //	BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2923 
2924 	for (i = 1; i < nbds; i++) {
2925 		/* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl);
2926 //		BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2927 	}
2928 	txq->sw_tx_ring[idx].flags = 0;
2929 	txq->sw_tx_ring[idx].mp = NULL;
2930 	txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2931 
2932 	return;
2933 }
2934 
2935 static void
2936 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2937 	struct qlnx_tx_queue *txq)
2938 {
2939 	u16 hw_bd_cons;
2940 	u16 ecore_cons_idx;
2941 	uint16_t diff;
2942 	uint16_t idx, idx2;
2943 
2944 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2945 
2946 	while (hw_bd_cons !=
2947 		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2948 		diff = hw_bd_cons - ecore_cons_idx;
2949 		if ((diff > TX_RING_SIZE) ||
2950 			QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2951 			QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2952 
2953 			QL_DPRINT1(ha, "(diff = 0x%x) "
2954 				" tx_idx = 0x%x"
2955 				" ecore_prod_idx = 0x%x"
2956 				" ecore_cons_idx = 0x%x"
2957 				" hw_bd_cons = 0x%x"
2958 				" txq_db_last = 0x%x"
2959 				" elem_left = 0x%x\n",
2960 				diff,
2961 				fp->rss_id,
2962 				ecore_chain_get_prod_idx(&txq->tx_pbl),
2963 				ecore_chain_get_cons_idx(&txq->tx_pbl),
2964 				le16toh(*txq->hw_cons_ptr),
2965 				txq->tx_db.raw,
2966 				ecore_chain_get_elem_left(&txq->tx_pbl));
2967 
2968 			fp->err_tx_cons_idx_conflict++;
2969 
2970 			//DEBUG
2971 			qlnx_trigger_dump(ha);
2972 		}
2973 
2974 		idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2975 		idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
2976 		prefetch(txq->sw_tx_ring[idx].mp);
2977 		prefetch(txq->sw_tx_ring[idx2].mp);
2978 
2979 		qlnx_free_tx_pkt(ha, fp, txq);
2980 
2981 		txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2982 	}
2983 	return;
2984 }
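/*
 * The consumer-index arithmetic above relies on TX_RING_SIZE being a
 * power of two, so masking with (TX_RING_SIZE - 1) wraps the index; e.g.
 * with a 1024-entry ring (size chosen only for illustration),
 * (1023 + 1) & 1023 == 0.
 */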
2985 
2986 static int
2987 qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
2988 {
2989         int                     ret = 0;
2990         struct qlnx_tx_queue    *txq;
2991         qlnx_host_t *           ha;
2992         uint16_t elem_left;
2993 
2994         txq = fp->txq[0];
2995         ha = (qlnx_host_t *)fp->edev;
2996 
2997         if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) {
2998                 if(mp != NULL)
2999                         ret = drbr_enqueue(ifp, fp->tx_br, mp);
3000                 return (ret);
3001         }
3002 
3003         if(mp != NULL)
3004                 ret  = drbr_enqueue(ifp, fp->tx_br, mp);
3005 
3006         mp = drbr_peek(ifp, fp->tx_br);
3007 
3008         while (mp != NULL) {
3009                 if (qlnx_send(ha, fp, &mp)) {
3010                         if (mp != NULL) {
3011                                 drbr_putback(ifp, fp->tx_br, mp);
3012                         } else {
3013                                 fp->tx_pkts_processed++;
3014                                 drbr_advance(ifp, fp->tx_br);
3015                         }
3016                         goto qlnx_transmit_locked_exit;
3017 
3018                 } else {
3019                         drbr_advance(ifp, fp->tx_br);
3020                         fp->tx_pkts_transmitted++;
3021                         fp->tx_pkts_processed++;
3022                 }
3023 
3024                 mp = drbr_peek(ifp, fp->tx_br);
3025         }
3026 
3027 qlnx_transmit_locked_exit:
3028         if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3029                 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3030                                         < QLNX_TX_ELEM_MAX_THRESH))
3031                 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3032 
3033         QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3034         return ret;
3035 }
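/*
 * The drain loop above follows the standard drbr/buf_ring contract:
 * drbr_peek() returns the head of the ring without consuming it; if
 * qlnx_send() fails and hands the mbuf back, drbr_putback() reinstates it
 * for a later attempt, while a consumed (NULL) mbuf or a successful send
 * is retired with drbr_advance().
 */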
3036 
3037 static int
3038 qlnx_transmit(if_t ifp, struct mbuf *mp)
3039 {
3040         qlnx_host_t		*ha = (qlnx_host_t *)if_getsoftc(ifp);
3041         struct qlnx_fastpath	*fp;
3042         int			rss_id = 0, ret = 0;
3043 
3044 #ifdef QLNX_TRACEPERF_DATA
3045         uint64_t tx_pkts = 0, tx_compl = 0;
3046 #endif
3047 
3048         QL_DPRINT2(ha, "enter\n");
3049 
3050         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
3051                 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3052 					ha->num_rss;
3053 
3054         fp = &ha->fp_array[rss_id];
3055 
3056         if (fp->tx_br == NULL) {
3057                 ret = EINVAL;
3058                 goto qlnx_transmit_exit;
3059         }
3060 
3061         if (mtx_trylock(&fp->tx_mtx)) {
3062 #ifdef QLNX_TRACEPERF_DATA
3063                         tx_pkts = fp->tx_pkts_transmitted;
3064                         tx_compl = fp->tx_pkts_completed;
3065 #endif
3066 
3067                         ret = qlnx_transmit_locked(ifp, fp, mp);
3068 
3069 #ifdef QLNX_TRACEPERF_DATA
3070                         fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3071                         fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3072 #endif
3073                         mtx_unlock(&fp->tx_mtx);
3074         } else {
3075                 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3076                         ret = drbr_enqueue(ifp, fp->tx_br, mp);
3077                         taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3078                 }
3079         }
3080 
3081 qlnx_transmit_exit:
3082 
3083         QL_DPRINT2(ha, "exit ret = %d\n", ret);
3084         return ret;
3085 }
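/*
 * Queue selection above folds the mbuf flowid through the RSS indirection
 * table size and then into the active ring count. For example, with
 * flowid 77, ECORE_RSS_IND_TABLE_SIZE 128 and num_rss 4 (values chosen
 * only for illustration): (77 % 128) % 4 == 1, so fp_array[1] carries the
 * packet.
 */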
3086 
3087 static void
3088 qlnx_qflush(if_t ifp)
3089 {
3090 	int			rss_id;
3091 	struct qlnx_fastpath	*fp;
3092 	struct mbuf		*mp;
3093 	qlnx_host_t		*ha;
3094 
3095 	ha = (qlnx_host_t *)if_getsoftc(ifp);
3096 
3097 	QL_DPRINT2(ha, "enter\n");
3098 
3099 	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3100 		fp = &ha->fp_array[rss_id];
3101 
3102 		if (fp == NULL)
3103 			continue;
3104 
3105 		if (fp->tx_br) {
3106 			mtx_lock(&fp->tx_mtx);
3107 
3108 			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3109 				fp->tx_pkts_freed++;
3110 				m_freem(mp);
3111 			}
3112 			mtx_unlock(&fp->tx_mtx);
3113 		}
3114 	}
3115 	QL_DPRINT2(ha, "exit\n");
3116 
3117 	return;
3118 }
3119 
3120 static void
3121 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3122 {
3123 	uint32_t		offset;
3124 
3125 	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3126 
3127 	bus_write_4(ha->pci_dbells, offset, value);
3128 	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_READ);
3129 	bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
3130 
3131 	return;
3132 }
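/*
 * The paired bus_barrier() calls after the doorbell write enforce
 * ordering on both the register and doorbell BARs, keeping the producer
 * update from being reordered against subsequent accesses before the CPU
 * proceeds.
 */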
3133 
3134 static uint32_t
3135 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3136 {
3137         struct ether_vlan_header	*eh = NULL;
3138         struct ip			*ip = NULL;
3139         struct ip6_hdr			*ip6 = NULL;
3140         struct tcphdr			*th = NULL;
3141         uint32_t			ehdrlen = 0, ip_hlen = 0, offset = 0;
3142         uint16_t			etype = 0;
3143         uint8_t				buf[sizeof(struct ip6_hdr)];
3144 
3145         eh = mtod(mp, struct ether_vlan_header *);
3146 
3147         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3148                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3149                 etype = ntohs(eh->evl_proto);
3150         } else {
3151                 ehdrlen = ETHER_HDR_LEN;
3152                 etype = ntohs(eh->evl_encap_proto);
3153         }
3154 
3155         switch (etype) {
3156                 case ETHERTYPE_IP:
3157                         ip = (struct ip *)(mp->m_data + ehdrlen);
3158 
3159                         ip_hlen = sizeof (struct ip);
3160 
3161                         if (mp->m_len < (ehdrlen + ip_hlen)) {
3162                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3163                                 ip = (struct ip *)buf;
3164                         }
3165 
3166                         th = (struct tcphdr *)(ip + 1);
3167 			offset = ip_hlen + ehdrlen + (th->th_off << 2);
3168                 break;
3169 
3170                 case ETHERTYPE_IPV6:
3171                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3172 
3173                         ip_hlen = sizeof(struct ip6_hdr);
3174 
3175                         if (mp->m_len < (ehdrlen + ip_hlen)) {
3176                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3177                                         buf);
3178                                 ip6 = (struct ip6_hdr *)buf;
3179                         }
3180                         th = (struct tcphdr *)(ip6 + 1);
3181 			offset = ip_hlen + ehdrlen + (th->th_off << 2);
3182                 break;
3183 
3184                 default:
3185                 break;
3186         }
3187 
3188         return (offset);
3189 }
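/*
 * Worked example for the offset computation above: an untagged IPv4/TCP
 * frame with no IP or TCP options gives ehdrlen = 14, ip_hlen = 20 and
 * th_off = 5, so offset = 20 + 14 + (5 << 2) = 54 bytes of headers
 * preceding the TCP payload.
 */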
3190 
3191 static __inline int
3192 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3193 	uint32_t offset)
3194 {
3195 	int			i;
3196 	uint32_t		sum, nbds_in_hdr = 1;
3197         uint32_t		window;
3198         bus_dma_segment_t	*s_seg;
3199 
3200         /* If the header spans multiple segments, skip those segments */
3201 
3202         if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3203                 return (0);
3204 
3205         i = 0;
3206 
3207         while ((i < nsegs) && (offset >= segs->ds_len)) {
3208                 offset = offset - segs->ds_len;
3209                 segs++;
3210                 i++;
3211                 nbds_in_hdr++;
3212         }
3213 
3214         window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3215 
3216         nsegs = nsegs - i;
3217 
3218         while (nsegs >= window) {
3219                 sum = 0;
3220                 s_seg = segs;
3221 
3222                 for (i = 0; i < window; i++){
3223                         sum += s_seg->ds_len;
3224                         s_seg++;
3225                 }
3226 
3227                 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3228                         fp->tx_lso_wnd_min_len++;
3229                         return (-1);
3230                 }
3231 
3232                 nsegs = nsegs - 1;
3233                 segs++;
3234         }
3235 
3236 	return (0);
3237 }
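/*
 * qlnx_tso_check() enforces the firmware's LSO sliding-window rule: after
 * skipping the segments that carry the headers, every run of
 * (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr) consecutive payload segments
 * must supply at least ETH_TX_LSO_WINDOW_MIN_LEN bytes. A shorter run
 * makes the caller m_defrag() the chain before retrying the DMA load.
 */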
3238 
3239 static int
3240 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3241 {
3242 	bus_dma_segment_t	*segs;
3243 	bus_dmamap_t		map = 0;
3244 	uint32_t		nsegs = 0;
3245 	int			ret = -1;
3246 	struct mbuf		*m_head = *m_headp;
3247 	uint16_t		idx = 0;
3248 	uint16_t		elem_left;
3249 
3250 	uint8_t			nbd = 0;
3251 	struct qlnx_tx_queue    *txq;
3252 
3253 	struct eth_tx_1st_bd    *first_bd;
3254 	struct eth_tx_2nd_bd    *second_bd;
3255 	struct eth_tx_3rd_bd    *third_bd;
3256 	struct eth_tx_bd        *tx_data_bd;
3257 
3258 	int			seg_idx = 0;
3259 	uint32_t		nbds_in_hdr = 0;
3260 	uint32_t		offset = 0;
3261 
3262 #ifdef QLNX_TRACE_PERF_DATA
3263         uint16_t                bd_used;
3264 #endif
3265 
3266 	QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3267 
3268 	if (!ha->link_up)
3269 		return (-1);
3270 
3271 	first_bd	= NULL;
3272 	second_bd	= NULL;
3273 	third_bd	= NULL;
3274 	tx_data_bd	= NULL;
3275 
3276 	txq = fp->txq[0];
3277 
3278         if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3279 		QLNX_TX_ELEM_MIN_THRESH) {
3280                 fp->tx_nsegs_gt_elem_left++;
3281                 fp->err_tx_nsegs_gt_elem_left++;
3282 
3283                 return (ENOBUFS);
3284         }
3285 
3286 	idx = txq->sw_tx_prod;
3287 
3288 	map = txq->sw_tx_ring[idx].map;
3289 	segs = txq->segs;
3290 
3291 	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3292 			BUS_DMA_NOWAIT);
3293 
3294 	if (ha->dbg_trace_tso_pkt_len) {
3295 		if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3296 			if (!fp->tx_tso_min_pkt_len) {
3297 				fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3298 				fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
3299 			} else {
3300 				if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3301 					fp->tx_tso_min_pkt_len =
3302 						m_head->m_pkthdr.len;
3303 				if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3304 					fp->tx_tso_max_pkt_len =
3305 						m_head->m_pkthdr.len;
3306 			}
3307 		}
3308 	}
3309 
3310 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3311 		offset = qlnx_tcp_offset(ha, m_head);
3312 
3313 	if ((ret == EFBIG) ||
3314 		((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3315 			(!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3316 		((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3317 			qlnx_tso_check(fp, segs, nsegs, offset))))) {
3318 		struct mbuf *m;
3319 
3320 		QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3321 
3322 		fp->tx_defrag++;
3323 
3324 		m = m_defrag(m_head, M_NOWAIT);
3325 		if (m == NULL) {
3326 			fp->err_tx_defrag++;
3327 			fp->tx_pkts_freed++;
3328 			m_freem(m_head);
3329 			*m_headp = NULL;
3330 			QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3331 			return (ENOBUFS);
3332 		}
3333 
3334 		m_head = m;
3335 		*m_headp = m_head;
3336 
3337 		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3338 				segs, &nsegs, BUS_DMA_NOWAIT))) {
3339 			fp->err_tx_defrag_dmamap_load++;
3340 
3341 			QL_DPRINT1(ha,
3342 				"bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3343 				ret, m_head->m_pkthdr.len);
3344 
3345 			fp->tx_pkts_freed++;
3346 			m_freem(m_head);
3347 			*m_headp = NULL;
3348 
3349 			return (ret);
3350 		}
3351 
3352 		if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3353 			!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3354 			fp->err_tx_non_tso_max_seg++;
3355 
3356 			QL_DPRINT1(ha,
3357 				"(%d) nsegs too many for non-TSO [%d, %d]\n",
3358 				ret, nsegs, m_head->m_pkthdr.len);
3359 
3360 			fp->tx_pkts_freed++;
3361 			m_freem(m_head);
3362 			*m_headp = NULL;
3363 
3364 			return (ret);
3365 		}
3366 		if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3367 			offset = qlnx_tcp_offset(ha, m_head);
3368 
3369 	} else if (ret) {
3370 		fp->err_tx_dmamap_load++;
3371 
3372 		QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3373 			   ret, m_head->m_pkthdr.len);
3374 		fp->tx_pkts_freed++;
3375 		m_freem(m_head);
3376 		*m_headp = NULL;
3377 		return (ret);
3378 	}
3379 
3380 	QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3381 
3382 	if (ha->dbg_trace_tso_pkt_len) {
3383 		if (nsegs < QLNX_FP_MAX_SEGS)
3384 			fp->tx_pkts[(nsegs - 1)]++;
3385 		else
3386 			fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3387 	}
3388 
3389 #ifdef QLNX_TRACE_PERF_DATA
3390         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3391                 if(m_head->m_pkthdr.len <= 2048)
3392                         fp->tx_pkts_hist[0]++;
3393                 else if((m_head->m_pkthdr.len > 2048) &&
3394 				(m_head->m_pkthdr.len <= 4096))
3395                         fp->tx_pkts_hist[1]++;
3396                 else if((m_head->m_pkthdr.len > 4096) &&
3397 				(m_head->m_pkthdr.len <= 8192))
3398                         fp->tx_pkts_hist[2]++;
3399                 else if((m_head->m_pkthdr.len > 8192) &&
3400 				(m_head->m_pkthdr.len <= 12288 ))
3401 				(m_head->m_pkthdr.len <= 12288))
3402                 else if((m_head->m_pkthdr.len > 12288) &&
3403 				(m_head->m_pkthdr.len <= 16384))
3404                         fp->tx_pkts_hist[4]++;
3405                 else if((m_head->m_pkthdr.len > 16384) &&
3406 				(m_head->m_pkthdr.len <= 20480))
3407                         fp->tx_pkts_hist[5]++;
3408                 else if((m_head->m_pkthdr.len > 20480) &&
3409 				(m_head->m_pkthdr.len <= 24576))
3410                         fp->tx_pkts_hist[6]++;
3411                 else if((m_head->m_pkthdr.len > 24576) &&
3412 				(m_head->m_pkthdr.len <= 28672))
3413                         fp->tx_pkts_hist[7]++;
3414                 else if((m_head->m_pkthdr.len > 28672) &&
3415 				(m_head->m_pkthdr.len <= 32768))
3416                         fp->tx_pkts_hist[8]++;
3417                 else if((m_head->m_pkthdr.len > 32768) &&
3418 				(m_head->m_pkthdr.len <= 36864))
3419                         fp->tx_pkts_hist[9]++;
3420                 else if((m_head->m_pkthdr.len > 36864) &&
3421 				(m_head->m_pkthdr.len <= 40960))
3422                         fp->tx_pkts_hist[10]++;
3423                 else if((m_head->m_pkthdr.len > 40960) &&
3424 				(m_head->m_pkthdr.len <= 45056))
3425                         fp->tx_pkts_hist[11]++;
3426                 else if((m_head->m_pkthdr.len > 45056) &&
3427 				(m_head->m_pkthdr.len <= 49152))
3428                         fp->tx_pkts_hist[12]++;
3429                 else if((m_head->m_pkthdr.len > 49152) &&
3430 				(m_head->m_pkthdr.len <= 53248))
3431                         fp->tx_pkts_hist[13]++;
3432                 else if((m_head->m_pkthdr.len > 53248) &&
3433 				(m_head->m_pkthdr.len <= 57344))
3434                         fp->tx_pkts_hist[14]++;
3438                 else if((m_head->m_pkthdr.len > 57344) &&
3439 				(m_head->m_pkthdr.len <= 61440))
3440                         fp->tx_pkts_hist[15]++;
3441                 else
3442                         fp->tx_pkts_hist[16]++;
3443         }
3444 
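        /* Histogram of TX BD ring occupancy when a TSO packet is queued. */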
3445         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3446                 elem_left =  ecore_chain_get_elem_left(&txq->tx_pbl);
3447                 bd_used = TX_RING_SIZE - elem_left;
3448 
3449                 if(bd_used <= 100)
3450                         fp->tx_pkts_q[0]++;
3451                 else if((bd_used > 100) && (bd_used <= 500))
3452                         fp->tx_pkts_q[1]++;
3453                 else if((bd_used > 500) && (bd_used <= 1000))
3454                         fp->tx_pkts_q[2]++;
3455                 else if((bd_used > 1000) && (bd_used <= 2000))
3456                         fp->tx_pkts_q[3]++;
3457                 else if((bd_used > 2000) && (bd_used <= 4000))
3458                         fp->tx_pkts_q[4]++;
3459                 else if((bd_used > 4000) && (bd_used <= 5000))
3460                         fp->tx_pkts_q[5]++;
3461                 else if((bd_used > 5000) && (bd_used <= 7000))
3462                         fp->tx_pkts_q[6]++;
3463                 else if((bd_used > 7000) && (bd_used <= 8000))
3464                         fp->tx_pkts_q[7]++;
3465                 else if((bd_used > 8000) && (bd_used <= 9000))
3466                         fp->tx_pkts_q[8]++;
3467                 else if((bd_used > 9000) && (bd_used <= 10000))
3468                         fp->tx_pkts_q[9]++;
3469                 else if((bd_used > 10000) && (bd_used <= 11000))
3470                         fp->tx_pkts_q[10]++;
3471                 else if((bd_used > 11000) && (bd_used <= 12000))
3472                         fp->tx_pkts_q[11]++;
3473                 else if((bd_used > 12000) && (bd_used <= 13000))
3474                         fp->tx_pkts_q[12]++;
3475                 else if((bd_used > 13000) && (bd_used <= 14000))
3476                         fp->tx_pkts_q[13]++;
3477                 else if((bd_used > 14000) && (bd_used <= 15000))
3478                         fp->tx_pkts_q[14]++;
3479                 else if((bd_used > 15000) && (bd_used <= 16000))
3480                         fp->tx_pkts_q[15]++;
3481                 else
3482                         fp->tx_pkts_q[16]++;
3483         }
3484 
3485 #endif /* end of QLNX_TRACE_PERF_DATA */
3486 
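	/*
	 * Make sure the TX PBL can hold this frame's segments plus the
	 * reserved slots; if not, reap completed descriptors once via
	 * qlnx_tx_int() and re-check before declaring the ring full.
	 */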
3487 	if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3488 		(int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3489 		QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs"
3490 			" in chain[%d] trying to free packets\n",
3491 			nsegs, elem_left, fp->rss_id);
3492 
3493 		fp->tx_nsegs_gt_elem_left++;
3494 
3495 		(void)qlnx_tx_int(ha, fp, txq);
3496 
3497 		if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3498 			ecore_chain_get_elem_left(&txq->tx_pbl))) {
3499 			QL_DPRINT1(ha,
3500 				"(%d, 0x%x) insufficient BDs in chain[%d]\n",
3501 				nsegs, elem_left, fp->rss_id);
3502 
3503 			fp->err_tx_nsegs_gt_elem_left++;
3504 			fp->tx_ring_full = 1;
3505 			if (ha->storm_stats_enable)
3506 				ha->storm_stats_gather = 1;
3507 			return (ENOBUFS);
3508 		}
3509 	}
3510 
3511 	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3512 
3513 	txq->sw_tx_ring[idx].mp = m_head;
3514 
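	/*
	 * Build the BD chain: a 1st BD carrying the offload flags, optional
	 * 2nd/3rd BDs (always produced for TSO), and one data BD for each
	 * remaining DMA segment.
	 */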
3515 	first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3516 
3517 	memset(first_bd, 0, sizeof(*first_bd));
3518 
3519 	first_bd->data.bd_flags.bitfields =
3520 		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3521 
3522 	BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3523 
3524 	nbd++;
3525 
3526 	if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3527 		first_bd->data.bd_flags.bitfields |=
3528 			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3529 	}
3530 
3531 	if (m_head->m_pkthdr.csum_flags &
3532 		(CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3533 		first_bd->data.bd_flags.bitfields |=
3534 			(1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3535 	}
3536 
3537         if (m_head->m_flags & M_VLANTAG) {
3538                 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3539 		first_bd->data.bd_flags.bitfields |=
3540 			(1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3541         }
3542 
3543 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3544                 first_bd->data.bd_flags.bitfields |=
3545 			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3546 		first_bd->data.bd_flags.bitfields |=
3547 			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3548 
3549 		nbds_in_hdr = 1;
3550 
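		/*
		 * For TSO the headers must be carried entirely by the BDs
		 * preceding the payload. Split on where the header boundary
		 * ('offset') falls relative to the first DMA segment:
		 * exactly at its end, inside it, or beyond it.
		 */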
3551 		if (offset == segs->ds_len) {
3552 			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3553 			segs++;
3554 			seg_idx++;
3555 
3556 			second_bd = (struct eth_tx_2nd_bd *)
3557 					ecore_chain_produce(&txq->tx_pbl);
3558 			memset(second_bd, 0, sizeof(*second_bd));
3559 			nbd++;
3560 
3561 			if (seg_idx < nsegs) {
3562 				BD_SET_UNMAP_ADDR_LEN(second_bd, \
3563 					(segs->ds_addr), (segs->ds_len));
3564 				segs++;
3565 				seg_idx++;
3566 			}
3567 
3568 			third_bd = (struct eth_tx_3rd_bd *)
3569 					ecore_chain_produce(&txq->tx_pbl);
3570 			memset(third_bd, 0, sizeof(*third_bd));
3571 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3572 			third_bd->data.bitfields |=
3573 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3574 			nbd++;
3575 
3576 			if (seg_idx < nsegs) {
3577 				BD_SET_UNMAP_ADDR_LEN(third_bd, \
3578 					(segs->ds_addr), (segs->ds_len));
3579 				segs++;
3580 				seg_idx++;
3581 			}
3582 
3583 			for (; seg_idx < nsegs; seg_idx++) {
3584 				tx_data_bd = (struct eth_tx_bd *)
3585 					ecore_chain_produce(&txq->tx_pbl);
3586 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3587 				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3588 					segs->ds_addr,\
3589 					segs->ds_len);
3590 				segs++;
3591 				nbd++;
3592 			}
3593 
3594 		} else if (offset < segs->ds_len) {
3595 			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3596 
3597 			second_bd = (struct eth_tx_2nd_bd *)
3598 					ecore_chain_produce(&txq->tx_pbl);
3599 			memset(second_bd, 0, sizeof(*second_bd));
3600 			BD_SET_UNMAP_ADDR_LEN(second_bd, \
3601 				(segs->ds_addr + offset),\
3602 				(segs->ds_len - offset));
3603 			nbd++;
3604 			segs++;
3605 
3606 			third_bd = (struct eth_tx_3rd_bd *)
3607 					ecore_chain_produce(&txq->tx_pbl);
3608 			memset(third_bd, 0, sizeof(*third_bd));
3609 
3610 			BD_SET_UNMAP_ADDR_LEN(third_bd, \
3611 					segs->ds_addr,\
3612 					segs->ds_len);
3613 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3614 			third_bd->data.bitfields |=
3615 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3616 			segs++;
3617 			nbd++;
3618 
3619 			for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3620 				tx_data_bd = (struct eth_tx_bd *)
3621 					ecore_chain_produce(&txq->tx_pbl);
3622 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3623 				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3624 					segs->ds_addr,\
3625 					segs->ds_len);
3626 				segs++;
3627 				nbd++;
3628 			}
3629 
3630 		} else {
3631 			offset = offset - segs->ds_len;
3632 			segs++;
3633 
3634 			for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3635 				if (offset)
3636 					nbds_in_hdr++;
3637 
3638 				tx_data_bd = (struct eth_tx_bd *)
3639 					ecore_chain_produce(&txq->tx_pbl);
3640 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3641 
3642 				if (second_bd == NULL) {
3643 					second_bd = (struct eth_tx_2nd_bd *)
3644 								tx_data_bd;
3645 				} else if (third_bd == NULL) {
3646 					third_bd = (struct eth_tx_3rd_bd *)
3647 								tx_data_bd;
3648 				}
3649 
3650 				if (offset && (offset < segs->ds_len)) {
3651 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3652 						segs->ds_addr, offset);
3653 
3654 					tx_data_bd = (struct eth_tx_bd *)
3655 					ecore_chain_produce(&txq->tx_pbl);
3656 
3657 					memset(tx_data_bd, 0,
3658 						sizeof(*tx_data_bd));
3659 
3660 					if (second_bd == NULL) {
3661 						second_bd =
3662 					(struct eth_tx_2nd_bd *)tx_data_bd;
3663 					} else if (third_bd == NULL) {
3664 						third_bd =
3665 					(struct eth_tx_3rd_bd *)tx_data_bd;
3666 					}
3667 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3668 						(segs->ds_addr + offset), \
3669 						(segs->ds_len - offset));
3670 					nbd++;
3671 					offset = 0;
3672 				} else {
3673 					if (offset)
3674 						offset = offset - segs->ds_len;
3675 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3676 						segs->ds_addr, segs->ds_len);
3677 				}
3678 				segs++;
3679 				nbd++;
3680 			}
3681 
3682 			if (third_bd == NULL) {
3683 				third_bd = (struct eth_tx_3rd_bd *)
3684 					ecore_chain_produce(&txq->tx_pbl);
3685 				memset(third_bd, 0, sizeof(*third_bd));
3686 			}
3687 
3688 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3689 			third_bd->data.bitfields |=
3690 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3691 		}
3692 		fp->tx_tso_pkts++;
3693 	} else {
3694 		segs++;
3695 		for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3696 			tx_data_bd = (struct eth_tx_bd *)
3697 					ecore_chain_produce(&txq->tx_pbl);
3698 			memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3699 			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3700 				segs->ds_len);
3701 			segs++;
3702 			nbd++;
3703 		}
3704 		first_bd->data.bitfields =
3705 			(m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3706 				 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3707 		first_bd->data.bitfields =
3708 			htole16(first_bd->data.bitfields);
3709 		fp->tx_non_tso_pkts++;
3710 	}
3711 
3712 	first_bd->data.nbds = nbd;
3713 
3714 	if (ha->dbg_trace_tso_pkt_len) {
3715 		if (fp->tx_tso_max_nsegs < nsegs)
3716 			fp->tx_tso_max_nsegs = nsegs;
3717 
3718 		if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3719 			fp->tx_tso_min_nsegs = nsegs;
3720 	}
3721 
3722 	txq->sw_tx_ring[idx].nsegs = nsegs;
3723 	txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3724 
3725 	txq->tx_db.data.bd_prod =
3726 		htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3727 
3728 	qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3729 
3730 	QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3731 	return (0);
3732 }
3733 
3734 static void
3735 qlnx_stop(qlnx_host_t *ha)
3736 {
3737 	if_t		ifp = ha->ifp;
3738 	int		i;
3739 
3740 	if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
3741 
3742 	/*
3743 	 * Lock and unlock each fp->tx_mtx to propagate the cleared
3744 	 * if_drv_flags state to every tx thread, then kick each fastpath
3745 	 * taskqueue so it notices the change and drains.
3746 	 */
3747         QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
3748 
3749 	if (ha->state == QLNX_STATE_OPEN) {
3750         	for (i = 0; i < ha->num_rss; i++) {
3751 			struct qlnx_fastpath *fp = &ha->fp_array[i];
3752 
3753 			mtx_lock(&fp->tx_mtx);
3754 			mtx_unlock(&fp->tx_mtx);
3755 
3756 			if (fp->fp_taskqueue != NULL)
3757 				taskqueue_enqueue(fp->fp_taskqueue,
3758 					&fp->fp_task);
3759 		}
3760 	}
3761 #ifdef QLNX_ENABLE_IWARP
3762 	if (qlnx_vf_device(ha) != 0) {
3763 		qlnx_rdma_dev_close(ha);
3764 	}
3765 #endif /* #ifdef QLNX_ENABLE_IWARP */
3766 
3767 	qlnx_unload(ha);
3768 
3769 	return;
3770 }
3771 
3772 static int
3773 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3774 {
3775         return (TX_RING_SIZE - 1);
3776 }
3777 
3778 uint8_t *
3779 qlnx_get_mac_addr(qlnx_host_t *ha)
3780 {
3781 	struct ecore_hwfn	*p_hwfn;
3782 	unsigned char mac[ETHER_ADDR_LEN];
3783 	uint8_t			p_is_forced;
3784 
3785 	p_hwfn = &ha->cdev.hwfns[0];
3786 
3787 	if (qlnx_vf_device(ha) != 0)
3788 		return (p_hwfn->hw_info.hw_mac_addr);
3789 
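	/*
	 * On a VF, the PF can force a MAC address through the bulletin
	 * board; when one is present it supersedes the cached primary MAC.
	 */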
3790 	ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3791 	if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3792 		true) {
3793 		device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3794 			" mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3795 			p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3796         	memcpy(ha->primary_mac, mac, ETH_ALEN);
3797 	}
3798 
3799 	return (ha->primary_mac);
3800 }
3801 
3802 static uint32_t
3803 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3804 {
3805 	uint32_t	ifm_type = 0;
3806 
3807 	switch (if_link->media_type) {
3808 	case MEDIA_MODULE_FIBER:
3809 	case MEDIA_UNSPECIFIED:
3810 		if (if_link->speed == (100 * 1000))
3811 			ifm_type = QLNX_IFM_100G_SR4;
3812 		else if (if_link->speed == (40 * 1000))
3813 			ifm_type = IFM_40G_SR4;
3814 		else if (if_link->speed == (25 * 1000))
3815 			ifm_type = QLNX_IFM_25G_SR;
3816 		else if (if_link->speed == (10 * 1000))
3817 			ifm_type = (IFM_10G_LR | IFM_10G_SR);
3818 		else if (if_link->speed == (1 * 1000))
3819 			ifm_type = (IFM_1000_SX | IFM_1000_LX);
3820 
3821 		break;
3822 
3823 	case MEDIA_DA_TWINAX:
3824 		if (if_link->speed == (100 * 1000))
3825 			ifm_type = QLNX_IFM_100G_CR4;
3826 		else if (if_link->speed == (40 * 1000))
3827 			ifm_type = IFM_40G_CR4;
3828 		else if (if_link->speed == (25 * 1000))
3829 			ifm_type = QLNX_IFM_25G_CR;
3830 		else if (if_link->speed == (10 * 1000))
3831 			ifm_type = IFM_10G_TWINAX;
3832 
3833 		break;
3834 
3835 	default:
3836 		ifm_type = IFM_UNKNOWN;
3837 		break;
3838 	}
3839 	return (ifm_type);
3840 }
3841 
3842 /*****************************************************************************
3843  * Interrupt Service Functions
3844  *****************************************************************************/
3845 
3846 static int
3847 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3848 	struct mbuf *mp_head, uint16_t len)
3849 {
3850 	struct mbuf		*mp, *mpf, *mpl;
3851 	struct sw_rx_data	*sw_rx_data;
3852 	struct qlnx_rx_queue	*rxq;
3853 	uint16_t 		len_in_buffer;
3854 
3855 	rxq = fp->rxq;
3856 	mpf = mpl = mp = NULL;
3857 
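	/*
	 * The frame spills past the first receive buffer: walk the RX ring,
	 * chaining one mbuf per buffer onto mp_head until 'len' bytes have
	 * been consumed.
	 */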
3858 	while (len) {
3859         	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3860 
3861                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3862                 mp = sw_rx_data->data;
3863 
3864 		if (mp == NULL) {
3865                 	QL_DPRINT1(ha, "mp = NULL\n");
3866 			fp->err_rx_mp_null++;
3867         		rxq->sw_rx_cons  =
3868 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3869 
3870 			if (mpf != NULL)
3871 				m_freem(mpf);
3872 
3873 			return (-1);
3874 		}
3875 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3876 			BUS_DMASYNC_POSTREAD);
3877 
3878                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3879                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3880 				" incoming packet and reusing its buffer\n");
3881 
3882                         qlnx_reuse_rx_data(rxq);
3883                         fp->err_rx_alloc_errors++;
3884 
3885 			if (mpf != NULL)
3886 				m_freem(mpf);
3887 
3888 			return (-1);
3889 		}
3890                 ecore_chain_consume(&rxq->rx_bd_ring);
3891 
3892 		if (len > rxq->rx_buf_size)
3893 			len_in_buffer = rxq->rx_buf_size;
3894 		else
3895 			len_in_buffer = len;
3896 
3897 		len = len - len_in_buffer;
3898 
3899 		mp->m_flags &= ~M_PKTHDR;
3900 		mp->m_next = NULL;
3901 		mp->m_len = len_in_buffer;
3902 
3903 		if (mpf == NULL)
3904 			mpf = mpl = mp;
3905 		else {
3906 			mpl->m_next = mp;
3907 			mpl = mp;
3908 		}
3909 	}
3910 
3911 	if (mpf != NULL)
3912 		mp_head->m_next = mpf;
3913 
3914 	return (0);
3915 }
3916 
3917 static void
3918 qlnx_tpa_start(qlnx_host_t *ha,
3919 	struct qlnx_fastpath *fp,
3920 	struct qlnx_rx_queue *rxq,
3921 	struct eth_fast_path_rx_tpa_start_cqe *cqe)
3922 {
3923 	uint32_t		agg_index;
3924         if_t ifp = ha->ifp;
3925 	struct mbuf		*mp;
3926 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
3927 	struct sw_rx_data	*sw_rx_data;
3928 	dma_addr_t		addr;
3929 	bus_dmamap_t		map;
3930 	struct eth_rx_bd	*rx_bd;
3931 	int			i;
3932 	uint8_t			hash_type;
3933 
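	/*
	 * TPA (hardware LRO) start: the NIC has opened aggregation slot
	 * 'tpa_agg_index' for a TCP flow. Park the first mbuf and any
	 * buffers listed in ext_bd_len_list[] in tpa_info[] until the
	 * TPA_CONT/TPA_END completions arrive.
	 */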
3934 	agg_index = cqe->tpa_agg_index;
3935 
3936         QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3937                 \t type = 0x%x\n \
3938                 \t bitfields = 0x%x\n \
3939                 \t seg_len = 0x%x\n \
3940                 \t pars_flags = 0x%x\n \
3941                 \t vlan_tag = 0x%x\n \
3942                 \t rss_hash = 0x%x\n \
3943                 \t len_on_first_bd = 0x%x\n \
3944                 \t placement_offset = 0x%x\n \
3945                 \t tpa_agg_index = 0x%x\n \
3946                 \t header_len = 0x%x\n \
3947                 \t ext_bd_len_list[0] = 0x%x\n \
3948                 \t ext_bd_len_list[1] = 0x%x\n \
3949                 \t ext_bd_len_list[2] = 0x%x\n \
3950                 \t ext_bd_len_list[3] = 0x%x\n \
3951                 \t ext_bd_len_list[4] = 0x%x\n",
3952                 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3953                 cqe->pars_flags.flags, cqe->vlan_tag,
3954                 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3955                 cqe->tpa_agg_index, cqe->header_len,
3956                 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3957                 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3958                 cqe->ext_bd_len_list[4]);
3959 
3960 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3961 		fp->err_rx_tpa_invalid_agg_num++;
3962 		return;
3963 	}
3964 
3965 	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3966 	bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3967 	mp = sw_rx_data->data;
3968 
3969 	QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3970 
3971 	if (mp == NULL) {
3972                	QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3973 		fp->err_rx_mp_null++;
3974        		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3975 
3976 		return;
3977 	}
3978 
3979 	if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3980 		QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3981 			" flags = %x, dropping incoming packet\n", fp->rss_id,
3982 			rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3983 
3984 		fp->err_rx_hw_errors++;
3985 
3986 		qlnx_reuse_rx_data(rxq);
3987 
3988 		QLNX_INC_IERRORS(ifp);
3989 
3990 		return;
3991 	}
3992 
3993 	if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3994 		QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3995 			" dropping incoming packet and reusing its buffer\n",
3996 			fp->rss_id);
3997 
3998 		fp->err_rx_alloc_errors++;
3999 		QLNX_INC_IQDROPS(ifp);
4000 
4001 		/*
4002 		 * Load the tpa mbuf into the rx ring and save the
4003 		 * posted mbuf
4004 		 */
4005 
4006 		map = sw_rx_data->map;
4007 		addr = sw_rx_data->dma_addr;
4008 
4009 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4010 
4011 		sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4012 		sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4013 		sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4014 
4015 		rxq->tpa_info[agg_index].rx_buf.data = mp;
4016 		rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4017 		rxq->tpa_info[agg_index].rx_buf.map = map;
4018 
4019 		rx_bd = (struct eth_rx_bd *)
4020 				ecore_chain_produce(&rxq->rx_bd_ring);
4021 
4022 		rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4023 		rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4024 
4025 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4026 			BUS_DMASYNC_PREREAD);
4027 
4028 		rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4029 		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4030 
4031 		ecore_chain_consume(&rxq->rx_bd_ring);
4032 
4033 		/* Now reuse any buffers posted in ext_bd_len_list */
4034 		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4035 			if (cqe->ext_bd_len_list[i] == 0)
4036 				break;
4037 
4038 			qlnx_reuse_rx_data(rxq);
4039 		}
4040 
4041 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4042 		return;
4043 	}
4044 
4045 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4046 		QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4047 			" dropping incoming packet and reusing its buffer\n",
4048 			fp->rss_id);
4049 
4050 		QLNX_INC_IQDROPS(ifp);
4051 
4052 		/* if we already have mbuf head in aggregation free it */
4053 		if (rxq->tpa_info[agg_index].mpf) {
4054 			m_freem(rxq->tpa_info[agg_index].mpf);
4055 			rxq->tpa_info[agg_index].mpl = NULL;
4056 		}
4057 		rxq->tpa_info[agg_index].mpf = mp;
4058 		rxq->tpa_info[agg_index].mpl = NULL;
4059 
4060 		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4061 		ecore_chain_consume(&rxq->rx_bd_ring);
4062 
4063 		/* Now reuse any buffers posted in ext_bd_len_list */
4064 		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4065 			if (cqe->ext_bd_len_list[i] == 0)
4066 				break;
4067 
4068 			qlnx_reuse_rx_data(rxq);
4069 		}
4070 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4071 
4072 		return;
4073 	}
4074 
4075 	/*
4076 	 * first process the ext_bd_len_list
4077 	 * if this fails then we simply drop the packet
4078 	 */
4079 	ecore_chain_consume(&rxq->rx_bd_ring);
4080 	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4081 
4082 	for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4083 		QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4084 
4085 		if (cqe->ext_bd_len_list[i] == 0)
4086 			break;
4087 
4088 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4089 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4090 			BUS_DMASYNC_POSTREAD);
4091 
4092 		mpc = sw_rx_data->data;
4093 
4094 		if (mpc == NULL) {
4095 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4096 			fp->err_rx_mp_null++;
4097 			if (mpf != NULL)
4098 				m_freem(mpf);
4099 			mpf = mpl = NULL;
4100 			rxq->tpa_info[agg_index].agg_state =
4101 						QLNX_AGG_STATE_ERROR;
4102 			ecore_chain_consume(&rxq->rx_bd_ring);
4103 			rxq->sw_rx_cons =
4104 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4105 			continue;
4106 		}
4107 
4108 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4109 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4110 				" dropping incoming packet and reusing its"
4111 				" buffer\n", fp->rss_id);
4112 
4113 			qlnx_reuse_rx_data(rxq);
4114 
4115 			if (mpf != NULL)
4116 				m_freem(mpf);
4117 			mpf = mpl = NULL;
4118 
4119 			rxq->tpa_info[agg_index].agg_state =
4120 						QLNX_AGG_STATE_ERROR;
4121 
4122 			ecore_chain_consume(&rxq->rx_bd_ring);
4123 			rxq->sw_rx_cons =
4124 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4125 
4126 			continue;
4127 		}
4128 
4129 		mpc->m_flags &= ~M_PKTHDR;
4130 		mpc->m_next = NULL;
4131 		mpc->m_len = cqe->ext_bd_len_list[i];
4132 
4133 		if (mpf == NULL) {
4134 			mpf = mpl = mpc;
4135 		} else {
4136 			mpl->m_len = ha->rx_buf_size;
4137 			mpl->m_next = mpc;
4138 			mpl = mpc;
4139 		}
4140 
4141 		ecore_chain_consume(&rxq->rx_bd_ring);
4142 		rxq->sw_rx_cons =
4143 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4144 	}
4145 
4146 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4147 		QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4148 			" incoming packet and reusing its buffer\n",
4149 			fp->rss_id);
4150 
4151 		QLNX_INC_IQDROPS(ifp);
4152 
4153 		rxq->tpa_info[agg_index].mpf = mp;
4154 		rxq->tpa_info[agg_index].mpl = NULL;
4155 
4156 		return;
4157 	}
4158 
4159         rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4160 
4161         if (mpf != NULL) {
4162                 mp->m_len = ha->rx_buf_size;
4163                 mp->m_next = mpf;
4164                 rxq->tpa_info[agg_index].mpf = mp;
4165                 rxq->tpa_info[agg_index].mpl = mpl;
4166         } else {
4167                 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4168                 rxq->tpa_info[agg_index].mpf = mp;
4169                 rxq->tpa_info[agg_index].mpl = mp;
4170                 mp->m_next = NULL;
4171         }
4172 
4173 	mp->m_flags |= M_PKTHDR;
4174 
4175 	/* assign the packet to this interface */
4176 	mp->m_pkthdr.rcvif = ifp;
4177 
4178 	/* assume no hardware checksum has completed */
4179 	mp->m_pkthdr.csum_flags = 0;
4180 
4181 	//mp->m_pkthdr.flowid = fp->rss_id;
4182 	mp->m_pkthdr.flowid = cqe->rss_hash;
4183 
4184 	hash_type = cqe->bitfields &
4185 			(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4186 			ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4187 
4188 	switch (hash_type) {
4189 	case RSS_HASH_TYPE_IPV4:
4190 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4191 		break;
4192 
4193 	case RSS_HASH_TYPE_TCP_IPV4:
4194 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4195 		break;
4196 
4197 	case RSS_HASH_TYPE_IPV6:
4198 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4199 		break;
4200 
4201 	case RSS_HASH_TYPE_TCP_IPV6:
4202 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4203 		break;
4204 
4205 	default:
4206 		M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4207 		break;
4208 	}
4209 
4210 	mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4211 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4212 
4213 	mp->m_pkthdr.csum_data = 0xFFFF;
4214 
4215 	if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4216 		mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4217 		mp->m_flags |= M_VLANTAG;
4218 	}
4219 
4220 	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4221 
4222         QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4223 		fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4224                 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
4225 
4226 	return;
4227 }
4228 
4229 static void
4230 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4231 	struct qlnx_rx_queue *rxq,
4232 	struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4233 {
4234 	struct sw_rx_data	*sw_rx_data;
4235 	int			i;
4236 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
4237 	struct mbuf		*mp;
4238 	uint32_t		agg_index;
4239 
4240         QL_DPRINT7(ha, "[%d]: enter\n \
4241                 \t type = 0x%x\n \
4242                 \t tpa_agg_index = 0x%x\n \
4243                 \t len_list[0] = 0x%x\n \
4244                 \t len_list[1] = 0x%x\n \
4245                 \t len_list[2] = 0x%x\n \
4246                 \t len_list[3] = 0x%x\n \
4247                 \t len_list[4] = 0x%x\n \
4248                 \t len_list[5] = 0x%x\n",
4249                 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4250                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4251                 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4252 
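	/*
	 * TPA continuation: append the buffers named in len_list[] to the
	 * aggregation opened by the matching TPA_START completion.
	 */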
4253 	agg_index = cqe->tpa_agg_index;
4254 
4255 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4256 		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4257 		fp->err_rx_tpa_invalid_agg_num++;
4258 		return;
4259 	}
4260 
4261 	for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4262 		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4263 
4264 		if (cqe->len_list[i] == 0)
4265 			break;
4266 
4267 		if (rxq->tpa_info[agg_index].agg_state !=
4268 			QLNX_AGG_STATE_START) {
4269 			qlnx_reuse_rx_data(rxq);
4270 			continue;
4271 		}
4272 
4273 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4274 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4275 			BUS_DMASYNC_POSTREAD);
4276 
4277 		mpc = sw_rx_data->data;
4278 
4279 		if (mpc == NULL) {
4280 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4281 
4282 			fp->err_rx_mp_null++;
4283 			if (mpf != NULL)
4284 				m_freem(mpf);
4285 			mpf = mpl = NULL;
4286 			rxq->tpa_info[agg_index].agg_state =
4287 						QLNX_AGG_STATE_ERROR;
4288 			ecore_chain_consume(&rxq->rx_bd_ring);
4289 			rxq->sw_rx_cons =
4290 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4291 			continue;
4292 		}
4293 
4294 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4295 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4296 				" dropping incoming packet and reusing its"
4297 				" buffer\n", fp->rss_id);
4298 
4299 			qlnx_reuse_rx_data(rxq);
4300 
4301 			if (mpf != NULL)
4302 				m_freem(mpf);
4303 			mpf = mpl = NULL;
4304 
4305 			rxq->tpa_info[agg_index].agg_state =
4306 						QLNX_AGG_STATE_ERROR;
4307 
4308 			ecore_chain_consume(&rxq->rx_bd_ring);
4309 			rxq->sw_rx_cons =
4310 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4311 
4312 			continue;
4313 		}
4314 
4315 		mpc->m_flags &= ~M_PKTHDR;
4316 		mpc->m_next = NULL;
4317 		mpc->m_len = cqe->len_list[i];
4318 
4319 		if (mpf == NULL) {
4320 			mpf = mpl = mpc;
4321 		} else {
4322 			mpl->m_len = ha->rx_buf_size;
4323 			mpl->m_next = mpc;
4324 			mpl = mpc;
4325 		}
4326 
4327 		ecore_chain_consume(&rxq->rx_bd_ring);
4328 		rxq->sw_rx_cons =
4329 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4330 	}
4331 
4332         QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4333                   fp->rss_id, mpf, mpl);
4334 
4335 	if (mpf != NULL) {
4336 		mp = rxq->tpa_info[agg_index].mpl;
4337 		mp->m_len = ha->rx_buf_size;
4338 		mp->m_next = mpf;
4339 		rxq->tpa_info[agg_index].mpl = mpl;
4340 	}
4341 
4342 	return;
4343 }
4344 
4345 static int
4346 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4347 	struct qlnx_rx_queue *rxq,
4348 	struct eth_fast_path_rx_tpa_end_cqe *cqe)
4349 {
4350 	struct sw_rx_data	*sw_rx_data;
4351 	int			i;
4352 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
4353 	struct mbuf		*mp;
4354 	uint32_t		agg_index;
4355 	uint32_t		len = 0;
4356         if_t ifp = ha->ifp;
4357 
4358         QL_DPRINT7(ha, "[%d]: enter\n \
4359                 \t type = 0x%x\n \
4360                 \t tpa_agg_index = 0x%x\n \
4361                 \t total_packet_len = 0x%x\n \
4362                 \t num_of_bds = 0x%x\n \
4363                 \t end_reason = 0x%x\n \
4364                 \t num_of_coalesced_segs = 0x%x\n \
4365                 \t ts_delta = 0x%x\n \
4366                 \t len_list[0] = 0x%x\n \
4367                 \t len_list[1] = 0x%x\n \
4368                 \t len_list[2] = 0x%x\n \
4369                 \t len_list[3] = 0x%x\n",
4370                  fp->rss_id, cqe->type, cqe->tpa_agg_index,
4371                 cqe->total_packet_len, cqe->num_of_bds,
4372                 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4373                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4374                 cqe->len_list[3]);
4375 
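	/*
	 * TPA end: chain any trailing buffers, trim the mbuf lengths to
	 * total_packet_len, and hand the coalesced packet to the stack.
	 */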
4376 	agg_index = cqe->tpa_agg_index;
4377 
4378 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4379 		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4380 
4381 		fp->err_rx_tpa_invalid_agg_num++;
4382 		return (0);
4383 	}
4384 
4385 	for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4386 		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4387 
4388 		if (cqe->len_list[i] == 0)
4389 			break;
4390 
4391 		if (rxq->tpa_info[agg_index].agg_state !=
4392 			QLNX_AGG_STATE_START) {
4393 			QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4394 
4395 			qlnx_reuse_rx_data(rxq);
4396 			continue;
4397 		}
4398 
4399 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4400 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4401 			BUS_DMASYNC_POSTREAD);
4402 
4403 		mpc = sw_rx_data->data;
4404 
4405 		if (mpc == NULL) {
4406 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4407 
4408 			fp->err_rx_mp_null++;
4409 			if (mpf != NULL)
4410 				m_freem(mpf);
4411 			mpf = mpl = NULL;
4412 			rxq->tpa_info[agg_index].agg_state =
4413 						QLNX_AGG_STATE_ERROR;
4414 			ecore_chain_consume(&rxq->rx_bd_ring);
4415 			rxq->sw_rx_cons =
4416 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4417 			continue;
4418 		}
4419 
4420 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4421 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4422 				" dropping incoming packet and reusing its"
4423 				" buffer\n", fp->rss_id);
4424 
4425 			qlnx_reuse_rx_data(rxq);
4426 
4427 			if (mpf != NULL)
4428 				m_freem(mpf);
4429 			mpf = mpl = NULL;
4430 
4431 			rxq->tpa_info[agg_index].agg_state =
4432 						QLNX_AGG_STATE_ERROR;
4433 
4434 			ecore_chain_consume(&rxq->rx_bd_ring);
4435 			rxq->sw_rx_cons =
4436 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4437 
4438 			continue;
4439 		}
4440 
4441 		mpc->m_flags &= ~M_PKTHDR;
4442 		mpc->m_next = NULL;
4443 		mpc->m_len = cqe->len_list[i];
4444 
4445 		if (mpf == NULL) {
4446 			mpf = mpl = mpc;
4447 		} else {
4448 			mpl->m_len = ha->rx_buf_size;
4449 			mpl->m_next = mpc;
4450 			mpl = mpc;
4451 		}
4452 
4453 		ecore_chain_consume(&rxq->rx_bd_ring);
4454 		rxq->sw_rx_cons =
4455 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4456 	}
4457 
4458 	QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4459 
4460 	if (mpf != NULL) {
4461 		QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4462 
4463 		mp = rxq->tpa_info[agg_index].mpl;
4464 		mp->m_len = ha->rx_buf_size;
4465 		mp->m_next = mpf;
4466 	}
4467 
4468 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4469 		QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4470 
4471 		if (rxq->tpa_info[agg_index].mpf != NULL)
4472 			m_freem(rxq->tpa_info[agg_index].mpf);
4473 		rxq->tpa_info[agg_index].mpf = NULL;
4474 		rxq->tpa_info[agg_index].mpl = NULL;
4475 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4476 		return (0);
4477 	}
4478 
4479 	mp = rxq->tpa_info[agg_index].mpf;
4480 	m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4481 	mp->m_pkthdr.len = cqe->total_packet_len;
4482 
4483 	if (mp->m_next  == NULL)
4484 		mp->m_len = mp->m_pkthdr.len;
4485 	else {
4486 		/* compute the total packet length */
4487 		mpf = mp;
4488 		while (mpf != NULL) {
4489 			len += mpf->m_len;
4490 			mpf = mpf->m_next;
4491 		}
4492 
4493 		if (cqe->total_packet_len > len) {
4494 			mpl = rxq->tpa_info[agg_index].mpl;
4495 			mpl->m_len += (cqe->total_packet_len - len);
4496 		}
4497 	}
4498 
4499 	QLNX_INC_IPACKETS(ifp);
4500 	QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4501 
4502         QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIx64 "\n \
4503 		m_len = 0x%x m_pkthdr_len = 0x%x\n",
4504                 fp->rss_id, mp->m_pkthdr.csum_data,
4505                 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4506 
4507 	if_input(ifp, mp);
4508 
4509 	rxq->tpa_info[agg_index].mpf = NULL;
4510 	rxq->tpa_info[agg_index].mpl = NULL;
4511 	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4512 
4513 	return (cqe->num_of_coalesced_segs);
4514 }
4515 
4516 static int
4517 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4518 	int lro_enable)
4519 {
4520         uint16_t		hw_comp_cons, sw_comp_cons;
4521         int			rx_pkt = 0;
4522         struct qlnx_rx_queue	*rxq = fp->rxq;
4523         if_t ifp = ha->ifp;
4524 	struct ecore_dev	*cdev = &ha->cdev;
4525 	struct ecore_hwfn       *p_hwfn;
4526 
4527 #ifdef QLNX_SOFT_LRO
4528 	struct lro_ctrl		*lro;
4529 
4530 	lro = &rxq->lro;
4531 #endif /* #ifdef QLNX_SOFT_LRO */
4532 
4533         hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4534         sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4535 
4536 	p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4537 
4538         /* Memory barrier to prevent the CPU from doing speculative reads of CQE
4539          * / BD in the while-loop before reading hw_comp_cons. If the CQE is
4540          * read before it is written by FW, then FW writes CQE and SB, and then
4541          * the CPU reads the hw_comp_cons, it will use an old CQE.
4542          */
4543 
4544         /* Loop to complete all indicated BDs */
4545         while (sw_comp_cons != hw_comp_cons) {
4546                 union eth_rx_cqe		*cqe;
4547                 struct eth_fast_path_rx_reg_cqe	*fp_cqe;
4548                 struct sw_rx_data		*sw_rx_data;
4549 		register struct mbuf		*mp;
4550                 enum eth_rx_cqe_type		cqe_type;
4551                 uint16_t			len, pad, len_on_first_bd;
4552                 uint8_t				*data;
4553 		uint8_t				hash_type;
4554 
4555                 /* Get the CQE from the completion ring */
4556                 cqe = (union eth_rx_cqe *)
4557                         ecore_chain_consume(&rxq->rx_comp_ring);
4558                 cqe_type = cqe->fast_path_regular.type;
4559 
4560                 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4561                         QL_DPRINT3(ha, "Got a slowpath CQE\n");
4562 
4563                         ecore_eth_cqe_completion(p_hwfn,
4564                                         (struct eth_slow_path_rx_cqe *)cqe);
4565                         goto next_cqe;
4566                 }
4567 
4568 		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4569 			switch (cqe_type) {
4570 			case ETH_RX_CQE_TYPE_TPA_START:
4571 				qlnx_tpa_start(ha, fp, rxq,
4572 					&cqe->fast_path_tpa_start);
4573 				fp->tpa_start++;
4574 				break;
4575 
4576 			case ETH_RX_CQE_TYPE_TPA_CONT:
4577 				qlnx_tpa_cont(ha, fp, rxq,
4578 					&cqe->fast_path_tpa_cont);
4579 				fp->tpa_cont++;
4580 				break;
4581 
4582 			case ETH_RX_CQE_TYPE_TPA_END:
4583 				rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4584 						&cqe->fast_path_tpa_end);
4585 				fp->tpa_end++;
4586 				break;
4587 
4588 			default:
4589 				break;
4590 			}
4591 
4592                         goto next_cqe;
4593 		}
4594 
4595                 /* Get the data from the SW ring */
4596                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4597                 mp = sw_rx_data->data;
4598 
4599 		if (mp == NULL) {
4600                 	QL_DPRINT1(ha, "mp = NULL\n");
4601 			fp->err_rx_mp_null++;
4602         		rxq->sw_rx_cons  =
4603 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4604 			goto next_cqe;
4605 		}
4606 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4607 			BUS_DMASYNC_POSTREAD);
4608 
4609                 /* non GRO */
4610                 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4611                 len =  le16toh(fp_cqe->pkt_len);
4612                 pad = fp_cqe->placement_offset;
4613 #if 0
4614 		QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4615 			" len %u, parsing flags = %d pad  = %d\n",
4616 			cqe_type, fp_cqe->bitfields,
4617 			le16toh(fp_cqe->vlan_tag),
4618 			len, le16toh(fp_cqe->pars_flags.flags), pad);
4619 #endif
4620 		data = mtod(mp, uint8_t *);
4621 		data = data + pad;
4622 
4623 		if (0)
4624 			qlnx_dump_buf8(ha, __func__, data, len);
4625 
4626                 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4627                  * is always with a fixed size. If allocation fails, we take the
4628                  * consumed BD and return it to the ring in the PROD position.
4629                  * The packet that was received on that BD will be dropped (and
4630                  * not passed to the upper stack).
4631                  */
4632 		/* If this is an error packet then drop it */
4633 		if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4634 			CQE_FLAGS_ERR) {
4635 			QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4636 				" dropping incoming packet\n", sw_comp_cons,
4637 			le16toh(cqe->fast_path_regular.pars_flags.flags));
4638 			fp->err_rx_hw_errors++;
4639 
4640                         qlnx_reuse_rx_data(rxq);
4641 
4642 			QLNX_INC_IERRORS(ifp);
4643 
4644 			goto next_cqe;
4645 		}
4646 
4647                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4648                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4649 				" incoming packet and reusing its buffer\n");
4650                         qlnx_reuse_rx_data(rxq);
4651 
4652                         fp->err_rx_alloc_errors++;
4653 
4654 			QLNX_INC_IQDROPS(ifp);
4655 
4656                         goto next_cqe;
4657                 }
4658 
4659                 ecore_chain_consume(&rxq->rx_bd_ring);
4660 
4661 		len_on_first_bd = fp_cqe->len_on_first_bd;
4662 		m_adj(mp, pad);
4663 		mp->m_pkthdr.len = len;
4664 
4665 		if ((len > 60 ) && (len > len_on_first_bd)) {
4666 			mp->m_len = len_on_first_bd;
4667 
4668 			if (qlnx_rx_jumbo_chain(ha, fp, mp,
4669 				(len - len_on_first_bd)) != 0) {
4670 				m_freem(mp);
4671 
4672 				QLNX_INC_IQDROPS(ifp);
4673 
4674                         	goto next_cqe;
4675 			}
4676 
4677 		} else if (len_on_first_bd < len) {
4678 			fp->err_rx_jumbo_chain_pkts++;
4679 		} else {
4680 			mp->m_len = len;
4681 		}
4682 
4683 		mp->m_flags |= M_PKTHDR;
4684 
4685 		/* assign the packet to this interface */
4686 		mp->m_pkthdr.rcvif = ifp;
4687 
4688 		/* assume no hardware checksum has completed */
4689 		mp->m_pkthdr.csum_flags = 0;
4690 
4691 		mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4692 
4693 		hash_type = fp_cqe->bitfields &
4694 				(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4695 				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4696 
4697 		switch (hash_type) {
4698 		case RSS_HASH_TYPE_IPV4:
4699 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4700 			break;
4701 
4702 		case RSS_HASH_TYPE_TCP_IPV4:
4703 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4704 			break;
4705 
4706 		case RSS_HASH_TYPE_IPV6:
4707 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4708 			break;
4709 
4710 		case RSS_HASH_TYPE_TCP_IPV6:
4711 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4712 			break;
4713 
4714 		default:
4715 			M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4716 			break;
4717 		}
4718 
4719 		if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4720 			mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4721 		}
4722 
4723 		if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4724 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4725 		}
4726 
4727 		if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4728 			mp->m_pkthdr.csum_data = 0xFFFF;
4729 			mp->m_pkthdr.csum_flags |=
4730 				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4731 		}
4732 
4733 		if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4734 			mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4735 			mp->m_flags |= M_VLANTAG;
4736 		}
4737 
4738 		QLNX_INC_IPACKETS(ifp);
4739 		QLNX_INC_IBYTES(ifp, len);
4740 
4741 #ifdef QLNX_SOFT_LRO
4742 		if (lro_enable)
4743 			tcp_lro_queue_mbuf(lro, mp);
4744 		else
4745 			if_input(ifp, mp);
4746 #else
4747 
4748 		if_input(ifp, mp);
4749 
4750 #endif /* #ifdef QLNX_SOFT_LRO */
4751 
4752                 rx_pkt++;
4753 
4754         	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4755 
4756 next_cqe:	/* don't consume bd rx buffer */
4757                 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4758                 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4759 
4760 		/* CR TPA - revisit how to handle budget in TPA perhaps
4761 		   increase on "end" */
4762                 if (rx_pkt == budget)
4763                         break;
4764         } /* repeat while sw_comp_cons != hw_comp_cons... */
4765 
4766         /* Update producers */
4767         qlnx_update_rx_prod(p_hwfn, rxq);
4768 
4769         return rx_pkt;
4770 }
4771 
4772 /*
4773  * fast path interrupt
4774  */
4775 
4776 static void
4777 qlnx_fp_isr(void *arg)
4778 {
4779         qlnx_ivec_t		*ivec = arg;
4780         qlnx_host_t		*ha;
4781         struct qlnx_fastpath	*fp = NULL;
4782         int			idx;
4783 
4784         ha = ivec->ha;
4785 
4786         if (ha->state != QLNX_STATE_OPEN) {
4787                 return;
4788         }
4789 
4790         idx = ivec->rss_idx;
4791 
4792         if (idx >= ha->num_rss) {
4793                 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4794                 ha->err_illegal_intr++;
4795                 return;
4796         }
4797         fp = &ha->fp_array[idx];
4798 
4799         if (fp == NULL) {
4800                 ha->err_fp_null++;
4801         } else {
4802 		int			rx_int = 0;
4803 #ifdef QLNX_SOFT_LRO
4804 		int			total_rx_count = 0;
4805 #endif
#ifdef QLNX_TRACE_PERF_DATA
		uint64_t		tx_compl;	/* snapshot of tx_pkts_completed */
#endif
4806 		int 			lro_enable, tc;
4807 		struct qlnx_tx_queue	*txq;
4808 		uint16_t		elem_left;
4809 
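		/*
		 * Fast path service: ack the status block with interrupts
		 * disabled, reap TX completions on any traffic class whose
		 * PBL is running low, poll RX (rx_pkt_threshold per pass)
		 * until idle, flush software LRO, then re-enable the IGU.
		 */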
4810 		lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO;
4811 
4812                 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4813 
4814                 do {
4815                         for (tc = 0; tc < ha->num_tc; tc++) {
4816 				txq = fp->txq[tc];
4817 
4818 				if((int)(elem_left =
4819 					ecore_chain_get_elem_left(&txq->tx_pbl)) <
4820 						QLNX_TX_ELEM_THRESH)  {
4821                                 	if (mtx_trylock(&fp->tx_mtx)) {
4822 #ifdef QLNX_TRACE_PERF_DATA
4823 						tx_compl = fp->tx_pkts_completed;
4824 #endif
4825 
4826 						qlnx_tx_int(ha, fp, fp->txq[tc]);
4827 #ifdef QLNX_TRACE_PERF_DATA
4828 						fp->tx_pkts_compl_intr +=
4829 							(fp->tx_pkts_completed - tx_compl);
4830 						if ((fp->tx_pkts_completed - tx_compl) <= 32)
4831 							fp->tx_comInt[0]++;
4832 						else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4833 							((fp->tx_pkts_completed - tx_compl) <= 64))
4834 							fp->tx_comInt[1]++;
4835 						else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4836 							((fp->tx_pkts_completed - tx_compl) <= 128))
4837 							fp->tx_comInt[2]++;
4838 						else if(((fp->tx_pkts_completed - tx_compl) > 128))
4839 							fp->tx_comInt[3]++;
4840 #endif
4841 						mtx_unlock(&fp->tx_mtx);
4842 					}
4843 				}
4844                         }
4845 
4846                         rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4847                                         lro_enable);
4848 
4849                         if (rx_int) {
4850                                 fp->rx_pkts += rx_int;
4851 #ifdef QLNX_SOFT_LRO
4852                                 total_rx_count += rx_int;
4853 #endif
4854                         }
4855 
4856                 } while (rx_int);
4857 
4858 #ifdef QLNX_SOFT_LRO
4859                 {
4860                         struct lro_ctrl *lro;
4861 
4862                         lro = &fp->rxq->lro;
4863 
4864                         if (lro_enable && total_rx_count) {
4865 
4866 #ifdef QLNX_TRACE_LRO_CNT
4867                                 if (lro->lro_mbuf_count & ~1023)
4868                                         fp->lro_cnt_1024++;
4869                                 else if (lro->lro_mbuf_count & ~511)
4870                                         fp->lro_cnt_512++;
4871                                 else if (lro->lro_mbuf_count & ~255)
4872                                         fp->lro_cnt_256++;
4873                                 else if (lro->lro_mbuf_count & ~127)
4874                                         fp->lro_cnt_128++;
4875                                 else if (lro->lro_mbuf_count & ~63)
4876                                         fp->lro_cnt_64++;
4877 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4878 
4879                                 tcp_lro_flush_all(lro);
4880                         }
4881                 }
4882 #endif /* #ifdef QLNX_SOFT_LRO */
4883 
4884                 ecore_sb_update_sb_idx(fp->sb_info);
4885                 rmb();
4886                 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4887         }
4888 
4889         return;
4890 }
4891 
4892 /*
4893  * slow path interrupt processing function
4894  * can be invoked in polled mode or in interrupt mode via taskqueue.
4895  */
4896 void
4897 qlnx_sp_isr(void *arg)
4898 {
4899 	struct ecore_hwfn	*p_hwfn;
4900 	qlnx_host_t		*ha;
4901 
4902 	p_hwfn = arg;
4903 
4904 	ha = (qlnx_host_t *)p_hwfn->p_dev;
4905 
4906 	ha->sp_interrupts++;
4907 
4908 	QL_DPRINT2(ha, "enter\n");
4909 
4910 	ecore_int_sp_dpc(p_hwfn);
4911 
4912 	QL_DPRINT2(ha, "exit\n");
4913 
4914 	return;
4915 }
4916 
4917 /*****************************************************************************
4918  * Support Functions for DMA'able Memory
4919  *****************************************************************************/
4920 
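/*
 * bus_dmamap_load() callback: record the lone segment's bus address in the
 * caller-supplied bus_addr_t, which is zeroed first so a failed load reads
 * back as 0.
 */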
4921 static void
4922 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4923 {
4924         *((bus_addr_t *)arg) = 0;
4925 
4926         if (error) {
4927                 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4928                 return;
4929         }
4930 
4931         *((bus_addr_t *)arg) = segs[0].ds_addr;
4932 
4933         return;
4934 }
4935 
4936 static int
4937 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4938 {
4939         int             ret = 0;
4940         bus_addr_t      b_addr;
4941 
4942         ret = bus_dma_tag_create(
4943                         ha->parent_tag,/* parent */
4944                         dma_buf->alignment,
4945                         ((bus_size_t)(1ULL << 32)),/* boundary */
4946                         BUS_SPACE_MAXADDR,      /* lowaddr */
4947                         BUS_SPACE_MAXADDR,      /* highaddr */
4948                         NULL, NULL,             /* filter, filterarg */
4949                         dma_buf->size,          /* maxsize */
4950                         1,                      /* nsegments */
4951                         dma_buf->size,          /* maxsegsize */
4952                         0,                      /* flags */
4953                         NULL, NULL,             /* lockfunc, lockarg */
4954                         &dma_buf->dma_tag);
4955 
4956         if (ret) {
4957                 QL_DPRINT1(ha, "could not create dma tag\n");
4958                 goto qlnx_alloc_dmabuf_exit;
4959         }
4960         ret = bus_dmamem_alloc(dma_buf->dma_tag,
4961                         (void **)&dma_buf->dma_b,
4962                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4963                         &dma_buf->dma_map);
4964         if (ret) {
4965                 bus_dma_tag_destroy(dma_buf->dma_tag);
4966                 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4967                 goto qlnx_alloc_dmabuf_exit;
4968         }
4969 
4970         ret = bus_dmamap_load(dma_buf->dma_tag,
4971                         dma_buf->dma_map,
4972                         dma_buf->dma_b,
4973                         dma_buf->size,
4974                         qlnx_dmamap_callback,
4975                         &b_addr, BUS_DMA_NOWAIT);
4976 
4977         if (ret || !b_addr) {
                /* free the DMA memory before destroying the tag it came from */
4978                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4979                         dma_buf->dma_map);
4980                 bus_dma_tag_destroy(dma_buf->dma_tag);
4981                 ret = -1;
4982                 goto qlnx_alloc_dmabuf_exit;
4983         }
4984 
4985         dma_buf->dma_addr = b_addr;
4986 
4987 qlnx_alloc_dmabuf_exit:
4988 
4989         return ret;
4990 }
4991 
4992 static void
4993 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4994 {
4995 	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4996         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4997         bus_dma_tag_destroy(dma_buf->dma_tag);
4998 	return;
4999 }
5000 
5001 void *
5002 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5003 {
5004 	qlnx_dma_t	dma_buf;
5005 	qlnx_dma_t	*dma_p;
5006 	qlnx_host_t	*ha __unused;
5007 
5008 	ha = (qlnx_host_t *)ecore_dev;
5009 
5010 	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5011 
5012 	memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5013 
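	/*
	 * Allocate one extra page beyond the page-rounded request and stash
	 * a copy of the qlnx_dma_t bookkeeping at dma_b + size;
	 * qlnx_dma_free_coherent() recovers it from v_addr + size to unload
	 * and free the buffer.
	 */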
5014 	dma_buf.size = size + PAGE_SIZE;
5015 	dma_buf.alignment = 8;
5016 
5017 	if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5018 		return (NULL);
5019 	bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5020 
5021 	*phys = dma_buf.dma_addr;
5022 
5023 	dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5024 
5025 	memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5026 
5027 	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5028 		(void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5029 		dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5030 
5031 	return (dma_buf.dma_b);
5032 }
5033 
5034 void
5035 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5036 	uint32_t size)
5037 {
5038 	qlnx_dma_t dma_buf, *dma_p;
5039 	qlnx_host_t	*ha;
5040 
5041 	ha = (qlnx_host_t *)ecore_dev;
5042 
5043 	if (v_addr == NULL)
5044 		return;
5045 
5046 	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5047 
5048 	dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5049 
5050 	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5051 		(void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5052 		dma_p->dma_b, (void *)dma_p->dma_addr, size);
5053 
5054 	dma_buf = *dma_p;
5055 
5056 	if (!ha->qlnxr_debug)
5057 		qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5058 	return;
5059 }
5060 
5061 static int
5062 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5063 {
5064         int             ret;
5065         device_t        dev;
5066 
5067         dev = ha->pci_dev;
5068 
5069         /*
5070          * Allocate parent DMA Tag
5071          */
5072         ret = bus_dma_tag_create(
5073                         bus_get_dma_tag(dev),   /* parent */
5074                         1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
5075                         BUS_SPACE_MAXADDR,      /* lowaddr */
5076                         BUS_SPACE_MAXADDR,      /* highaddr */
5077                         NULL, NULL,             /* filter, filterarg */
5078                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5079                         0,                      /* nsegments */
5080                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5081                         0,                      /* flags */
5082                         NULL, NULL,             /* lockfunc, lockarg */
5083                         &ha->parent_tag);
5084 
5085         if (ret) {
5086                 QL_DPRINT1(ha, "could not create parent dma tag\n");
5087                 return (-1);
5088         }
5089 
5090         ha->flags.parent_tag = 1;
5091 
5092         return (0);
5093 }
5094 
5095 static void
5096 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5097 {
5098         if (ha->parent_tag != NULL) {
5099                 bus_dma_tag_destroy(ha->parent_tag);
5100 		ha->parent_tag = NULL;
5101         }
5102 	return;
5103 }
5104 
5105 static int
5106 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5107 {
5108         if (bus_dma_tag_create(NULL,    /* parent */
5109                 1, 0,    /* alignment, bounds */
5110                 BUS_SPACE_MAXADDR,       /* lowaddr */
5111                 BUS_SPACE_MAXADDR,       /* highaddr */
5112                 NULL, NULL,      /* filter, filterarg */
5113                 QLNX_MAX_TSO_FRAME_SIZE,     /* maxsize */
5114                 QLNX_MAX_SEGMENTS,        /* nsegments */
5115                 QLNX_MAX_TX_MBUF_SIZE,	  /* maxsegsize */
5116                 0,        /* flags */
5117                 NULL,    /* lockfunc */
5118                 NULL,    /* lockfuncarg */
5119                 &ha->tx_tag)) {
5120                 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5121                 return (-1);
5122         }
5123 
5124 	return (0);
5125 }
5126 
5127 static void
5128 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5129 {
5130         if (ha->tx_tag != NULL) {
5131                 bus_dma_tag_destroy(ha->tx_tag);
5132 		ha->tx_tag = NULL;
5133         }
5134 	return;
5135 }
5136 
5137 static int
5138 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5139 {
5140         if (bus_dma_tag_create(NULL,    /* parent */
5141                         1, 0,    /* alignment, bounds */
5142                         BUS_SPACE_MAXADDR,       /* lowaddr */
5143                         BUS_SPACE_MAXADDR,       /* highaddr */
5144                         NULL, NULL,      /* filter, filterarg */
5145                         MJUM9BYTES,     /* maxsize */
5146                         1,        /* nsegments */
5147                         MJUM9BYTES,        /* maxsegsize */
5148                         0,        /* flags */
5149                         NULL,    /* lockfunc */
5150                         NULL,    /* lockfuncarg */
5151                         &ha->rx_tag)) {
5152                 QL_DPRINT1(ha, "rx_tag alloc failed\n");
5153 
5154                 return (-1);
5155         }
5156 	return (0);
5157 }
5158 
5159 static void
5160 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5161 {
5162         if (ha->rx_tag != NULL) {
5163                 bus_dma_tag_destroy(ha->rx_tag);
5164 		ha->rx_tag = NULL;
5165         }
5166 	return;
5167 }
5168 
5169 /*********************************
5170  * Exported functions
5171  *********************************/
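/*
 * ecore numbers BARs 0..N; the bar_id * 2 below accounts for each memory
 * BAR being 64-bit and therefore consuming two PCI config-space BAR
 * registers, so ecore BAR 0/1 maps to PCIR_BAR(0)/PCIR_BAR(2).
 */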
5172 uint32_t
5173 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5174 {
5175 	uint32_t bar_size;
5176 
5177 	bar_id = bar_id * 2;
5178 
5179 	bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5180 				SYS_RES_MEMORY,
5181 				PCIR_BAR(bar_id));
5182 
5183 	return (bar_size);
5184 }
5185 
5186 uint32_t
5187 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5188 {
5189 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5190 				pci_reg, 1);
5191 	return 0;
5192 }
5193 
5194 uint32_t
5195 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5196 	uint16_t *reg_value)
5197 {
5198 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5199 				pci_reg, 2);
5200 	return 0;
5201 }
5202 
5203 uint32_t
5204 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5205 	uint32_t *reg_value)
5206 {
5207 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5208 				pci_reg, 4);
5209 	return 0;
5210 }
5211 
5212 void
5213 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5214 {
5215 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5216 		pci_reg, reg_value, 1);
5217 	return;
5218 }
5219 
5220 void
5221 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5222 	uint16_t reg_value)
5223 {
5224 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5225 		pci_reg, reg_value, 2);
5226 	return;
5227 }
5228 
5229 void
5230 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5231 	uint32_t reg_value)
5232 {
5233 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5234 		pci_reg, reg_value, 4);
5235 	return;
5236 }
5237 
5238 int
5239 qlnx_pci_find_capability(void *ecore_dev, int cap)
5240 {
5241 	int		reg;
5242 	qlnx_host_t	*ha;
5243 
5244 	ha = ecore_dev;
5245 
5246 	if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5247 		return reg;
5248 	else {
5249 		QL_DPRINT1(ha, "failed\n");
5250 		return 0;
5251 	}
5252 }
5253 
5254 int
5255 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5256 {
5257 	int		reg;
5258 	qlnx_host_t	*ha;
5259 
5260 	ha = ecore_dev;
5261 
5262 	if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5263 		return reg;
5264 	else {
5265 		QL_DPRINT1(ha, "failed\n");
5266 		return 0;
5267 	}
5268 }
5269 
5270 uint32_t
5271 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5272 {
5273 	uint32_t		data32;
5274 	struct ecore_hwfn	*p_hwfn;
5275 
5276 	p_hwfn = hwfn;
5277 
5278 	data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg,
5279 			(bus_size_t)(p_hwfn->reg_offset + reg_addr));
5280 
5281 	return (data32);
5282 }
5283 
5284 void
5285 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5286 {
5287 	struct ecore_hwfn	*p_hwfn = hwfn;
5288 
5289 	bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg,
5290 		(bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5291 
5292 	return;
5293 }
5294 
5295 void
5296 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5297 {
5298 	struct ecore_hwfn	*p_hwfn = hwfn;
5299 
5300 	bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg,
5301 		(bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5302 	return;
5303 }
5304 
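/*
 * Doorbell helpers: the caller passes an address inside the hwfn's
 * doorbell window, and the offset relative to p_hwfn->doorbells is
 * replayed against the driver's own mapping of the doorbell BAR
 * (pci_dbells).
 */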
5305 void
5306 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5307 {
5308 	struct ecore_dev	*cdev;
5309 	struct ecore_hwfn	*p_hwfn;
5310 	uint32_t	offset;
5311 
5312 	p_hwfn = hwfn;
5313 
5314 	cdev = p_hwfn->p_dev;
5315 
5316 	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5317 	bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5318 
5319 	return;
5320 }
5321 
5322 void
5323 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5324 {
5325 	struct ecore_hwfn	*p_hwfn = hwfn;
5326 
5327 	bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells,
5328 		(bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5329 
5330 	return;
5331 }
5332 
5333 uint32_t
5334 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5335 {
5336 	uint32_t		data32;
5337 	bus_size_t		offset;
5338 	struct ecore_dev	*cdev;
5339 
5340 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5341 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5342 
5343 	data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5344 
5345 	return (data32);
5346 }
5347 
5348 void
5349 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5350 {
5351 	bus_size_t		offset;
5352 	struct ecore_dev	*cdev;
5353 
5354 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5355 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5356 
5357 	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5358 
5359 	return;
5360 }
5361 
5362 void
5363 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5364 {
5365 	bus_size_t		offset;
5366 	struct ecore_dev	*cdev;
5367 
5368 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5369 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5370 
5371 	bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5372 	return;
5373 }
5374 
5375 void *
5376 qlnx_zalloc(uint32_t size)
5377 {
5378 	caddr_t	va;
5379 
5380 	/* M_ZERO avoids bzero()ing a NULL pointer if the M_NOWAIT alloc fails */
5381 	va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT | M_ZERO);
5382 	return ((void *)va);
5383 }
5384 
5385 void
5386 qlnx_barrier(void *p_dev)
5387 {
5388 	qlnx_host_t	*ha;
5389 
5390 	ha = ((struct ecore_dev *) p_dev)->ha;
5391 	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_WRITE);
5392 }
5393 
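/*
 * Link-change callback invoked from the ecore slowpath. It refreshes the
 * cached link state and notifies the network stack only on an actual
 * up/down transition; on SR-IOV capable PFs the new state is also
 * propagated to the VFs.
 */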
5394 void
5395 qlnx_link_update(void *p_hwfn)
5396 {
5397 	qlnx_host_t	*ha;
5398 	int		prev_link_state;
5399 
5400 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5401 
5402 	qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5403 
5404 	prev_link_state = ha->link_up;
5405 	ha->link_up = ha->if_link.link_up;
5406 
5407         if (prev_link_state !=  ha->link_up) {
5408                 if (ha->link_up) {
5409                         if_link_state_change(ha->ifp, LINK_STATE_UP);
5410                 } else {
5411                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5412                 }
5413         }
5414 #ifndef QLNX_VF
5415 #ifdef CONFIG_ECORE_SRIOV
5416 
5417 	if (qlnx_vf_device(ha) != 0) {
5418 		if (ha->sriov_initialized)
5419 			qlnx_inform_vf_link_state(p_hwfn, ha);
5420 	}
5421 
5422 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5423 #endif /* #ifdef QLNX_VF */
5424 
5425         return;
5426 }
5427 
5428 static void
5429 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5430 	struct ecore_vf_acquire_sw_info *p_sw_info)
5431 {
5432 	p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5433 					(QLNX_VERSION_MINOR << 16) |
5434 					 QLNX_VERSION_BUILD;
5435 	p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5436 
5437 	return;
5438 }
5439 
5440 void
5441 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5442 	void *p_sw_info)
5443 {
5444 	__qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
5445 
5446 	return;
5447 }
5448 
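/*
 * Link information comes from different sources depending on the
 * function type: a PF reads it from the management FW through a PTT
 * window, while a VF reads the bulletin board published by its parent PF.
 */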
5449 void
5450 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5451 	struct qlnx_link_output *if_link)
5452 {
5453 	struct ecore_mcp_link_params    link_params;
5454 	struct ecore_mcp_link_state     link_state;
5455 	uint8_t				p_change;
5456 	struct ecore_ptt *p_ptt = NULL;
5457 
5458 	memset(if_link, 0, sizeof(*if_link));
5459 	memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5460 	memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5461 
5462 	ha = (qlnx_host_t *)hwfn->p_dev;
5463 
5464 	/* Prepare source inputs */
5465 	/* the PTT-based path below applies only to physical functions */
5466 	if (qlnx_vf_device(ha) != 0) {
5467         	p_ptt = ecore_ptt_acquire(hwfn);
5468 
5469 	        if (p_ptt == NULL) {
5470 			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5471 			return;
5472 		}
5473 
5474 		ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5475 		ecore_ptt_release(hwfn, p_ptt);
5476 
5477 		memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5478 			sizeof(link_params));
5479 		memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5480 			sizeof(link_state));
5481 	} else {
5482 		ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5483 		ecore_vf_read_bulletin(hwfn, &p_change);
5484 		ecore_vf_get_link_params(hwfn, &link_params);
5485 		ecore_vf_get_link_state(hwfn, &link_state);
5486 	}
5487 
5488 	/* Set the link parameters to pass to protocol driver */
5489 	if (link_state.link_up) {
5490 		if_link->link_up = true;
5491 		if_link->speed = link_state.speed;
5492 	}
5493 
5494 	if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5495 
5496 	if (link_params.speed.autoneg)
5497 		if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5498 
5499 	if (link_params.pause.autoneg ||
5500 		(link_params.pause.forced_rx && link_params.pause.forced_tx))
5501 		if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5502 
5503 	if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5504 		link_params.pause.forced_tx)
5505 		if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5506 
5507 	if (link_params.speed.advertised_speeds &
5508 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5509 		if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5510                                            QLNX_LINK_CAP_1000baseT_Full;
5511 
5512 	if (link_params.speed.advertised_speeds &
5513 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5514 		if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5515 
5516 	if (link_params.speed.advertised_speeds &
5517 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5518 		if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5519 
5520 	if (link_params.speed.advertised_speeds &
5521 		NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5522 		if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5523 
5524 	if (link_params.speed.advertised_speeds &
5525 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5526 		if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5527 
5528 	if (link_params.speed.advertised_speeds &
5529 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5530 		if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5531 
5532 	if_link->advertised_caps = if_link->supported_caps;
5533 
5534 	if_link->autoneg = link_params.speed.autoneg;
5535 	if_link->duplex = QLNX_LINK_DUPLEX;
5536 
5537 	/* Link partner capabilities */
5538 
5539 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5540 		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5541 
5542 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5543 		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5544 
5545 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5546 		if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5547 
5548 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5549 		if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5550 
5551 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5552 		if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5553 
5554 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5555 		if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5556 
5557 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5558 		if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5559 
5560 	if (link_state.an_complete)
5561 		if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5562 
5563 	if (link_state.partner_adv_pause)
5564 		if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5565 
5566 	if ((link_state.partner_adv_pause ==
5567 		ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5568 		(link_state.partner_adv_pause ==
5569 			ECORE_LINK_PARTNER_BOTH_PAUSE))
5570 		if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5571 
5572 	return;
5573 }
5574 
5575 void
5576 qlnx_schedule_recovery(void *p_hwfn)
5577 {
5578 	qlnx_host_t	*ha;
5579 
5580 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5581 
5582 	if (qlnx_vf_device(ha) != 0) {
5583 		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5584 	}
5585 
5586 	return;
5587 }
5588 
5589 static int
5590 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5591 {
5592         int	rc, i;
5593 
5594         for (i = 0; i < cdev->num_hwfns; i++) {
5595                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5596                 p_hwfn->pf_params = *func_params;
5597 
5598 #ifdef QLNX_ENABLE_IWARP
5599 		if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5600 			p_hwfn->using_ll2 = true;
5601 		}
5602 #endif /* #ifdef QLNX_ENABLE_IWARP */
5603         }
5604 
5605         rc = ecore_resc_alloc(cdev);
5606         if (rc)
5607                 goto qlnx_nic_setup_exit;
5608 
5609         ecore_resc_setup(cdev);
5610 
5611 qlnx_nic_setup_exit:
5612 
5613         return rc;
5614 }
5615 
5616 static int
5617 qlnx_nic_start(struct ecore_dev *cdev)
5618 {
5619         int				rc;
5620 	struct ecore_hw_init_params	params;
5621 
5622 	bzero(&params, sizeof (struct ecore_hw_init_params));
5623 
5624 	params.p_tunn = NULL;
5625 	params.b_hw_start = true;
5626 	params.int_mode = cdev->int_mode;
5627 	params.allow_npar_tx_switch = true;
5628 	params.bin_fw_data = NULL;
5629 
5630         rc = ecore_hw_init(cdev, &params);
5631         if (rc) {
5632                 ecore_resc_free(cdev);
5633                 return rc;
5634         }
5635 
5636         return 0;
5637 }
5638 
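/*
 * Slowpath bring-up order: fill the per-PF resource parameters
 * (including optional RDMA sizing), allocate and set up ecore resources,
 * select MSI-X with interrupt coalescing, then run the common hardware
 * init via qlnx_nic_start().
 */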
5639 static int
5640 qlnx_slowpath_start(qlnx_host_t *ha)
5641 {
5642 	struct ecore_dev	*cdev;
5643 	struct ecore_pf_params	pf_params;
5644 	int			rc;
5645 
5646 	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5647 	pf_params.eth_pf_params.num_cons  =
5648 		(ha->num_rss) * (ha->num_tc + 1);
5649 
5650 #ifdef QLNX_ENABLE_IWARP
5651 	if (qlnx_vf_device(ha) != 0) {
5652 		if (ha->personality == ECORE_PCI_ETH_IWARP) {
5653 			device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5654 			pf_params.rdma_pf_params.num_qps = 1024;
5655 			pf_params.rdma_pf_params.num_srqs = 1024;
5656 			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5657 			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5658 		} else if (ha->personality == ECORE_PCI_ETH_ROCE) {
5659 			device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5660 			pf_params.rdma_pf_params.num_qps = 8192;
5661 			pf_params.rdma_pf_params.num_srqs = 8192;
5662 			//pf_params.rdma_pf_params.min_dpis = 0;
5663 			pf_params.rdma_pf_params.min_dpis = 8;
5664 			pf_params.rdma_pf_params.roce_edpm_mode = 0;
5665 			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5666 			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5667 		}
5668 	}
5669 #endif /* #ifdef QLNX_ENABLE_IWARP */
5670 
5671 	cdev = &ha->cdev;
5672 
5673 	rc = qlnx_nic_setup(cdev, &pf_params);
5674         if (rc)
5675                 goto qlnx_slowpath_start_exit;
5676 
5677         cdev->int_mode = ECORE_INT_MODE_MSIX;
5678         cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5679 
5680 #ifdef QLNX_MAX_COALESCE
5681 	cdev->rx_coalesce_usecs = 255;
5682 	cdev->tx_coalesce_usecs = 255;
5683 #endif
5684 
5685 	rc = qlnx_nic_start(cdev);
5686 
5687 	ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5688 	ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5689 
5690 #ifdef QLNX_USER_LLDP
5691 	(void)qlnx_set_lldp_tlvx(ha, NULL);
5692 #endif /* #ifdef QLNX_USER_LLDP */
5693 
5694 qlnx_slowpath_start_exit:
5695 
5696 	return (rc);
5697 }
5698 
5699 static int
5700 qlnx_slowpath_stop(qlnx_host_t *ha)
5701 {
5702 	struct ecore_dev	*cdev;
5703 	device_t		dev = ha->pci_dev;
5704 	int			i;
5705 
5706 	cdev = &ha->cdev;
5707 
5708 	ecore_hw_stop(cdev);
5709 
5710  	for (i = 0; i < ha->cdev.num_hwfns; i++) {
5711         	if (ha->sp_handle[i])
5712                 	(void)bus_teardown_intr(dev, ha->sp_irq[i],
5713 				ha->sp_handle[i]);
5714 
5715 		ha->sp_handle[i] = NULL;
5716 
5717         	if (ha->sp_irq[i])
5718 			(void) bus_release_resource(dev, SYS_RES_IRQ,
5719 				ha->sp_irq_rid[i], ha->sp_irq[i]);
5720 		ha->sp_irq[i] = NULL;
5721 	}
5722 
5723         ecore_resc_free(cdev);
5724 
5725         return 0;
5726 }
5727 
5728 static void
5729 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5730 	char ver_str[VER_SIZE])
5731 {
5732         int	i;
5733 
5734         memcpy(cdev->name, name, NAME_SIZE);
5735 
5736         for_each_hwfn(cdev, i) {
5737                 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5738         }
5739 
5740         cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5741 
5742 	return;
5743 }
5744 
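/*
 * Protocol-statistics callback used by the management FW; only LAN (L2)
 * statistics are supported here, any other protocol type is counted as
 * an error.
 */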
5745 void
5746 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5747 {
5748 	enum ecore_mcp_protocol_type	type;
5749 	union ecore_mcp_protocol_stats	*stats;
5750 	struct ecore_eth_stats		eth_stats;
5751 	qlnx_host_t			*ha;
5752 
5753 	ha = cdev;
5754 	stats = proto_stats;
5755 	type = proto_type;
5756 
5757         switch (type) {
5758         case ECORE_MCP_LAN_STATS:
5759                 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5760                 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5761                 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5762                 stats->lan_stats.fcs_err = -1;
5763                 break;
5764 
5765 	default:
5766 		ha->err_get_proto_invalid_type++;
5767 
5768 		QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5769 		break;
5770 	}
5771 	return;
5772 }
5773 
5774 static int
5775 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5776 {
5777 	struct ecore_hwfn	*p_hwfn;
5778 	struct ecore_ptt	*p_ptt;
5779 
5780 	p_hwfn = &ha->cdev.hwfns[0];
5781 	p_ptt = ecore_ptt_acquire(p_hwfn);
5782 
5783 	if (p_ptt ==  NULL) {
5784                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5785                 return (-1);
5786 	}
5787 	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5788 
5789 	ecore_ptt_release(p_hwfn, p_ptt);
5790 
5791 	return (0);
5792 }
5793 
5794 static int
5795 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5796 {
5797 	struct ecore_hwfn	*p_hwfn;
5798 	struct ecore_ptt	*p_ptt;
5799 
5800 	p_hwfn = &ha->cdev.hwfns[0];
5801 	p_ptt = ecore_ptt_acquire(p_hwfn);
5802 
5803 	if (p_ptt ==  NULL) {
5804                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5805                 return (-1);
5806 	}
5807 	ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5808 
5809 	ecore_ptt_release(p_hwfn, p_ptt);
5810 
5811 	return (0);
5812 }
5813 
5814 static int
5815 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5816 {
5817 	bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5818 	bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5819 	bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5820 
5821         return 0;
5822 }
5823 
5824 static void
5825 qlnx_init_fp(qlnx_host_t *ha)
5826 {
5827 	int rss_id, txq_array_index, tc;
5828 
5829 	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5830 		struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5831 
5832 		fp->rss_id = rss_id;
5833 		fp->edev = ha;
5834 		fp->sb_info = &ha->sb_array[rss_id];
5835 		fp->rxq = &ha->rxq_array[rss_id];
5836 		fp->rxq->rxq_id = rss_id;
5837 
5838 		for (tc = 0; tc < ha->num_tc; tc++) {
5839                         txq_array_index = tc * ha->num_rss + rss_id;
5840                         fp->txq[tc] = &ha->txq_array[txq_array_index];
5841                         fp->txq[tc]->index = txq_array_index;
5842 		}
5843 
5844 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5845 			rss_id);
5846 
5847 		fp->tx_ring_full = 0;
5848 
5849 		/* reset all the statistics counters */
5850 
5851 		fp->tx_pkts_processed = 0;
5852 		fp->tx_pkts_freed = 0;
5853 		fp->tx_pkts_transmitted = 0;
5854 		fp->tx_pkts_completed = 0;
5855 
5856 #ifdef QLNX_TRACE_PERF_DATA
5857 		fp->tx_pkts_trans_ctx = 0;
5858 		fp->tx_pkts_compl_ctx = 0;
5859 		fp->tx_pkts_trans_fp = 0;
5860 		fp->tx_pkts_compl_fp = 0;
5861 		fp->tx_pkts_compl_intr = 0;
5862 #endif
5863 		fp->tx_lso_wnd_min_len = 0;
5864 		fp->tx_defrag = 0;
5865 		fp->tx_nsegs_gt_elem_left = 0;
5866 		fp->tx_tso_max_nsegs = 0;
5867 		fp->tx_tso_min_nsegs = 0;
5868 		fp->err_tx_nsegs_gt_elem_left = 0;
5869 		fp->err_tx_dmamap_create = 0;
5870 		fp->err_tx_defrag_dmamap_load = 0;
5871 		fp->err_tx_non_tso_max_seg = 0;
5872 		fp->err_tx_dmamap_load = 0;
5873 		fp->err_tx_defrag = 0;
5874 		fp->err_tx_free_pkt_null = 0;
5875 		fp->err_tx_cons_idx_conflict = 0;
5876 
5877 		fp->rx_pkts = 0;
5878 		fp->err_m_getcl = 0;
5879 		fp->err_m_getjcl = 0;
5880         }
5881 	return;
5882 }
5883 
5884 void
5885 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5886 {
5887 	struct ecore_dev	*cdev;
5888 
5889 	cdev = &ha->cdev;
5890 
5891         if (sb_info->sb_virt) {
5892                 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5893 			(sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5894 		sb_info->sb_virt = NULL;
5895 	}
5896 }
5897 
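/*
 * Status blocks are distributed round-robin across the hw-functions:
 * sb_id % num_hwfns selects the engine, and sb_id / num_hwfns becomes
 * the relative index registered with that engine.
 */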
5898 static int
5899 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5900 	void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5901 {
5902         struct ecore_hwfn	*p_hwfn;
5903         int			hwfn_index, rc;
5904         u16			rel_sb_id;
5905 
5906         hwfn_index = sb_id % cdev->num_hwfns;
5907         p_hwfn = &cdev->hwfns[hwfn_index];
5908         rel_sb_id = sb_id / cdev->num_hwfns;
5909 
5910         QL_DPRINT2(((qlnx_host_t *)cdev),
5911                 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5912                 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5913                 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5914                 sb_virt_addr, (void *)sb_phy_addr);
5915 
5916         rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5917                              sb_virt_addr, sb_phy_addr, rel_sb_id);
5918 
5919         return rc;
5920 }
5921 
5922 /* This function allocates fast-path status block memory */
5923 int
5924 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5925 {
5926         struct status_block_e4	*sb_virt;
5927         bus_addr_t		sb_phys;
5928         int			rc;
5929 	uint32_t		size;
5930 	struct ecore_dev	*cdev;
5931 
5932 	cdev = &ha->cdev;
5933 
5934 	size = sizeof(*sb_virt);
5935 	sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5936 
5937         if (!sb_virt) {
5938                 QL_DPRINT1(ha, "Status block allocation failed\n");
5939                 return -ENOMEM;
5940         }
5941 
5942         rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5943         if (rc) {
5944                 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5945         }
5946 
5947 	return rc;
5948 }
5949 
5950 static void
5951 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5952 {
5953         int			i;
5954 	struct sw_rx_data	*rx_buf;
5955 
5956         for (i = 0; i < rxq->num_rx_buffers; i++) {
5957                 rx_buf = &rxq->sw_rx_ring[i];
5958 
5959 		if (rx_buf->data != NULL) {
5960 			if (rx_buf->map != NULL) {
5961 				bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5962 				bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5963 				rx_buf->map = NULL;
5964 			}
5965 			m_freem(rx_buf->data);
5966 			rx_buf->data = NULL;
5967 		}
5968         }
5969 	return;
5970 }
5971 
5972 static void
5973 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5974 {
5975 	struct ecore_dev	*cdev;
5976 	int			i;
5977 
5978 	cdev = &ha->cdev;
5979 
5980 	qlnx_free_rx_buffers(ha, rxq);
5981 
5982 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5983 		qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5984 		if (rxq->tpa_info[i].mpf != NULL)
5985 			m_freem(rxq->tpa_info[i].mpf);
5986 	}
5987 
5988 	bzero((void *)&rxq->sw_rx_ring[0],
5989 		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
5990 
5991         /* Free the real RQ ring used by FW */
5992 	if (rxq->rx_bd_ring.p_virt_addr) {
5993                 ecore_chain_free(cdev, &rxq->rx_bd_ring);
5994                 rxq->rx_bd_ring.p_virt_addr = NULL;
5995         }
5996 
5997         /* Free the real completion ring used by FW */
5998         if (rxq->rx_comp_ring.p_virt_addr &&
5999                         rxq->rx_comp_ring.pbl_sp.p_virt_table) {
6000                 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6001                 rxq->rx_comp_ring.p_virt_addr = NULL;
6002                 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6003         }
6004 
6005 #ifdef QLNX_SOFT_LRO
6006 	{
6007 		struct lro_ctrl *lro;
6008 
6009 		lro = &rxq->lro;
6010 		tcp_lro_free(lro);
6011 	}
6012 #endif /* #ifdef QLNX_SOFT_LRO */
6013 
6014 	return;
6015 }
6016 
6017 static int
6018 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6019 {
6020         register struct mbuf	*mp;
6021         uint16_t		rx_buf_size;
6022         struct sw_rx_data	*sw_rx_data;
6023         struct eth_rx_bd	*rx_bd;
6024         dma_addr_t		dma_addr;
6025 	bus_dmamap_t		map;
6026 	bus_dma_segment_t       segs[1];
6027 	int			nsegs;
6028 	int			ret;
6029 
6030         rx_buf_size = rxq->rx_buf_size;
6031 
6032 	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6033 
6034         if (mp == NULL) {
6035                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6036                 return -ENOMEM;
6037         }
6038 
6039 	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6040 
6041 	map = (bus_dmamap_t)0;
6042 
6043 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6044 			BUS_DMA_NOWAIT);
6045 	dma_addr = segs[0].ds_addr;
6046 
6047 	if (ret || !dma_addr || (nsegs != 1)) {
6048 		m_freem(mp);
6049 		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6050                            ret, (long long unsigned int)dma_addr, nsegs);
6051 		return -ENOMEM;
6052 	}
6053 
6054         sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6055         sw_rx_data->data = mp;
6056         sw_rx_data->dma_addr = dma_addr;
6057         sw_rx_data->map = map;
6058 
6059         /* Advance PROD and get BD pointer */
6060         rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6061         rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6062         rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6063 	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6064 
6065         rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6066 
6067         return 0;
6068 }
6069 
6070 static int
6071 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6072 	struct qlnx_agg_info *tpa)
6073 {
6074 	struct mbuf		*mp;
6075         dma_addr_t		dma_addr;
6076 	bus_dmamap_t		map;
6077 	bus_dma_segment_t       segs[1];
6078 	int			nsegs;
6079 	int			ret;
6080         struct sw_rx_data	*rx_buf;
6081 
6082 	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6083 
6084         if (mp == NULL) {
6085                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6086                 return -ENOMEM;
6087         }
6088 
6089 	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6090 
6091 	map = (bus_dmamap_t)0;
6092 
6093 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6094 			BUS_DMA_NOWAIT);
6095 	dma_addr = segs[0].ds_addr;
6096 
6097 	if (ret || !dma_addr || (nsegs != 1)) {
6098 		m_freem(mp);
6099 		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6100 			ret, (long long unsigned int)dma_addr, nsegs);
6101 		return -ENOMEM;
6102 	}
6103 
6104         rx_buf = &tpa->rx_buf;
6105 
6106 	memset(rx_buf, 0, sizeof (struct sw_rx_data));
6107 
6108         rx_buf->data = mp;
6109         rx_buf->dma_addr = dma_addr;
6110         rx_buf->map = map;
6111 
6112 	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6113 
6114 	return (0);
6115 }
6116 
6117 static void
6118 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6119 {
6120         struct sw_rx_data	*rx_buf;
6121 
6122 	rx_buf = &tpa->rx_buf;
6123 
6124 	if (rx_buf->data != NULL) {
6125 		if (rx_buf->map != NULL) {
6126 			bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6127 			bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6128 			rx_buf->map = NULL;
6129 		}
6130 		m_freem(rx_buf->data);
6131 		rx_buf->data = NULL;
6132 	}
6133 	return;
6134 }
6135 
6136 /* This function allocates all memory needed per Rx queue */
6137 static int
6138 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6139 {
6140         int			i, rc, num_allocated;
6141 	struct ecore_dev	 *cdev;
6142 
6143 	cdev = &ha->cdev;
6144 
6145         rxq->num_rx_buffers = RX_RING_SIZE;
6146 
6147 	rxq->rx_buf_size = ha->rx_buf_size;
6148 
6149         /* Allocate the parallel driver ring for Rx buffers */
6150 	bzero((void *)&rxq->sw_rx_ring[0],
6151 		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
6152 
6153         /* Allocate FW Rx ring  */
6154 
6155         rc = ecore_chain_alloc(cdev,
6156 			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6157 			ECORE_CHAIN_MODE_NEXT_PTR,
6158 			ECORE_CHAIN_CNT_TYPE_U16,
6159 			RX_RING_SIZE,
6160 			sizeof(struct eth_rx_bd),
6161 			&rxq->rx_bd_ring, NULL);
6162 
6163         if (rc)
6164                 goto err;
6165 
6166         /* Allocate FW completion ring */
6167         rc = ecore_chain_alloc(cdev,
6168                         ECORE_CHAIN_USE_TO_CONSUME,
6169                         ECORE_CHAIN_MODE_PBL,
6170 			ECORE_CHAIN_CNT_TYPE_U16,
6171                         RX_RING_SIZE,
6172                         sizeof(union eth_rx_cqe),
6173                         &rxq->rx_comp_ring, NULL);
6174 
6175         if (rc)
6176                 goto err;
6177 
6178         /* Allocate buffers for the Rx ring */
6179 
6180 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6181 		rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6182 			&rxq->tpa_info[i]);
6183                 if (rc)
6184                         break;
6185 	}
6186 
6187         for (i = 0; i < rxq->num_rx_buffers; i++) {
6188                 rc = qlnx_alloc_rx_buffer(ha, rxq);
6189                 if (rc)
6190                         break;
6191         }
6192         num_allocated = i;
6193         if (!num_allocated) {
6194 		QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6195                 goto err;
6196         } else if (num_allocated < rxq->num_rx_buffers) {
6197 		QL_DPRINT1(ha, "Allocated less buffers than"
6198 			" desired (%d allocated)\n", num_allocated);
6199         }
6200 
6201 #ifdef QLNX_SOFT_LRO
6202 
6203 	{
6204 		struct lro_ctrl *lro;
6205 
6206 		lro = &rxq->lro;
6207 
6208 		if (tcp_lro_init_args(lro, ha->ifp, 0, rxq->num_rx_buffers)) {
6209 			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6210 				   rxq->rxq_id);
6211 			goto err;
6212 		}
6213 
6214 		lro->ifp = ha->ifp;
6215 	}
6216 #endif /* #ifdef QLNX_SOFT_LRO */
6217         return 0;
6218 
6219 err:
6220         qlnx_free_mem_rxq(ha, rxq);
6221         return -ENOMEM;
6222 }
6223 
6224 static void
6225 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6226 	struct qlnx_tx_queue *txq)
6227 {
6228 	struct ecore_dev	*cdev;
6229 
6230 	cdev = &ha->cdev;
6231 
6232 	bzero((void *)&txq->sw_tx_ring[0],
6233 		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6234 
6235         /* Free the real RQ ring used by FW */
6236         if (txq->tx_pbl.p_virt_addr) {
6237                 ecore_chain_free(cdev, &txq->tx_pbl);
6238                 txq->tx_pbl.p_virt_addr = NULL;
6239         }
6240 	return;
6241 }
6242 
6243 /* This function allocates all memory needed per Tx queue */
6244 static int
6245 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6246 	struct qlnx_tx_queue *txq)
6247 {
6248         int			ret = ECORE_SUCCESS;
6249         union eth_tx_bd_types	*p_virt;
6250 	struct ecore_dev	*cdev;
6251 
6252 	cdev = &ha->cdev;
6253 
6254 	bzero((void *)&txq->sw_tx_ring[0],
6255 		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6256 
6257         /* Allocate the real Tx ring to be used by FW */
6258         ret = ecore_chain_alloc(cdev,
6259                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6260                         ECORE_CHAIN_MODE_PBL,
6261 			ECORE_CHAIN_CNT_TYPE_U16,
6262                         TX_RING_SIZE,
6263                         sizeof(*p_virt),
6264                         &txq->tx_pbl, NULL);
6265 
6266         if (ret != ECORE_SUCCESS) {
6267                 goto err;
6268         }
6269 
6270 	txq->num_tx_buffers = TX_RING_SIZE;
6271 
6272         return 0;
6273 
6274 err:
6275         qlnx_free_mem_txq(ha, fp, txq);
6276         return -ENOMEM;
6277 }
6278 
6279 static void
6280 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6281 {
6282 	struct mbuf	*mp;
6283 	if_t		ifp = ha->ifp;
6284 
6285 	if (mtx_initialized(&fp->tx_mtx)) {
6286 		if (fp->tx_br != NULL) {
6287 			mtx_lock(&fp->tx_mtx);
6288 
6289 			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6290 				fp->tx_pkts_freed++;
6291 				m_freem(mp);
6292 			}
6293 
6294 			mtx_unlock(&fp->tx_mtx);
6295 
6296 			buf_ring_free(fp->tx_br, M_DEVBUF);
6297 			fp->tx_br = NULL;
6298 		}
6299 		mtx_destroy(&fp->tx_mtx);
6300 	}
6301 	return;
6302 }
6303 
6304 static void
6305 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6306 {
6307         int	tc;
6308 
6309         qlnx_free_mem_sb(ha, fp->sb_info);
6310 
6311         qlnx_free_mem_rxq(ha, fp->rxq);
6312 
6313         for (tc = 0; tc < ha->num_tc; tc++)
6314                 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6315 
6316 	return;
6317 }
6318 
6319 static int
6320 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6321 {
6322 	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6323 		"qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6324 
6325 	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6326 
6327         fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6328                                    M_NOWAIT, &fp->tx_mtx);
6329         if (fp->tx_br == NULL) {
6330 		QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6331 			ha->dev_unit, fp->rss_id);
6332 		return -ENOMEM;
6333         }
6334 	return 0;
6335 }
6336 
6337 static int
6338 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6339 {
6340         int	rc, tc;
6341 
6342         rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6343         if (rc)
6344                 goto err;
6345 
6346 	if (ha->rx_jumbo_buf_eq_mtu) {
6347 		if (ha->max_frame_size <= MCLBYTES)
6348 			ha->rx_buf_size = MCLBYTES;
6349 		else if (ha->max_frame_size <= MJUMPAGESIZE)
6350 			ha->rx_buf_size = MJUMPAGESIZE;
6351 		else if (ha->max_frame_size <= MJUM9BYTES)
6352 			ha->rx_buf_size = MJUM9BYTES;
6353 		else if (ha->max_frame_size <= MJUM16BYTES)
6354 			ha->rx_buf_size = MJUM16BYTES;
6355 	} else {
6356 		if (ha->max_frame_size <= MCLBYTES)
6357 			ha->rx_buf_size = MCLBYTES;
6358 		else
6359 			ha->rx_buf_size = MJUMPAGESIZE;
6360 	}
6361 
6362         rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6363         if (rc)
6364                 goto err;
6365 
6366         for (tc = 0; tc < ha->num_tc; tc++) {
6367                 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6368                 if (rc)
6369                         goto err;
6370         }
6371 
6372         return 0;
6373 
6374 err:
6375         qlnx_free_mem_fp(ha, fp);
6376         return -ENOMEM;
6377 }
6378 
6379 static void
6380 qlnx_free_mem_load(qlnx_host_t *ha)
6381 {
6382         int			i;
6383 
6384         for (i = 0; i < ha->num_rss; i++) {
6385                 struct qlnx_fastpath *fp = &ha->fp_array[i];
6386 
6387                 qlnx_free_mem_fp(ha, fp);
6388         }
6389 	return;
6390 }
6391 
6392 static int
6393 qlnx_alloc_mem_load(qlnx_host_t *ha)
6394 {
6395         int	rc = 0, rss_id;
6396 
6397         for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6398                 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6399 
6400                 rc = qlnx_alloc_mem_fp(ha, fp);
6401                 if (rc)
6402                         break;
6403         }
6404 	return (rc);
6405 }
6406 
6407 static int
6408 qlnx_start_vport(struct ecore_dev *cdev,
6409                 u8 vport_id,
6410                 u16 mtu,
6411                 u8 drop_ttl0_flg,
6412                 u8 inner_vlan_removal_en_flg,
6413 		u8 tx_switching,
6414 		u8 hw_lro_enable)
6415 {
6416         int					rc, i;
6417 	struct ecore_sp_vport_start_params	vport_start_params = { 0 };
6418 	qlnx_host_t				*ha __unused;
6419 
6420 	ha = (qlnx_host_t *)cdev;
6421 
6422 	vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6423 	vport_start_params.tx_switching = 0;
6424 	vport_start_params.handle_ptp_pkts = 0;
6425 	vport_start_params.only_untagged = 0;
6426 	vport_start_params.drop_ttl0 = drop_ttl0_flg;
6427 
6428 	vport_start_params.tpa_mode =
6429 		(hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6430 	vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6431 
6432 	vport_start_params.vport_id = vport_id;
6433 	vport_start_params.mtu = mtu;
6434 
6435 	QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6436 
6437         for_each_hwfn(cdev, i) {
6438                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6439 
6440 		vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6441 		vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6442 
6443                 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6444 
6445                 if (rc) {
6446 			QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6447 				" with MTU %d\n" , vport_id, mtu);
6448                         return -ENOMEM;
6449                 }
6450 
6451                 ecore_hw_start_fastpath(p_hwfn);
6452 
6453 		QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6454 			vport_id, mtu);
6455         }
6456         return 0;
6457 }
6458 
6459 static int
6460 qlnx_update_vport(struct ecore_dev *cdev,
6461 	struct qlnx_update_vport_params *params)
6462 {
6463         struct ecore_sp_vport_update_params	sp_params;
6464         int					rc, i, j, fp_index;
6465 	struct ecore_hwfn			*p_hwfn;
6466         struct ecore_rss_params			*rss;
6467 	qlnx_host_t				*ha = (qlnx_host_t *)cdev;
6468         struct qlnx_fastpath			*fp;
6469 
6470         memset(&sp_params, 0, sizeof(sp_params));
6471         /* Translate protocol params into sp params */
6472         sp_params.vport_id = params->vport_id;
6473 
6474         sp_params.update_vport_active_rx_flg =
6475 		params->update_vport_active_rx_flg;
6476         sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6477 
6478         sp_params.update_vport_active_tx_flg =
6479 		params->update_vport_active_tx_flg;
6480         sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6481 
6482         sp_params.update_inner_vlan_removal_flg =
6483                 params->update_inner_vlan_removal_flg;
6484         sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6485 
6486 	sp_params.sge_tpa_params = params->sge_tpa_params;
6487 
6488         /* RSS is a bit tricky, since the upper layer isn't familiar with hwfns;
6489          * for CMT devices the RSS indirection values must be recomputed per engine.
6490          */
6491 	if (params->rss_params->update_rss_config)
6492 		sp_params.rss_params = params->rss_params;
6493 	else
6494 		sp_params.rss_params = NULL;
6495 
6496         for_each_hwfn(cdev, i) {
6497 		p_hwfn = &cdev->hwfns[i];
6498 
6499 		if ((cdev->num_hwfns > 1) &&
6500 			params->rss_params->update_rss_config &&
6501 			params->rss_params->rss_enable) {
6502 			rss = params->rss_params;
6503 
6504 			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6505 				fp_index = ((cdev->num_hwfns * j) + i) %
6506 						ha->num_rss;
6507 
6508                 		fp = &ha->fp_array[fp_index];
6509                         	rss->rss_ind_table[j] = fp->rxq->handle;
6510 			}
6511 
6512 			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6513 				QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6514 					rss->rss_ind_table[j],
6515 					rss->rss_ind_table[j+1],
6516 					rss->rss_ind_table[j+2],
6517 					rss->rss_ind_table[j+3],
6518 					rss->rss_ind_table[j+4],
6519 					rss->rss_ind_table[j+5],
6520 					rss->rss_ind_table[j+6],
6521 					rss->rss_ind_table[j+7]);
6522 					j += 8;
6523 			}
6524 		}
6525 
6526                 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6527 
6528 		QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6529 
6530                 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6531                                            ECORE_SPQ_MODE_EBLOCK, NULL);
6532                 if (rc) {
6533 			QL_DPRINT1(ha, "Failed to update VPORT\n");
6534                         return rc;
6535                 }
6536 
6537                 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6538 			rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6539 			params->vport_id, params->vport_active_tx_flg,
6540 			params->vport_active_rx_flg,
6541 			params->update_vport_active_tx_flg,
6542 			params->update_vport_active_rx_flg);
6543         }
6544 
6545         return 0;
6546 }
6547 
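/*
 * Recycle a just-consumed Rx BD back to the producer end of the ring
 * (used on paths where a replacement buffer cannot be posted), so the
 * BD ring never shrinks.
 */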
6548 static void
6549 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6550 {
6551         struct eth_rx_bd	*rx_bd_cons =
6552 					ecore_chain_consume(&rxq->rx_bd_ring);
6553         struct eth_rx_bd	*rx_bd_prod =
6554 					ecore_chain_produce(&rxq->rx_bd_ring);
6555         struct sw_rx_data	*sw_rx_data_cons =
6556 					&rxq->sw_rx_ring[rxq->sw_rx_cons];
6557         struct sw_rx_data	*sw_rx_data_prod =
6558 					&rxq->sw_rx_ring[rxq->sw_rx_prod];
6559 
6560         sw_rx_data_prod->data = sw_rx_data_cons->data;
6561         memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6562 
6563         rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6564         rxq->sw_rx_prod  = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6565 
6566 	return;
6567 }
6568 
6569 static void
6570 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6571 {
6572 
6573         uint16_t	 	bd_prod;
6574         uint16_t		cqe_prod;
6575 	union {
6576 		struct eth_rx_prod_data rx_prod_data;
6577 		uint32_t		data32;
6578 	} rx_prods;
6579 
6580         bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6581         cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6582 
6583         /* Update producers */
6584         rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6585         rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6586 
6587         /* Make sure that the BD and SGE data is updated before updating the
6588          * producers since FW might read the BD/SGE right after the producer
6589          * is updated.
6590          */
6591 	wmb();
6592 
6593 #ifdef ECORE_CONFIG_DIRECT_HWFN
6594 	internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6595 		sizeof(rx_prods), &rx_prods.data32);
6596 #else
6597 	internal_ram_wr(rxq->hw_rxq_prod_addr,
6598 		sizeof(rx_prods), &rx_prods.data32);
6599 #endif
6600 
6601         /* mmiowb is needed to synchronize doorbell writes from more than one
6602          * processor. It guarantees that the write arrives to the device before
6603          * the napi lock is released and another qlnx_poll is called (possibly
6604          * on another CPU). Without this barrier, the next doorbell can bypass
6605          * this doorbell. This is applicable to IA64/Altix systems.
6606          */
6607         wmb();
6608 
6609 	return;
6610 }
6611 
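/*
 * Default Toeplitz RSS hash key (the well-known Microsoft example key),
 * packed into big-endian 32-bit words.
 */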
6612 static uint32_t qlnx_hash_key[] = {
6613                 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6614                 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6615                 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6616                 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6617                 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6618                 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6619                 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6620                 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6621                 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6622                 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6623 
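/*
 * Queue bring-up sequence: start the vport, then for every RSS fastpath
 * issue the Rx/Tx queue-start ramrods (recording the producer/doorbell
 * addresses returned by the firmware), and finally send a vport update
 * that activates traffic and, when more than one RSS queue exists,
 * programs the RSS indirection table, hash key and optional hardware
 * TPA/LRO parameters.
 */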
6624 static int
6625 qlnx_start_queues(qlnx_host_t *ha)
6626 {
6627         int				rc, tc, i, vport_id = 0,
6628 					drop_ttl0_flg = 1, vlan_removal_en = 1,
6629 					tx_switching = 0, hw_lro_enable = 0;
6630         struct ecore_dev		*cdev = &ha->cdev;
6631         struct ecore_rss_params		*rss_params = &ha->rss_params;
6632         struct qlnx_update_vport_params	vport_update_params;
6633         if_t				ifp;
6634         struct ecore_hwfn		*p_hwfn;
6635 	struct ecore_sge_tpa_params	tpa_params;
6636 	struct ecore_queue_start_common_params qparams;
6637         struct qlnx_fastpath		*fp;
6638 
6639 	ifp = ha->ifp;
6640 
6641 	QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6642 
6643         if (!ha->num_rss) {
6644 		QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6645 			" are no Rx queues\n");
6646                 return -EINVAL;
6647         }
6648 
6649 #ifndef QLNX_SOFT_LRO
6650         hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO;
6651 #endif /* #ifndef QLNX_SOFT_LRO */
6652 
6653         rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg,
6654 			vlan_removal_en, tx_switching, hw_lro_enable);
6655 
6656         if (rc) {
6657                 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6658                 return rc;
6659         }
6660 
6661 	QL_DPRINT2(ha, "Start vport ramrod passed, "
6662 		"vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6663 		vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en);
6664 
6665         for_each_rss(i) {
6666 		struct ecore_rxq_start_ret_params rx_ret_params;
6667 		struct ecore_txq_start_ret_params tx_ret_params;
6668 
6669                 fp = &ha->fp_array[i];
6670         	p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6671 
6672 		bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6673 		bzero(&rx_ret_params,
6674 			sizeof (struct ecore_rxq_start_ret_params));
6675 
6676 		qparams.queue_id = i;
6677 		qparams.vport_id = vport_id;
6678 		qparams.stats_id = vport_id;
6679 		qparams.p_sb = fp->sb_info;
6680 		qparams.sb_idx = RX_PI;
6681 
6682 
6683 		rc = ecore_eth_rx_queue_start(p_hwfn,
6684 			p_hwfn->hw_info.opaque_fid,
6685 			&qparams,
6686 			fp->rxq->rx_buf_size,	/* bd_max_bytes */
6687 			/* bd_chain_phys_addr */
6688 			fp->rxq->rx_bd_ring.p_phys_addr,
6689 			/* cqe_pbl_addr */
6690 			ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6691 			/* cqe_pbl_size */
6692 			ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6693 			&rx_ret_params);
6694 
6695                 if (rc) {
6696                 	QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6697                         return rc;
6698                 }
6699 
6700 		fp->rxq->hw_rxq_prod_addr	= rx_ret_params.p_prod;
6701 		fp->rxq->handle			= rx_ret_params.p_handle;
6702                 fp->rxq->hw_cons_ptr		=
6703 				&fp->sb_info->sb_virt->pi_array[RX_PI];
6704 
6705                 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6706 
6707                 for (tc = 0; tc < ha->num_tc; tc++) {
6708                         struct qlnx_tx_queue *txq = fp->txq[tc];
6709 
6710 			bzero(&qparams,
6711 				sizeof(struct ecore_queue_start_common_params));
6712 			bzero(&tx_ret_params,
6713 				sizeof (struct ecore_txq_start_ret_params));
6714 
6715 			qparams.queue_id = txq->index / cdev->num_hwfns ;
6716 			qparams.vport_id = vport_id;
6717 			qparams.stats_id = vport_id;
6718 			qparams.p_sb = fp->sb_info;
6719 			qparams.sb_idx = TX_PI(tc);
6720 
6721 			rc = ecore_eth_tx_queue_start(p_hwfn,
6722 				p_hwfn->hw_info.opaque_fid,
6723 				&qparams, tc,
6724 				/* bd_chain_phys_addr */
6725 				ecore_chain_get_pbl_phys(&txq->tx_pbl),
6726 				ecore_chain_get_page_cnt(&txq->tx_pbl),
6727 				&tx_ret_params);
6728 
6729                         if (rc) {
6730                 		QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6731 					   txq->index, rc);
6732                                 return rc;
6733                         }
6734 
6735 			txq->doorbell_addr = tx_ret_params.p_doorbell;
6736 			txq->handle = tx_ret_params.p_handle;
6737 
6738                         txq->hw_cons_ptr =
6739                                 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6740                         SET_FIELD(txq->tx_db.data.params,
6741                                   ETH_DB_DATA_DEST, DB_DEST_XCM);
6742                         SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6743                                   DB_AGG_CMD_SET);
6744                         SET_FIELD(txq->tx_db.data.params,
6745                                   ETH_DB_DATA_AGG_VAL_SEL,
6746                                   DQ_XCM_ETH_TX_BD_PROD_CMD);
6747 
6748                         txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6749                 }
6750         }
6751 
6752         /* Fill struct with RSS params */
6753         if (ha->num_rss > 1) {
6754                 rss_params->update_rss_config = 1;
6755                 rss_params->rss_enable = 1;
6756                 rss_params->update_rss_capabilities = 1;
6757                 rss_params->update_rss_ind_table = 1;
6758                 rss_params->update_rss_key = 1;
6759                 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6760                                        ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6761                 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6762 
6763                 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6764                 	fp = &ha->fp_array[(i % ha->num_rss)];
6765                         rss_params->rss_ind_table[i] = fp->rxq->handle;
6766 		}
6767 
6768                 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6769 			rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6770 
6771         } else {
6772                 memset(rss_params, 0, sizeof(*rss_params));
6773         }
6774 
6775         /* Prepare and send the vport enable */
6776         memset(&vport_update_params, 0, sizeof(vport_update_params));
6777         vport_update_params.vport_id = vport_id;
6778         vport_update_params.update_vport_active_tx_flg = 1;
6779         vport_update_params.vport_active_tx_flg = 1;
6780         vport_update_params.update_vport_active_rx_flg = 1;
6781         vport_update_params.vport_active_rx_flg = 1;
6782         vport_update_params.rss_params = rss_params;
6783         vport_update_params.update_inner_vlan_removal_flg = 1;
6784         vport_update_params.inner_vlan_removal_flg = 1;
6785 
6786 	if (hw_lro_enable) {
6787 		memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6788 
6789 		tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6790 
6791 		tpa_params.update_tpa_en_flg = 1;
6792 		tpa_params.tpa_ipv4_en_flg = 1;
6793 		tpa_params.tpa_ipv6_en_flg = 1;
6794 
6795 		tpa_params.update_tpa_param_flg = 1;
6796 		tpa_params.tpa_pkt_split_flg = 0;
6797 		tpa_params.tpa_hdr_data_split_flg = 0;
6798 		tpa_params.tpa_gro_consistent_flg = 0;
6799 		tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6800 		tpa_params.tpa_max_size = (uint16_t)(-1);
6801 		tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2;
6802 		tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2;
6803 
6804 		vport_update_params.sge_tpa_params = &tpa_params;
6805 	}
6806 
6807         rc = qlnx_update_vport(cdev, &vport_update_params);
6808         if (rc) {
6809 		QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6810                 return rc;
6811         }
6812 
6813         return 0;
6814 }
6815 
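/*
 * qlnx_drain_txq() polls until the hardware's BD consumer (read from the
 * status block through txq->hw_cons_ptr) catches up with the driver's chain
 * consumer index, reaping completions under fp->tx_mtx and sleeping 2ms per
 * iteration. Note there is no timeout: the loop assumes firmware eventually
 * completes all outstanding BDs.
 */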
6816 static int
6817 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6818 	struct qlnx_tx_queue *txq)
6819 {
6820 	uint16_t	hw_bd_cons;
6821 	uint16_t	ecore_cons_idx;
6822 
6823 	QL_DPRINT2(ha, "enter\n");
6824 
6825 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6826 
6827 	while (hw_bd_cons !=
6828 		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6829 		mtx_lock(&fp->tx_mtx);
6830 
6831 		(void)qlnx_tx_int(ha, fp, txq);
6832 
6833 		mtx_unlock(&fp->tx_mtx);
6834 
6835 		qlnx_mdelay(__func__, 2);
6836 
6837 		hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6838 	}
6839 
6840 	QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6841 
6842         return 0;
6843 }
6844 
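/*
 * qlnx_stop_queues() tears down in the reverse order of bring-up: first
 * deactivate the vport (and disable RSS), then drain every Tx queue, stop
 * the queues from the last RSS ring back to the first (Tx per traffic
 * class, then Rx), and finally send a vport-stop ramrod on each hwfn.
 */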
6845 static int
6846 qlnx_stop_queues(qlnx_host_t *ha)
6847 {
6848         struct qlnx_update_vport_params	vport_update_params;
6849         struct ecore_dev		*cdev;
6850         struct qlnx_fastpath		*fp;
6851         int				rc, tc, i;
6852 
6853         cdev = &ha->cdev;
6854 
6855         /* Disable the vport */
6856 
6857         memset(&vport_update_params, 0, sizeof(vport_update_params));
6858 
6859         vport_update_params.vport_id = 0;
6860         vport_update_params.update_vport_active_tx_flg = 1;
6861         vport_update_params.vport_active_tx_flg = 0;
6862         vport_update_params.update_vport_active_rx_flg = 1;
6863         vport_update_params.vport_active_rx_flg = 0;
6864         vport_update_params.rss_params = &ha->rss_params;
6865         vport_update_params.rss_params->update_rss_config = 0;
6866         vport_update_params.rss_params->rss_enable = 0;
6867         vport_update_params.update_inner_vlan_removal_flg = 0;
6868         vport_update_params.inner_vlan_removal_flg = 0;
6869 
6870 	QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6871 
6872         rc = qlnx_update_vport(cdev, &vport_update_params);
6873         if (rc) {
6874 		QL_DPRINT1(ha, "Failed to update vport\n");
6875                 return rc;
6876         }
6877 
6878         /* Flush Tx queues. If needed, request drain from MCP */
6879         for_each_rss(i) {
6880                 fp = &ha->fp_array[i];
6881 
6882                 for (tc = 0; tc < ha->num_tc; tc++) {
6883                         struct qlnx_tx_queue *txq = fp->txq[tc];
6884 
6885                         rc = qlnx_drain_txq(ha, fp, txq);
6886                         if (rc)
6887                                 return rc;
6888                 }
6889         }
6890 
6891         /* Stop all Queues in reverse order */
6892         for (i = ha->num_rss - 1; i >= 0; i--) {
6893 		struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6894 
6895                 fp = &ha->fp_array[i];
6896 
6897                 /* Stop the Tx Queue(s) */
6898                 for (tc = 0; tc < ha->num_tc; tc++) {
6899 			int tx_queue_id __unused;
6900 
6901 			tx_queue_id = tc * ha->num_rss + i;
6902 			rc = ecore_eth_tx_queue_stop(p_hwfn,
6903 					fp->txq[tc]->handle);
6904 
6905                         if (rc) {
6906 				QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6907 					   tx_queue_id);
6908                                 return rc;
6909                         }
6910                 }
6911 
6912                 /* Stop the Rx Queue */
6913 		rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6914 				false);
6915                 if (rc) {
6916                         QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6917                         return rc;
6918                 }
6919         }
6920 
6921         /* Stop the vport */
6922 	for_each_hwfn(cdev, i) {
6923 		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6924 
6925 		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6926 
6927 		if (rc) {
6928                         QL_DPRINT1(ha, "Failed to stop VPORT\n");
6929 			return rc;
6930 		}
6931 	}
6932 
6933         return rc;
6934 }
6935 
6936 static int
6937 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6938 	enum ecore_filter_opcode opcode,
6939 	unsigned char mac[ETH_ALEN])
6940 {
6941 	struct ecore_filter_ucast	ucast;
6942 	struct ecore_dev		*cdev;
6943 	int				rc;
6944 
6945 	cdev = &ha->cdev;
6946 
6947 	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6948 
6949         ucast.opcode = opcode;
6950         ucast.type = ECORE_FILTER_MAC;
6951         ucast.is_rx_filter = 1;
6952         ucast.vport_to_add_to = 0;
6953         memcpy(&ucast.mac[0], mac, ETH_ALEN);
6954 
6955 	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6956 
6957         return (rc);
6958 }
6959 
6960 static int
6961 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6962 {
6963 	struct ecore_filter_ucast	ucast;
6964 	struct ecore_dev		*cdev;
6965 	int				rc;
6966 
6967 	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6968 
6969 	ucast.opcode = ECORE_FILTER_REPLACE;
6970 	ucast.type = ECORE_FILTER_MAC;
6971 	ucast.is_rx_filter = 1;
6972 
6973 	cdev = &ha->cdev;
6974 
6975 	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6976 
6977 	return (rc);
6978 }
6979 
6980 static int
6981 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6982 {
6983 	struct ecore_filter_mcast	*mcast;
6984 	struct ecore_dev		*cdev;
6985 	int				rc, i;
6986 
6987 	cdev = &ha->cdev;
6988 
6989 	mcast = &ha->ecore_mcast;
6990 	bzero(mcast, sizeof(struct ecore_filter_mcast));
6991 
6992 	mcast->opcode = ECORE_FILTER_REMOVE;
6993 
6994 	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
6995 		if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6996 			ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6997 			ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6998 			memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
6999 			mcast->num_mc_addrs++;
7000 		}
7001 	}
7002 	mcast = &ha->ecore_mcast;
7003 
7004 	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7005 
7006 	bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7007 	ha->nmcast = 0;
7008 
7009 	return (rc);
7010 }
7011 
7012 static int
7013 qlnx_clean_filters(qlnx_host_t *ha)
7014 {
7015         int	rc = 0;
7016 
7017 	/* Remove all unicast macs */
7018 	rc = qlnx_remove_all_ucast_mac(ha);
7019 	if (rc)
7020 		return rc;
7021 
7022 	/* Remove all multicast macs */
7023 	rc = qlnx_remove_all_mcast_mac(ha);
7024 	if (rc)
7025 		return rc;
7026 
7027         rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7028 
7029         return (rc);
7030 }
7031 
7032 static int
7033 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7034 {
7035 	struct ecore_filter_accept_flags	accept;
7036 	int					rc = 0;
7037 	struct ecore_dev			*cdev;
7038 
7039 	cdev = &ha->cdev;
7040 
7041 	bzero(&accept, sizeof(struct ecore_filter_accept_flags));
7042 
7043 	accept.update_rx_mode_config = 1;
7044 	accept.rx_accept_filter = filter;
7045 
7046 	accept.update_tx_mode_config = 1;
7047 	accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
7048 		ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
7049 
7050 	rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7051 			ECORE_SPQ_MODE_CB, NULL);
7052 
7053 	return (rc);
7054 }
7055 
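/*
 * Rx-mode composition in qlnx_set_rx_mode(): the baseline accepts matched
 * unicast, matched multicast and broadcast. When qlnx_vf_device() returns 0
 * or IFF_PROMISC is set, unmatched unicast/multicast are accepted as well;
 * IFF_ALLMULTI alone adds only unmatched multicast.
 */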
7056 static int
7057 qlnx_set_rx_mode(qlnx_host_t *ha)
7058 {
7059 	int	rc = 0;
7060 	uint8_t	filter;
7061 	const if_t ifp = ha->ifp;
7062 	const struct ifaddr *ifa;
7063 	struct sockaddr_dl *sdl;
7064 
7065 	ifa = if_getifaddr(ifp);
7066 	if (if_gettype(ifp) == IFT_ETHER && ifa != NULL &&
7067 			ifa->ifa_addr != NULL) {
7068 		sdl = (struct sockaddr_dl *) ifa->ifa_addr;
7069 
7070 		rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, LLADDR(sdl));
7071 	} else {
7072 		rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7073 	}
7074         if (rc)
7075                 return rc;
7076 
7077 	rc = qlnx_remove_all_mcast_mac(ha);
7078         if (rc)
7079                 return rc;
7080 
7081 	filter = ECORE_ACCEPT_UCAST_MATCHED |
7082 			ECORE_ACCEPT_MCAST_MATCHED |
7083 			ECORE_ACCEPT_BCAST;
7084 
7085 	if (qlnx_vf_device(ha) == 0 || (if_getflags(ha->ifp) & IFF_PROMISC)) {
7086 		filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
7087 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7088 	} else if (if_getflags(ha->ifp) & IFF_ALLMULTI) {
7089 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7090 	}
7091 	ha->filter = filter;
7092 
7093 	rc = qlnx_set_rx_accept_filter(ha, filter);
7094 
7095 	return (rc);
7096 }
7097 
7098 static int
7099 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7100 {
7101         int			i, rc = 0;
7102 	struct ecore_dev	*cdev;
7103 	struct ecore_hwfn	*hwfn;
7104 	struct ecore_ptt	*ptt;
7105 
7106 	if (qlnx_vf_device(ha) == 0)
7107 		return (0);
7108 
7109 	cdev = &ha->cdev;
7110 
7111         for_each_hwfn(cdev, i) {
7112                 hwfn = &cdev->hwfns[i];
7113 
7114                 ptt = ecore_ptt_acquire(hwfn);
7115                 if (!ptt)
7116                         return -EBUSY;
7117 
7118                 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7119 
7120                 ecore_ptt_release(hwfn, ptt);
7121 
7122                 if (rc)
7123                         return rc;
7124         }
7125         return (rc);
7126 }
7127 
7128 static uint64_t
7129 qlnx_get_counter(if_t ifp, ift_counter cnt)
7130 {
7131 	qlnx_host_t *ha;
7132 	uint64_t count;
7133 
7134         ha = (qlnx_host_t *)if_getsoftc(ifp);
7135 
7136         switch (cnt) {
7137         case IFCOUNTER_IPACKETS:
7138 		count = ha->hw_stats.common.rx_ucast_pkts +
7139 			ha->hw_stats.common.rx_mcast_pkts +
7140 			ha->hw_stats.common.rx_bcast_pkts;
7141 		break;
7142 
7143         case IFCOUNTER_IERRORS:
7144 		count = ha->hw_stats.common.rx_crc_errors +
7145 			ha->hw_stats.common.rx_align_errors +
7146 			ha->hw_stats.common.rx_oversize_packets +
7147 			ha->hw_stats.common.rx_undersize_packets;
7148 		break;
7149 
7150         case IFCOUNTER_OPACKETS:
7151 		count = ha->hw_stats.common.tx_ucast_pkts +
7152 			ha->hw_stats.common.tx_mcast_pkts +
7153 			ha->hw_stats.common.tx_bcast_pkts;
7154 		break;
7155 
7156         case IFCOUNTER_OERRORS:
7157                 count = ha->hw_stats.common.tx_err_drop_pkts;
7158 		break;
7159 
7160         case IFCOUNTER_COLLISIONS:
7161                 return (0);
7162 
7163         case IFCOUNTER_IBYTES:
7164 		count = ha->hw_stats.common.rx_ucast_bytes +
7165 			ha->hw_stats.common.rx_mcast_bytes +
7166 			ha->hw_stats.common.rx_bcast_bytes;
7167 		break;
7168 
7169         case IFCOUNTER_OBYTES:
7170 		count = ha->hw_stats.common.tx_ucast_bytes +
7171 			ha->hw_stats.common.tx_mcast_bytes +
7172 			ha->hw_stats.common.tx_bcast_bytes;
7173 		break;
7174 
7175         case IFCOUNTER_IMCASTS:
7176 		count = ha->hw_stats.common.rx_mcast_pkts;
7177 		break;
7178 
7179         case IFCOUNTER_OMCASTS:
7180 		count = ha->hw_stats.common.tx_mcast_pkts;
7181 		break;
7182 
7183         case IFCOUNTER_IQDROPS:
7184         case IFCOUNTER_OQDROPS:
7185         case IFCOUNTER_NOPROTO:
7186 
7187         default:
7188                 return (if_get_counter_default(ifp, cnt));
7189         }
7190 	return (count);
7191 }
7192 
7193 static void
7194 qlnx_timer(void *arg)
7195 {
7196 	qlnx_host_t	*ha;
7197 
7198 	ha = (qlnx_host_t *)arg;
7199 
7200 	if (ha->error_recovery) {
7201 		ha->error_recovery = 0;
7202 		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7203 		return;
7204 	}
7205 
7206 	ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7207 
7208 	if (ha->storm_stats_gather)
7209 		qlnx_sample_storm_stats(ha);
7210 
7211 	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7212 
7213 	return;
7214 }
7215 
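/*
 * qlnx_load() bring-up sequence: allocate the fastpath arrays and load-time
 * memory, set up one interrupt per RSS ring (each bound to CPU i % mp_ncpus),
 * start the vport/Rx/Tx queues, program the unicast MAC and Rx filters,
 * request link-up, and arm the one-second qlnx_timer() callout.
 */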
7216 static int
7217 qlnx_load(qlnx_host_t *ha)
7218 {
7219 	int			i;
7220 	int			rc = 0;
7221         device_t		dev;
7222 
7223         dev = ha->pci_dev;
7224 
7225 	QL_DPRINT2(ha, "enter\n");
7226 
7227         rc = qlnx_alloc_mem_arrays(ha);
7228         if (rc)
7229                 goto qlnx_load_exit0;
7230 
7231         qlnx_init_fp(ha);
7232 
7233         rc = qlnx_alloc_mem_load(ha);
7234         if (rc)
7235                 goto qlnx_load_exit1;
7236 
7237         QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7238 		   ha->num_rss, ha->num_tc);
7239 
7240 	for (i = 0; i < ha->num_rss; i++) {
7241 		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7242                         (INTR_TYPE_NET | INTR_MPSAFE),
7243                         NULL, qlnx_fp_isr, &ha->irq_vec[i],
7244                         &ha->irq_vec[i].handle))) {
7245                         QL_DPRINT1(ha, "could not setup interrupt\n");
7246                         goto qlnx_load_exit2;
7247 		}
7248 
7249 		QL_DPRINT2(ha, "rss_id = %d irq_rid %d"
7250 			" irq %p handle %p\n", i,
7251 			ha->irq_vec[i].irq_rid,
7252 			ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7253 
7254 		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7255 	}
7256 
7257         rc = qlnx_start_queues(ha);
7258         if (rc)
7259                 goto qlnx_load_exit2;
7260 
7261         QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7262 
7263         /* Add primary mac and set Rx filters */
7264         rc = qlnx_set_rx_mode(ha);
7265         if (rc)
7266                 goto qlnx_load_exit2;
7267 
7268         /* Ask for link-up using current configuration */
7269 	qlnx_set_link(ha, true);
7270 
7271 	if (qlnx_vf_device(ha) == 0)
7272 		qlnx_link_update(&ha->cdev.hwfns[0]);
7273 
7274         ha->state = QLNX_STATE_OPEN;
7275 
7276 	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7277 
7278 	if (ha->flags.callout_init)
7279         	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7280 
7281         goto qlnx_load_exit0;
7282 
7283 qlnx_load_exit2:
7284         qlnx_free_mem_load(ha);
7285 
7286 qlnx_load_exit1:
7287         ha->num_rss = 0;
7288 
7289 qlnx_load_exit0:
7290 	QL_DPRINT2(ha, "exit [%d]\n", rc);
7291         return rc;
7292 }
7293 
7294 static void
7295 qlnx_drain_soft_lro(qlnx_host_t *ha)
7296 {
7297 #ifdef QLNX_SOFT_LRO
7298 
7299 	if_t		ifp;
7300 	int		i;
7301 
7302 	ifp = ha->ifp;
7303 
7304 	if (if_getcapenable(ifp) & IFCAP_LRO) {
7305 	        for (i = 0; i < ha->num_rss; i++) {
7306 			struct qlnx_fastpath *fp = &ha->fp_array[i];
7307 			struct lro_ctrl *lro;
7308 
7309 			lro = &fp->rxq->lro;
7310 
7311 			tcp_lro_flush_all(lro);
7312                 }
7313 	}
7314 
7315 #endif /* #ifdef QLNX_SOFT_LRO */
7316 
7317 	return;
7318 }
7319 
7320 static void
7321 qlnx_unload(qlnx_host_t *ha)
7322 {
7323 	struct ecore_dev	*cdev;
7324         device_t		dev;
7325 	int			i;
7326 
7327 	cdev = &ha->cdev;
7328         dev = ha->pci_dev;
7329 
7330 	QL_DPRINT2(ha, "enter\n");
7331         QL_DPRINT1(ha, " QLNX STATE = %d\n", ha->state);
7332 
7333 	if (ha->state == QLNX_STATE_OPEN) {
7334 		qlnx_set_link(ha, false);
7335 		qlnx_clean_filters(ha);
7336 		qlnx_stop_queues(ha);
7337 		ecore_hw_stop_fastpath(cdev);
7338 
7339 		for (i = 0; i < ha->num_rss; i++) {
7340 			if (ha->irq_vec[i].handle) {
7341 				(void)bus_teardown_intr(dev,
7342 					ha->irq_vec[i].irq,
7343 					ha->irq_vec[i].handle);
7344 				ha->irq_vec[i].handle = NULL;
7345 			}
7346 		}
7347 
7348 		qlnx_drain_fp_taskqueues(ha);
7349 		qlnx_drain_soft_lro(ha);
7350         	qlnx_free_mem_load(ha);
7351 	}
7352 
7353 	if (ha->flags.callout_init)
7354 		callout_drain(&ha->qlnx_callout);
7355 
7356 	qlnx_mdelay(__func__, 1000);
7357 
7358         ha->state = QLNX_STATE_CLOSED;
7359 
7360 	QL_DPRINT2(ha, "exit\n");
7361 	return;
7362 }
7363 
7364 static int
7365 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7366 {
7367 	int			rval = -1;
7368 	struct ecore_hwfn	*p_hwfn;
7369 	struct ecore_ptt	*p_ptt;
7370 
7371 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7372 
7373 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7374 	p_ptt = ecore_ptt_acquire(p_hwfn);
7375 
7376         if (!p_ptt) {
7377 		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7378                 return (rval);
7379         }
7380 
7381         rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7382 
7383 	if (rval == DBG_STATUS_OK)
7384                 rval = 0;
7385         else {
7386 		QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7387 			" [0x%x]\n", rval);
7388 	}
7389 
7390         ecore_ptt_release(p_hwfn, p_ptt);
7391 
7392         return (rval);
7393 }
7394 
7395 static int
7396 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7397 {
7398 	int			rval = -1;
7399 	struct ecore_hwfn	*p_hwfn;
7400 	struct ecore_ptt	*p_ptt;
7401 
7402 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7403 
7404 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7405 	p_ptt = ecore_ptt_acquire(p_hwfn);
7406 
7407         if (!p_ptt) {
7408 		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7409                 return (rval);
7410         }
7411 
7412         rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7413 
7414 	if (rval == DBG_STATUS_OK)
7415                 rval = 0;
7416         else {
7417 		QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7418 			" [0x%x]\n", rval);
7419 	}
7420 
7421         ecore_ptt_release(p_hwfn, p_ptt);
7422 
7423         return (rval);
7424 }
7425 
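/*
 * Storm-stats layout: ha->storm_stats[] holds QLNX_STORM_STATS_SAMPLES_PER_HWFN
 * consecutive samples per hwfn, so sample n of hwfn i lives at index
 * n + i * QLNX_STORM_STATS_SAMPLES_PER_HWFN. Each pass below snapshots the
 * active/stall/sleeping/inactive cycle counters of the X/Y/P/T/M/U storm
 * processors; gathering stops once the per-hwfn sample region is full.
 */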
7426 static void
7427 qlnx_sample_storm_stats(qlnx_host_t *ha)
7428 {
7429         int			i, index;
7430         struct ecore_dev	*cdev;
7431 	qlnx_storm_stats_t	*s_stats;
7432 	uint32_t		reg;
7433         struct ecore_ptt	*p_ptt;
7434         struct ecore_hwfn	*hwfn;
7435 
7436 	if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7437 		ha->storm_stats_gather = 0;
7438 		return;
7439 	}
7440 
7441         cdev = &ha->cdev;
7442 
7443         for_each_hwfn(cdev, i) {
7444                 hwfn = &cdev->hwfns[i];
7445 
7446                 p_ptt = ecore_ptt_acquire(hwfn);
7447                 if (!p_ptt)
7448                         return;
7449 
7450 		index = ha->storm_stats_index +
7451 				(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7452 
7453 		s_stats = &ha->storm_stats[index];
7454 
7455 		/* XSTORM */
7456 		reg = XSEM_REG_FAST_MEMORY +
7457 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7458 		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7459 
7460 		reg = XSEM_REG_FAST_MEMORY +
7461 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7462 		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7463 
7464 		reg = XSEM_REG_FAST_MEMORY +
7465 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7466 		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7467 
7468 		reg = XSEM_REG_FAST_MEMORY +
7469 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7470 		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7471 
7472 		/* YSTORM */
7473 		reg = YSEM_REG_FAST_MEMORY +
7474 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7475 		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7476 
7477 		reg = YSEM_REG_FAST_MEMORY +
7478 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7479 		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7480 
7481 		reg = YSEM_REG_FAST_MEMORY +
7482 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7483 		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7484 
7485 		reg = YSEM_REG_FAST_MEMORY +
7486 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7487 		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7488 
7489 		/* PSTORM */
7490 		reg = PSEM_REG_FAST_MEMORY +
7491 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7492 		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7493 
7494 		reg = PSEM_REG_FAST_MEMORY +
7495 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7496 		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7497 
7498 		reg = PSEM_REG_FAST_MEMORY +
7499 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7500 		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7501 
7502 		reg = PSEM_REG_FAST_MEMORY +
7503 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7504 		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7505 
7506 		/* TSTORM */
7507 		reg = TSEM_REG_FAST_MEMORY +
7508 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7509 		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7510 
7511 		reg = TSEM_REG_FAST_MEMORY +
7512 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7513 		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7514 
7515 		reg = TSEM_REG_FAST_MEMORY +
7516 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7517 		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7518 
7519 		reg = TSEM_REG_FAST_MEMORY +
7520 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7521 		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7522 
7523 		/* MSTORM */
7524 		reg = MSEM_REG_FAST_MEMORY +
7525 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7526 		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7527 
7528 		reg = MSEM_REG_FAST_MEMORY +
7529 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7530 		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7531 
7532 		reg = MSEM_REG_FAST_MEMORY +
7533 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7534 		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7535 
7536 		reg = MSEM_REG_FAST_MEMORY +
7537 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7538 		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7539 
7540 		/* USTORM */
7541 		reg = USEM_REG_FAST_MEMORY +
7542 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7543 		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7544 
7545 		reg = USEM_REG_FAST_MEMORY +
7546 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7547 		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7548 
7549 		reg = USEM_REG_FAST_MEMORY +
7550 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7551 		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7552 
7553 		reg = USEM_REG_FAST_MEMORY +
7554 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7555 		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7556 
7557                 ecore_ptt_release(hwfn, p_ptt);
7558         }
7559 
7560 	ha->storm_stats_index++;
7561 
7562         return;
7563 }
7564 
7565 /*
7566  * Name: qlnx_dump_buf8
7567  * Function: dumps a buffer as bytes
7568  */
7569 static void
7570 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
7571 {
7572         device_t	dev;
7573         uint32_t	i = 0;
7574         uint8_t		*buf;
7575 
7576         dev = ha->pci_dev;
7577         buf = dbuf;
7578 
7579         device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
7580 
7581         while (len >= 16) {
7582                 device_printf(dev,"0x%08x:"
7583                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7584                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7585                         buf[0], buf[1], buf[2], buf[3],
7586                         buf[4], buf[5], buf[6], buf[7],
7587                         buf[8], buf[9], buf[10], buf[11],
7588                         buf[12], buf[13], buf[14], buf[15]);
7589                 i += 16;
7590                 len -= 16;
7591                 buf += 16;
7592         }
7593         switch (len) {
7594         case 1:
7595                 device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
7596                 break;
7597         case 2:
7598                 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
7599                 break;
7600         case 3:
7601                 device_printf(dev,"0x%08x: %02x %02x %02x\n",
7602                         i, buf[0], buf[1], buf[2]);
7603                 break;
7604         case 4:
7605                 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
7606                         buf[0], buf[1], buf[2], buf[3]);
7607                 break;
7608         case 5:
7609                 device_printf(dev,"0x%08x:"
7610                         " %02x %02x %02x %02x %02x\n", i,
7611                         buf[0], buf[1], buf[2], buf[3], buf[4]);
7612                 break;
7613         case 6:
7614                 device_printf(dev,"0x%08x:"
7615                         " %02x %02x %02x %02x %02x %02x\n", i,
7616                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
7617                 break;
7618         case 7:
7619                 device_printf(dev,"0x%08x:"
7620                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
7621                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
7622                 break;
7623         case 8:
7624                 device_printf(dev,"0x%08x:"
7625                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7626                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7627                         buf[7]);
7628                 break;
7629         case 9:
7630                 device_printf(dev,"0x%08x:"
7631                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7632                         " %02x\n", i,
7633                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7634                         buf[7], buf[8]);
7635                 break;
7636         case 10:
7637                 device_printf(dev,"0x%08x:"
7638                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7639                         " %02x %02x\n", i,
7640                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7641                         buf[7], buf[8], buf[9]);
7642                 break;
7643         case 11:
7644                 device_printf(dev,"0x%08x:"
7645                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7646                         " %02x %02x %02x\n", i,
7647                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7648                         buf[7], buf[8], buf[9], buf[10]);
7649                 break;
7650         case 12:
7651                 device_printf(dev,"0x%08x:"
7652                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7653                         " %02x %02x %02x %02x\n", i,
7654                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7655                         buf[7], buf[8], buf[9], buf[10], buf[11]);
7656                 break;
7657         case 13:
7658                 device_printf(dev,"0x%08x:"
7659                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7660                         " %02x %02x %02x %02x %02x\n", i,
7661                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7662                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
7663                 break;
7664         case 14:
7665                 device_printf(dev,"0x%08x:"
7666                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7667                         " %02x %02x %02x %02x %02x %02x\n", i,
7668                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7669                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7670                         buf[13]);
7671                 break;
7672         case 15:
7673                 device_printf(dev,"0x%08x:"
7674                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7675                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
7676                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7677                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7678                         buf[13], buf[14]);
7679                 break;
7680         default:
7681                 break;
7682         }
7683 
7684         device_printf(dev, "%s: %s dump end\n", __func__, msg);
7685 
7686         return;
7687 }
7688 
7689 #ifdef CONFIG_ECORE_SRIOV
7690 
7691 static void
7692 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
7693 {
7694         struct ecore_public_vf_info *vf_info;
7695 
7696         vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);
7697 
7698         if (!vf_info)
7699                 return;
7700 
7701         /* Clear the VF mac */
7702         memset(vf_info->forced_mac, 0, ETH_ALEN);
7703 
7704         vf_info->forced_vlan = 0;
7705 
7706 	return;
7707 }
7708 
7709 void
7710 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
7711 {
7712 	__qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);
7713 	return;
7714 }
7715 
7716 static int
7717 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
7718 	struct ecore_filter_ucast *params)
7719 {
7720         struct ecore_public_vf_info *vf;
7721 
7722 	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
7723 		QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
7724 			"VF[%d] vport not initialized\n", vfid);
7725 		return ECORE_INVAL;
7726 	}
7727 
7728         vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
7729         if (!vf)
7730                 return -EINVAL;
7731 
7732         /* No real decision to make; Store the configured MAC */
7733         if (params->type == ECORE_FILTER_MAC ||
7734             params->type == ECORE_FILTER_MAC_VLAN)
7735                 memcpy(params->mac, vf->forced_mac, ETH_ALEN);
7736 
7737         return 0;
7738 }
7739 
7740 int
7741 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
7742 {
7743 	return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
7744 }
7745 
7746 static int
7747 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
7748         struct ecore_sp_vport_update_params *params, uint16_t *tlvs)
7749 {
7750 	if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
7751 		QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
7752 			"VF[%d] vport not initialized\n", vfid);
7753 		return ECORE_INVAL;
7754 	}
7755 
7756         /* Untrusted VFs can't even be trusted to know that fact.
7757          * Simply indicate everything is configured fine, and trace
7758          * configuration 'behind their back'.
7759          */
7760         if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
7761                 return 0;
7762 
7763         return 0;
7764 
7765 }
7766 int
7767 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
7768 {
7769 	return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
7770 }
7771 
7772 static int
7773 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
7774 {
7775 	int			i;
7776 	struct ecore_dev	*cdev;
7777 
7778 	cdev = p_hwfn->p_dev;
7779 
7780 	for (i = 0; i < cdev->num_hwfns; i++) {
7781 		if (&cdev->hwfns[i] == p_hwfn)
7782 			break;
7783 	}
7784 
7785 	if (i >= cdev->num_hwfns)
7786 		return (-1);
7787 
7788 	return (i);
7789 }
7790 
7791 static int
7792 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
7793 {
7794 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7795 	int i;
7796 
7797 	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
7798 		ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
7799 
7800 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7801 		return (-1);
7802 
7803 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7804 		atomic_testandset_32(&ha->sriov_task[i].flags,
7805 			QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);
7806 
7807 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7808 			&ha->sriov_task[i].pf_task);
7809 	}
7810 
7811 	return (ECORE_SUCCESS);
7812 }
7813 
7814 int
7815 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
7816 {
7817 	return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
7818 }
7819 
7820 static void
7821 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
7822 {
7823 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7824 	int i;
7825 
7826 	if (!ha->sriov_initialized)
7827 		return;
7828 
7829 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p \n",
7830 		ha, p_hwfn->p_dev, p_hwfn);
7831 
7832 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7833 		return;
7834 
7835 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7836 		atomic_testandset_32(&ha->sriov_task[i].flags,
7837 			QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);
7838 
7839 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7840 			&ha->sriov_task[i].pf_task);
7841 	}
7842 
7843 	return;
7844 }
7845 
7846 void
7847 qlnx_vf_flr_update(void *p_hwfn)
7848 {
7849 	__qlnx_vf_flr_update(p_hwfn);
7850 
7851 	return;
7852 }
7853 
7854 #ifndef QLNX_VF
7855 
7856 static void
7857 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
7858 {
7859 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7860 	int i;
7861 
7862 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p \n",
7863 		ha, p_hwfn->p_dev, p_hwfn);
7864 
7865 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7866 		return;
7867 
7868 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p i = %d\n",
7869 		ha, p_hwfn->p_dev, p_hwfn, i);
7870 
7871 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7872 		atomic_testandset_32(&ha->sriov_task[i].flags,
7873 			QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);
7874 
7875 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7876 			&ha->sriov_task[i].pf_task);
7877 	}
7878 }
7879 
7880 static void
7881 qlnx_initialize_sriov(qlnx_host_t *ha)
7882 {
7883 	device_t	dev;
7884 	nvlist_t	*pf_schema, *vf_schema;
7885 	int		iov_error;
7886 
7887 	dev = ha->pci_dev;
7888 
7889 	pf_schema = pci_iov_schema_alloc_node();
7890 	vf_schema = pci_iov_schema_alloc_node();
7891 
7892 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
7893 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
7894 		IOV_SCHEMA_HASDEFAULT, FALSE);
7895 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
7896 		IOV_SCHEMA_HASDEFAULT, FALSE);
7897 	pci_iov_schema_add_uint16(vf_schema, "num-queues",
7898 		IOV_SCHEMA_HASDEFAULT, 1);
7899 
7900 	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
7901 
7902 	if (iov_error != 0) {
7903 		ha->sriov_initialized = 0;
7904 	} else {
7905 		device_printf(dev, "SRIOV initialized\n");
7906 		ha->sriov_initialized = 1;
7907 	}
7908 
7909 	return;
7910 }
7911 
7912 static void
7913 qlnx_sriov_disable(qlnx_host_t *ha)
7914 {
7915 	struct ecore_dev *cdev;
7916 	int i, j;
7917 
7918 	cdev = &ha->cdev;
7919 
7920 	ecore_iov_set_vfs_to_disable(cdev, true);
7921 
7922 	for_each_hwfn(cdev, i) {
7923 		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
7924 		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
7925 
7926 		if (!ptt) {
7927 			QL_DPRINT1(ha, "Failed to acquire ptt\n");
7928 			return;
7929 		}
7930 		/* Clean WFQ db and configure equal weight for all vports */
7931 		ecore_clean_wfq_db(hwfn, ptt);
7932 
7933 		ecore_for_each_vf(hwfn, j) {
7934 			int k = 0;
7935 
7936 			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
7937 				continue;
7938 
7939 			if (ecore_iov_is_vf_started(hwfn, j)) {
7940 				/* Wait until VF is disabled before releasing */
7941 
7942 				for (k = 0; k < 100; k++) {
7943 					if (!ecore_iov_is_vf_stopped(hwfn, j)) {
7944 						qlnx_mdelay(__func__, 10);
7945 					} else
7946 						break;
7947 				}
7948 			}
7949 
7950 			if (k < 100)
7951 				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
7952                                                           ptt, j);
7953 			else {
7954 				QL_DPRINT1(ha,
7955 					"Timeout waiting for VF's FLR to end\n");
7956 			}
7957 		}
7958 		ecore_ptt_release(hwfn, ptt);
7959 	}
7960 
7961 	ecore_iov_set_vfs_to_disable(cdev, false);
7962 
7963 	return;
7964 }
7965 
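/*
 * Worked example for the queue-id math below (numbers illustrative only):
 * with FEAT_NUM(hwfn, ECORE_PF_L2_QUE) == 16 and params->num_queues == 4,
 * VF 0 is assigned absolute queue ids 16..19 and VF 1 gets 20..23. The
 * vport and RSS engine ids are vfid + 1 since the PF keeps id 0 for itself.
 */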
7966 static void
7967 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
7968 	struct ecore_iov_vf_init_params *params)
7969 {
7970         u16 base, i;
7971 
7972         /* Since we have an equal resource distribution per-VF, and we assume
7973          * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
7974          * sequentially from there.
7975          */
7976         base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;
7977 
7978         params->rel_vf_id = vfid;
7979 
7980         for (i = 0; i < params->num_queues; i++) {
7981                 params->req_rx_queue[i] = base + i;
7982                 params->req_tx_queue[i] = base + i;
7983         }
7984 
7985         /* PF uses indices 0 for itself; Set vport/RSS afterwards */
7986         params->vport_id = vfid + 1;
7987         params->rss_eng_id = vfid + 1;
7988 
7989 	return;
7990 }
7991 
7992 static int
7993 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
7994 {
7995 	qlnx_host_t		*ha;
7996 	struct ecore_dev	*cdev;
7997 	struct ecore_iov_vf_init_params params;
7998 	int ret, j, i;
7999 	uint32_t max_vfs;
8000 
8001 	if ((ha = device_get_softc(dev)) == NULL) {
8002 		device_printf(dev, "%s: cannot get softc\n", __func__);
8003 		return (-1);
8004 	}
8005 
8006 	if (qlnx_create_pf_taskqueues(ha) != 0)
8007 		goto qlnx_iov_init_err0;
8008 
8009 	cdev = &ha->cdev;
8010 
8011 	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);
8012 
8013 	QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
8014 		dev, num_vfs, max_vfs);
8015 
8016         if (num_vfs >= max_vfs) {
8017                 QL_DPRINT1(ha, "Can start at most %d VFs\n",
8018                           (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
8019 		goto qlnx_iov_init_err0;
8020         }
8021 
8022 	ha->vf_attr =  malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
8023 				M_NOWAIT);
8024 
8025 	if (ha->vf_attr == NULL)
8026 		goto qlnx_iov_init_err0;
8027 
8028         memset(&params, 0, sizeof(params));
8029 
8030         /* Initialize HW for VF access */
8031         for_each_hwfn(cdev, j) {
8032                 struct ecore_hwfn *hwfn = &cdev->hwfns[j];
8033                 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
8034 
8035                 /* Make sure not to use more than 16 queues per VF */
8036                 params.num_queues = min_t(int,
8037                                           (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
8038                                           16);
8039 
8040                 if (!ptt) {
8041                         QL_DPRINT1(ha, "Failed to acquire ptt\n");
8042                         goto qlnx_iov_init_err1;
8043                 }
8044 
8045                 for (i = 0; i < num_vfs; i++) {
8046                         if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
8047                                 continue;
8048 
8049                         qlnx_sriov_enable_qid_config(hwfn, i, &params);
8050 
8051                         ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);
8052 
8053                         if (ret) {
8054                                 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
8055                                 ecore_ptt_release(hwfn, ptt);
8056                                 goto qlnx_iov_init_err1;
8057                         }
8058                 }
8059 
8060                 ecore_ptt_release(hwfn, ptt);
8061         }
8062 
8063 	ha->num_vfs = num_vfs;
8064 	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);
8065 
8066 	QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);
8067 
8068 	return (0);
8069 
8070 qlnx_iov_init_err1:
8071 	qlnx_sriov_disable(ha);
8072 
8073 qlnx_iov_init_err0:
8074 	qlnx_destroy_pf_taskqueues(ha);
8075 	ha->num_vfs = 0;
8076 
8077 	return (-1);
8078 }
8079 
8080 static void
8081 qlnx_iov_uninit(device_t dev)
8082 {
8083 	qlnx_host_t	*ha;
8084 
8085 	if ((ha = device_get_softc(dev)) == NULL) {
8086 		device_printf(dev, "%s: cannot get softc\n", __func__);
8087 		return;
8088 	}
8089 
8090 	QL_DPRINT2(ha," dev = %p enter\n", dev);
8091 
8092 	qlnx_sriov_disable(ha);
8093 	qlnx_destroy_pf_taskqueues(ha);
8094 
8095 	free(ha->vf_attr, M_QLNXBUF);
8096 	ha->vf_attr = NULL;
8097 
8098 	ha->num_vfs = 0;
8099 
8100 	QL_DPRINT2(ha," dev = %p exit\n", dev);
8101 	return;
8102 }
8103 
8104 static int
8105 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
8106 {
8107 	qlnx_host_t	*ha;
8108 	qlnx_vf_attr_t	*vf_attr;
8109 	unsigned const char *mac;
8110 	size_t size;
8111 	struct ecore_hwfn *p_hwfn;
8112 
8113 	if ((ha = device_get_softc(dev)) == NULL) {
8114 		device_printf(dev, "%s: cannot get softc\n", __func__);
8115 		return (-1);
8116 	}
8117 
8118 	QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);
8119 
8120 	if (vfnum > (ha->num_vfs - 1)) {
8121 		QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
8122 			vfnum, (ha->num_vfs - 1));
		/* out-of-range VF index; bail out before indexing ha->vf_attr */
		return (EINVAL);
8123 	}
8124 
8125 	vf_attr = &ha->vf_attr[vfnum];
8126 
8127         if (nvlist_exists_binary(params, "mac-addr")) {
8128                 mac = nvlist_get_binary(params, "mac-addr", &size);
8129                 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
8130 		device_printf(dev,
8131 			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
8132 			__func__, vf_attr->mac_addr[0],
8133 			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
8134 			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
8135 			vf_attr->mac_addr[5]);
8136 		p_hwfn = &ha->cdev.hwfns[0];
8137 		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
8138 			vfnum);
8139 	}
8140 
8141 	QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);
8142 	return (0);
8143 }
8144 
8145 static void
8146 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8147 {
8148         uint64_t events[ECORE_VF_ARRAY_LENGTH];
8149         struct ecore_ptt *ptt;
8150         int i;
8151 
8152         ptt = ecore_ptt_acquire(p_hwfn);
8153         if (!ptt) {
8154                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8155 		__qlnx_pf_vf_msg(p_hwfn, 0);
8156                 return;
8157         }
8158 
8159         ecore_iov_pf_get_pending_events(p_hwfn, events);
8160 
8161         QL_DPRINT2(ha, "Event mask of VF events:"
8162 		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
8163                    events[0], events[1], events[2]);
8164 
8165         ecore_for_each_vf(p_hwfn, i) {
8166                 /* Skip VFs with no pending messages */
8167                 if (!(events[i / 64] & (1ULL << (i % 64))))
8168                         continue;
8169 
8170 		QL_DPRINT2(ha,
8171                            "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
8172                            i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
8173 
8174                 /* Copy VF's message to PF's request buffer for that VF */
8175                 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
8176                         continue;
8177 
8178                 ecore_iov_process_mbx_req(p_hwfn, ptt, i);
8179         }
8180 
8181         ecore_ptt_release(p_hwfn, ptt);
8182 
8183 	return;
8184 }
8185 
8186 static void
8187 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8188 {
8189         struct ecore_ptt *ptt;
8190 	int ret;
8191 
8192 	ptt = ecore_ptt_acquire(p_hwfn);
8193 
8194 	if (!ptt) {
8195                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8196 		__qlnx_vf_flr_update(p_hwfn);
8197                 return;
8198 	}
8199 
8200 	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);
8201 
8202 	if (ret) {
8203                 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
8204 	}
8205 
8206 	ecore_ptt_release(p_hwfn, ptt);
8207 
8208 	return;
8209 }
8210 
8211 static void
8212 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8213 {
8214         struct ecore_ptt *ptt;
8215 	int i;
8216 
8217 	ptt = ecore_ptt_acquire(p_hwfn);
8218 
8219 	if (!ptt) {
8220                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8221 		qlnx_vf_bulleting_update(p_hwfn);
8222                 return;
8223 	}
8224 
8225 	ecore_for_each_vf(p_hwfn, i) {
8226 		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
8227 			p_hwfn, i);
8228 		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
8229 	}
8230 
8231 	ecore_ptt_release(p_hwfn, ptt);
8232 
8233 	return;
8234 }
8235 
8236 static void
8237 qlnx_pf_taskqueue(void *context, int pending)
8238 {
8239 	struct ecore_hwfn	*p_hwfn;
8240 	qlnx_host_t		*ha;
8241 	int			i;
8242 
8243 	p_hwfn = context;
8244 
8245 	if (p_hwfn == NULL)
8246 		return;
8247 
8248 	ha = (qlnx_host_t *)(p_hwfn->p_dev);
8249 
8250 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
8251 		return;
8252 
8253 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8254 		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
8255 		qlnx_handle_vf_msg(ha, p_hwfn);
8256 
8257 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8258 		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
8259 		qlnx_handle_vf_flr_update(ha, p_hwfn);
8260 
8261 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8262 		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
8263 		qlnx_handle_bulletin_update(ha, p_hwfn);
8264 
8265 	return;
8266 }
8267 
8268 static int
8269 qlnx_create_pf_taskqueues(qlnx_host_t *ha)
8270 {
8271 	int	i;
8272 	uint8_t	tq_name[32];
8273 
8274 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
8275                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
8276 
8277 		bzero(tq_name, sizeof (tq_name));
8278 		snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);
8279 
8280 		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);
8281 
8282 		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
8283 			 taskqueue_thread_enqueue,
8284 			&ha->sriov_task[i].pf_taskqueue);
8285 
8286 		if (ha->sriov_task[i].pf_taskqueue == NULL)
8287 			return (-1);
8288 
8289 		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
8290 			PI_NET, "%s", tq_name);
8291 
8292 		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
8293 	}
8294 
8295 	return (0);
8296 }
8297 
8298 static void
8299 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
8300 {
8301 	int	i;
8302 
8303 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
8304 		if (ha->sriov_task[i].pf_taskqueue != NULL) {
8305 			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
8306 				&ha->sriov_task[i].pf_task);
8307 			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
8308 			ha->sriov_task[i].pf_taskqueue = NULL;
8309 		}
8310 	}
8311 	return;
8312 }
8313 
8314 static void
8315 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
8316 {
8317 	struct ecore_mcp_link_capabilities caps;
8318 	struct ecore_mcp_link_params params;
8319 	struct ecore_mcp_link_state link;
8320 	int i;
8321 
8322 	if (!p_hwfn->pf_iov_info)
8323 		return;
8324 
8325 	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
8326 	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
8327 	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));
8328 
8329 	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
8330         memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
8331         memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
8332 
8333 	QL_DPRINT2(ha, "called\n");
8334 
8335         /* Update bulletin of all future possible VFs with link configuration */
8336         for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
8337                 /* Modify link according to the VF's configured link state */
8338 
8339                 link.link_up = false;
8340 
8341                 if (ha->link_up) {
8342                         link.link_up = true;
8343                         /* Set speed according to maximum supported by HW.
8344                          * that is 40G for regular devices and 100G for CMT
8345                          * mode devices.
8346                          */
8347                         link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
8348 						100000 : link.speed;
8349 		}
8350 		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);
8351                 ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
8352         }
8353 
8354 	qlnx_vf_bulleting_update(p_hwfn);
8355 
8356 	return;
8357 }
8358 #endif /* #ifndef QLNX_VF */
8359 #endif /* #ifdef CONFIG_ECORE_SRIOV */
8360