/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qlnx_os.c
 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */

#include <sys/cdefs.h>
#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"
#include "ecore_iov_api.h"
#include "ecore_vf_api.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"

#ifdef QLNX_ENABLE_IWARP
#include "qlnx_rdma.h"
#endif /* #ifdef QLNX_ENABLE_IWARP */

#ifdef CONFIG_ECORE_SRIOV
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif /* #ifdef CONFIG_ECORE_SRIOV */

#include <sys/smp.h>

/*
 * static functions
 */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

/*
 * main driver
 */
static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha, int enabled);
static int qlnx_set_allmulti(qlnx_host_t *ha, int enabled);
static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(if_t ifp);
static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
			struct qlnx_link_output *if_link);
static int qlnx_transmit(if_t ifp, struct mbuf *mp);
static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp,
		struct mbuf *mp);
static void qlnx_qflush(if_t ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
		struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
		char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
		uint32_t add_mac);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
		uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
		struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
			struct qlnx_tx_queue *txq);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
		int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
		struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);

/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe(device_t);
static int qlnx_pci_attach(device_t);
static int qlnx_pci_detach(device_t);

#ifndef QLNX_VF

#ifdef CONFIG_ECORE_SRIOV

static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
static void qlnx_iov_uninit(device_t dev);
static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
static void qlnx_initialize_sriov(qlnx_host_t *ha);
static void qlnx_pf_taskqueue(void *context, int pending);
static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */

static device_method_t qlnx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),

#ifdef CONFIG_ECORE_SRIOV
	DEVMETHOD(pci_iov_init, qlnx_iov_init),
	DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
#endif /* #ifdef CONFIG_ECORE_SRIOV */
	{ 0, 0 }
};

static driver_t qlnx_pci_driver = {
	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
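
/*
 * Usage note (illustrative, not from the original source): the PF build is
 * the if_qlnxe module, so it can be loaded with `kldload if_qlnxe` or via
 * if_qlnxe_load="YES" in /boot/loader.conf.
 */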

#else

static device_method_t qlnxv_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),
	{ 0, 0 }
};

static driver_t qlnxv_pci_driver = {
	"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
};

MODULE_VERSION(if_qlnxev, 1);
DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0);

MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);

#endif /* #ifdef QLNX_VF */

MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
#endif

/* 10G/25G/40G Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070	0x8070
#endif

/* SRIOV Device (All Speeds) Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8090
#define QLOGIC_PCI_DEVICE_ID_8090	0x8090
#endif

SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "qlnxe driver parameters");

/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
static int qlnxe_queue_count = QLNX_DEFAULT_RSS;

SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
		&qlnxe_queue_count, 0, "Multi-Queue queue count");
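
/*
 * Example (illustrative): queue_count is a boot-time tunable
 * (CTLFLAG_RDTUN), so it must be set before the driver attaches, e.g. in
 * /boot/loader.conf:
 *
 *	hw.qlnxe.queue_count="8"
 *
 * A value of 0 keeps the automatic default (QLNX_DEFAULT_RSS).
 */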

/*
 * Note on RDMA personality setting
 *
 * Read the personality configured in NVRAM.
 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT,
 * use the personality in NVRAM.
 *
 * Otherwise use the personality configured in sysctl.
 */
#define QLNX_PERSONALITY_DEFAULT	0x0  /* use personality in NVRAM */
#define QLNX_PERSONALITY_ETH_ONLY	0x1  /* Override with ETH_ONLY */
#define QLNX_PERSONALITY_ETH_IWARP	0x2  /* Override with ETH_IWARP */
#define QLNX_PERSONALITY_ETH_ROCE	0x3  /* Override with ETH_ROCE */
#define QLNX_PERSONALITY_BITS_PER_FUNC	4
#define QLNX_PERSONALITY_MASK		0xF

/* RDMA configuration; 64-bit field allows setting for 16 physical functions */
static uint64_t qlnxe_rdma_configuration = 0x22222222;

SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
		&qlnxe_rdma_configuration, 0, "RDMA Configuration");
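
/*
 * Worked example (illustrative): each PCI function consumes one 4-bit
 * nibble of qlnxe_rdma_configuration, so with the default 0x22222222 each
 * of the first eight functions reads 0x2 (ETH_IWARP). For PCI function 2:
 *
 *	(0x22222222 >> (2 * QLNX_PERSONALITY_BITS_PER_FUNC)) &
 *	    QLNX_PERSONALITY_MASK == 0x2
 *
 * Setting hw.qlnxe.rdma_configuration="0x22222231" in /boot/loader.conf
 * would, for example, run function 0 as ETH_ONLY and function 1 as
 * ETH_ROCE.
 */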
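
/*
 * Note (added for clarity): qlnx_vf_device() returns 0 when the adapter is
 * the SR-IOV virtual function (device id 0x8090) and -1 otherwise, so
 * callers treat a non-zero return as "this is a physical function".
 */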
int
qlnx_vf_device(qlnx_host_t *ha)
{
	uint16_t	device_id;

	device_id = ha->device_id;

	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
		return 0;

	return -1;
}

static int
qlnx_valid_device(qlnx_host_t *ha)
{
	uint16_t device_id;

	device_id = ha->device_id;

#ifndef QLNX_VF
	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return 0;
#else
	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
		return 0;

#endif /* #ifndef QLNX_VF */
	return -1;
}

#ifdef QLNX_ENABLE_IWARP
static int
qlnx_rdma_supported(struct qlnx_host *ha)
{
	uint16_t device_id;

	device_id = pci_get_device(ha->pci_dev);

	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return (0);

	return (-1);
}
#endif /* #ifdef QLNX_ENABLE_IWARP */

/*
 * Name:	qlnx_pci_probe
 * Function:	Validate that the PCI device is a supported QLogic
 *		QLE41xxx/QLE45xxx adapter
 */
static int
qlnx_pci_probe(device_t dev)
{
	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
		return (ENXIO);
	}

	switch (pci_get_device(dev)) {
#ifndef QLNX_VF

	case QLOGIC_PCI_DEVICE_ID_1644:
		device_set_descf(dev, "%s v%d.%d.%d",
			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		break;

	case QLOGIC_PCI_DEVICE_ID_1634:
		device_set_descf(dev, "%s v%d.%d.%d",
			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		break;

	case QLOGIC_PCI_DEVICE_ID_1656:
		device_set_descf(dev, "%s v%d.%d.%d",
			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		break;

	case QLOGIC_PCI_DEVICE_ID_1654:
		device_set_descf(dev, "%s v%d.%d.%d",
			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		break;

	case QLOGIC_PCI_DEVICE_ID_8070:
		device_set_descf(dev, "%s v%d.%d.%d",
			"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
			" Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		break;

#else
	case QLOGIC_PCI_DEVICE_ID_8090:
		device_set_descf(dev, "%s v%d.%d.%d",
			"Qlogic SRIOV PCI CNA (AH) "
			"Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		break;

#endif /* #ifndef QLNX_VF */

	default:
		return (ENXIO);
	}

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_init();
#endif /* #ifdef QLNX_ENABLE_IWARP */

	return (BUS_PROBE_DEFAULT);
}

static uint16_t
qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	u16 hw_bd_cons;
	u16 ecore_cons_idx;

	hw_bd_cons = le16toh(*txq->hw_cons_ptr);

	ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
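
	/*
	 * Note (added for clarity): both indices are u16, so the unsigned
	 * subtraction below yields the number of pending completions even
	 * after the 16-bit producer/consumer indices wrap around.
	 */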
	return (hw_bd_cons - ecore_cons_idx);
}

static void
qlnx_sp_intr(void *arg)
{
	struct ecore_hwfn	*p_hwfn;
	qlnx_host_t		*ha;
	int			i;

	p_hwfn = arg;

	if (p_hwfn == NULL) {
		printf("%s: spurious slowpath intr\n", __func__);
		return;
	}

	ha = (qlnx_host_t *)p_hwfn->p_dev;

	QL_DPRINT2(ha, "enter\n");

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (&ha->cdev.hwfns[i] == p_hwfn) {
			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
			break;
		}
	}
	QL_DPRINT2(ha, "exit\n");

	return;
}

static void
qlnx_sp_taskqueue(void *context, int pending)
{
	struct ecore_hwfn	*p_hwfn;

	p_hwfn = context;

	if (p_hwfn != NULL) {
		qlnx_sp_isr(p_hwfn);
	}
	return;
}

static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
	int	i;
	uint8_t	tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);

		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

		ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

		if (ha->sp_taskqueue[i] == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
	}

	return (0);
}

static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_taskqueue[i] != NULL) {
			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
			taskqueue_free(ha->sp_taskqueue[i]);
		}
	}
	return;
}

static void
qlnx_fp_taskqueue(void *context, int pending)
{
	struct qlnx_fastpath	*fp;
	qlnx_host_t		*ha;
	if_t			ifp;
#ifdef QLNX_TRACE_PERF_DATA
	uint64_t		tx_pkts = 0, tx_compl = 0;
#endif

	fp = context;

	if (fp == NULL)
		return;

	ha = (qlnx_host_t *)fp->edev;

	ifp = ha->ifp;

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		if (!drbr_empty(ifp, fp->tx_br)) {
			if (mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACE_PERF_DATA
				tx_pkts = fp->tx_pkts_transmitted;
				tx_compl = fp->tx_pkts_completed;
#endif

				qlnx_transmit_locked(ifp, fp, NULL);

#ifdef QLNX_TRACE_PERF_DATA
				fp->tx_pkts_trans_fp +=
					(fp->tx_pkts_transmitted - tx_pkts);
				fp->tx_pkts_compl_fp +=
					(fp->tx_pkts_completed - tx_compl);
#endif
				mtx_unlock(&fp->tx_mtx);
			}
		}
	}

	QL_DPRINT2(ha, "exit\n");
	return;
}

static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
	int	i;
	uint8_t	tq_name[32];
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

		fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
					taskqueue_thread_enqueue,
					&fp->fp_taskqueue);

		if (fp->fp_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
	}

	return (0);
}

static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
	int			i;
	struct qlnx_fastpath	*fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			taskqueue_free(fp->fp_taskqueue);
			fp->fp_taskqueue = NULL;
		}
	}
	return;
}

static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
	int			i;
	struct qlnx_fastpath	*fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {
			QLNX_UNLOCK(ha);
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			QLNX_LOCK(ha);
		}
	}
	return;
}

static void
qlnx_get_params(qlnx_host_t *ha)
{
	if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
		device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
			qlnxe_queue_count);
		qlnxe_queue_count = 0;
	}
	return;
}

static void
qlnx_error_recovery_taskqueue(void *context, int pending)
{
	qlnx_host_t *ha;

	ha = context;

	QL_DPRINT2(ha, "enter\n");

	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_remove(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_slowpath_stop(ha);
	qlnx_slowpath_start(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_init(ha);

	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	QL_DPRINT2(ha, "exit\n");

	return;
}

static int
qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
{
	uint8_t tq_name[32];

	bzero(tq_name, sizeof (tq_name));
	snprintf(tq_name, sizeof (tq_name), "ql_err_tq");

	TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);

	ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
				taskqueue_thread_enqueue, &ha->err_taskqueue);

	if (ha->err_taskqueue == NULL)
		return (-1);

	taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);

	QL_DPRINT1(ha, "%p\n", ha->err_taskqueue);

	return (0);
}

static void
qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
{
	if (ha->err_taskqueue != NULL) {
		taskqueue_drain(ha->err_taskqueue, &ha->err_task);
		taskqueue_free(ha->err_taskqueue);
	}

	ha->err_taskqueue = NULL;

	return;
}

/*
 * Name:	qlnx_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
	qlnx_host_t	*ha = NULL;
	uint32_t	rsrc_len_reg __unused = 0;
	uint32_t	rsrc_len_dbells = 0;
	uint32_t	rsrc_len_msix __unused = 0;
	int		i;
	uint32_t	mfw_ver;
	uint32_t	num_sp_msix = 0;
	uint32_t	num_rdma_irqs = 0;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qlnx_host_t));

	ha->device_id = pci_get_device(dev);

	if (qlnx_valid_device(ha) != 0) {
		device_printf(dev, "device is not a valid device\n");
		return (ENXIO);
	}
	ha->pci_func = pci_get_function(dev);

	ha->pci_dev = dev;

	sx_init(&ha->hw_lock, "qlnx_hw_lock");

	ha->flags.lock_init = 1;

	pci_enable_busmaster(dev);

	/*
	 * map the PCI BARs
	 */
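
	/*
	 * BAR usage below (noted for reference): PCIR_BAR(0) maps the device
	 * register space, PCIR_BAR(2) the doorbell space, and PCIR_BAR(4)
	 * the MSI-X table.
	 */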
	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map BAR0\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	ha->dbells_rid = PCIR_BAR(2);
	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
					SYS_RES_MEMORY,
					ha->dbells_rid);
	if (rsrc_len_dbells) {
		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					&ha->dbells_rid, RF_ACTIVE);

		if (ha->pci_dbells == NULL) {
			device_printf(dev, "unable to map BAR1\n");
			goto qlnx_pci_attach_err;
		}
		ha->dbells_phys_addr = (uint64_t)
			bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);

		ha->dbells_size = rsrc_len_dbells;
	} else {
		if (qlnx_vf_device(ha) != 0) {
			device_printf(dev, "BAR1 size is zero\n");
			goto qlnx_pci_attach_err;
		}
	}

	ha->msix_rid = PCIR_BAR(4);
	ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->msix_rid, RF_ACTIVE);

	if (ha->msix_bar == NULL) {
		device_printf(dev, "unable to map BAR2\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->msix_rid);

	ha->dbg_level = 0x0000;

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x "
		"\n\t\t\t[ncpus = %d]\n",
		ha->pci_dev, ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		mp_ncpus);

	/*
	 * allocate dma tags
	 */

	if (qlnx_alloc_parent_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_tx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_rx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_init_hw(ha) != 0)
		goto qlnx_pci_attach_err;

	ha->flags.hw_init = 1;

	qlnx_get_params(ha);

	if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
		qlnxe_queue_count = QLNX_MAX_RSS;
	}

	/*
	 * Allocate MSI-x vectors
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnxe_queue_count == 0)
			ha->num_rss = QLNX_DEFAULT_RSS;
		else
			ha->num_rss = qlnxe_queue_count;

		num_sp_msix = ha->cdev.num_hwfns;
	} else {
		uint8_t max_rxq;
		uint8_t max_txq;

		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
		ecore_vf_get_num_txqs(&ha->cdev.hwfns[0], &max_txq);

		if (max_rxq < max_txq)
			ha->num_rss = max_rxq;
		else
			ha->num_rss = max_txq;

		if (ha->num_rss > QLNX_MAX_VF_RSS)
			ha->num_rss = QLNX_MAX_VF_RSS;

		num_sp_msix = 0;
	}

	if (ha->num_rss > mp_ncpus)
		ha->num_rss = mp_ncpus;

	ha->num_tc = QLNX_MAX_TC;

	ha->msix_count = pci_msix_count(dev);

#ifdef QLNX_ENABLE_IWARP

	num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */

	if (!ha->msix_count ||
		(ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qlnx_pci_attach_err;
	}

	if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
		ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
	else
		ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x msix_alloc = 0x%x"
		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
		ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qlnx_pci_attach_err;
	}

	/*
	 * Initialize slow path interrupt and task queue
	 */

	if (num_sp_msix) {
		if (qlnx_create_sp_taskqueues(ha) != 0)
			goto qlnx_pci_attach_err;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

			ha->sp_irq_rid[i] = i + 1;
			ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&ha->sp_irq_rid[i],
						(RF_ACTIVE | RF_SHAREABLE));
			if (ha->sp_irq[i] == NULL) {
				device_printf(dev,
					"could not allocate mbx interrupt\n");
				goto qlnx_pci_attach_err;
			}

			if (bus_setup_intr(dev, ha->sp_irq[i],
				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
				device_printf(dev,
					"could not setup slow path interrupt\n");
				goto qlnx_pci_attach_err;
			}

			QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
				" sp_irq %p sp_handle %p\n", p_hwfn,
				ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
		}
	}

	/*
	 * initialize fast path interrupt
	 */
	if (qlnx_create_fp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	for (i = 0; i < ha->num_rss; i++) {
		ha->irq_vec[i].rss_idx = i;
		ha->irq_vec[i].ha = ha;
		ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev,
				"could not allocate interrupt[%d] irq_rid = %d\n",
				i, ha->irq_vec[i].irq_rid);
			goto qlnx_pci_attach_err;
		}

		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
			device_printf(dev, "could not allocate tx_br[%d]\n", i);
			goto qlnx_pci_attach_err;
		}
	}

	if (qlnx_vf_device(ha) != 0) {
		callout_init(&ha->qlnx_callout, 1);
		ha->flags.callout_init = 1;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->grcdump_size[i] == 0)
				goto qlnx_pci_attach_err;

			ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
			QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
				i, ha->grcdump_size[i]);

			ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
			if (ha->grcdump[i] == NULL) {
				device_printf(dev, "grcdump alloc[%d] failed\n", i);
				goto qlnx_pci_attach_err;
			}

			if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->idle_chk_size[i] == 0)
				goto qlnx_pci_attach_err;

			ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
			QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
				i, ha->idle_chk_size[i]);

			ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

			if (ha->idle_chk[i] == NULL) {
				device_printf(dev, "idle_chk alloc failed\n");
				goto qlnx_pci_attach_err;
			}
		}

		if (qlnx_create_error_recovery_taskqueue(ha) != 0)
			goto qlnx_pci_attach_err;
	}

	if (qlnx_slowpath_start(ha) != 0)
		goto qlnx_pci_attach_err;
	else
		ha->flags.slowpath_start = 1;

	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}

		if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}
	} else {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
		ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
	}
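
	/*
	 * Note (added for clarity): the MFW version word packs four version
	 * fields one per byte, most significant byte first; e.g. a
	 * hypothetical value 0x08210000 is rendered as "8.33.0.0".
	 */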
	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
		FW_ENGINEERING_VERSION);

	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
		ha->stormfw_ver, ha->mfw_ver);

	qlnx_init_ifnet(dev, ha);

	/*
	 * add sysctls
	 */
	qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
	/*
	 * create ioctl device interface
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_make_cdev(ha)) {
			device_printf(dev, "%s: qlnx_make_cdev failed\n", __func__);
			goto qlnx_pci_attach_err;
		}

#ifdef QLNX_ENABLE_IWARP
		qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV

	if (qlnx_vf_device(ha) != 0)
		qlnx_initialize_sriov(ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifdef QLNX_VF */

	QL_DPRINT2(ha, "success\n");

	return (0);

qlnx_pci_attach_err:

	qlnx_release(ha);

	return (ENXIO);
}

/*
 * Name:	qlnx_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qlnx_pci_detach(device_t dev)
{
	qlnx_host_t	*ha = NULL;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (ENOMEM);
	}

	if (qlnx_vf_device(ha) != 0) {
#ifdef CONFIG_ECORE_SRIOV
		int ret;

		ret = pci_iov_detach(dev);
		if (ret) {
			device_printf(dev, "%s: SRIOV in use\n", __func__);
			return (ret);
		}

#endif /* #ifdef CONFIG_ECORE_SRIOV */

#ifdef QLNX_ENABLE_IWARP
		if (qlnx_rdma_dev_remove(ha) != 0)
			return (EBUSY);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

	qlnx_release(ha);

	return (0);
}

#ifdef QLNX_ENABLE_IWARP

static uint8_t
qlnx_get_personality(uint8_t pci_func)
{
	uint8_t personality;

	personality = (qlnxe_rdma_configuration >>
				(pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
				QLNX_PERSONALITY_MASK;
	return (personality);
}

static void
qlnx_set_personality(qlnx_host_t *ha)
{
	uint8_t personality;

	personality = qlnx_get_personality(ha->pci_func);

	switch (personality) {
	case QLNX_PERSONALITY_DEFAULT:
		device_printf(ha->pci_dev, "%s: DEFAULT\n",
			__func__);
		ha->personality = ECORE_PCI_DEFAULT;
		break;

	case QLNX_PERSONALITY_ETH_ONLY:
		device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
			__func__);
		ha->personality = ECORE_PCI_ETH;
		break;

	case QLNX_PERSONALITY_ETH_IWARP:
		device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_IWARP;
		break;

	case QLNX_PERSONALITY_ETH_ROCE:
		device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_ROCE;
		break;
	}

	return;
}

#endif /* #ifdef QLNX_ENABLE_IWARP */

static int
qlnx_init_hw(qlnx_host_t *ha)
{
	int				rval = 0;
	struct ecore_hw_prepare_params	params;

	ha->cdev.ha = ha;
	ecore_init_struct(&ha->cdev);

	/* ha->dp_module = ECORE_MSG_PROBE |
				ECORE_MSG_INTR |
				ECORE_MSG_SP |
				ECORE_MSG_LINK |
				ECORE_MSG_SPQ |
				ECORE_MSG_RDMA;
	ha->dp_level = ECORE_LEVEL_VERBOSE;*/
	//ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
	ha->dp_level = ECORE_LEVEL_NOTICE;
	//ha->dp_level = ECORE_LEVEL_VERBOSE;

	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

	ha->cdev.regview = ha->pci_reg;

	ha->personality = ECORE_PCI_DEFAULT;

	if (qlnx_vf_device(ha) == 0) {
		ha->cdev.b_is_vf = true;

		if (ha->pci_dbells != NULL) {
			ha->cdev.doorbells = ha->pci_dbells;
			ha->cdev.db_phys_addr = ha->dbells_phys_addr;
			ha->cdev.db_size = ha->dbells_size;
		} else {
			ha->pci_dbells = ha->pci_reg;
		}
	} else {
		ha->cdev.doorbells = ha->pci_dbells;
		ha->cdev.db_phys_addr = ha->dbells_phys_addr;
		ha->cdev.db_size = ha->dbells_size;

#ifdef QLNX_ENABLE_IWARP

		if (qlnx_rdma_supported(ha) == 0)
			qlnx_set_personality(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */
	}
	QL_DPRINT2(ha, "%s: %s\n", __func__,
		(ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));

	bzero(&params, sizeof (struct ecore_hw_prepare_params));

	params.personality = ha->personality;

	params.drv_resc_alloc = false;
	params.chk_reg_fifo = false;
	params.initiate_pf_flr = true;
	params.epoch = 0;

	ecore_hw_prepare(&ha->cdev, &params);

	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

	QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
		ha, &ha->cdev, &ha->cdev.hwfns[0]);

	return (rval);
}

static void
qlnx_release(qlnx_host_t *ha)
{
	device_t	dev;
	int		i;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");

	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
		if (ha->idle_chk[i] != NULL) {
			free(ha->idle_chk[i], M_QLNXBUF);
			ha->idle_chk[i] = NULL;
		}

		if (ha->grcdump[i] != NULL) {
			free(ha->grcdump[i], M_QLNXBUF);
			ha->grcdump[i] = NULL;
		}
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	if (ha->flags.slowpath_start) {
		qlnx_slowpath_stop(ha);
	}

	if (ha->flags.hw_init)
		ecore_hw_remove(&ha->cdev);

	qlnx_del_cdev(ha);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qlnx_free_tx_dma_tag(ha);

	qlnx_free_rx_dma_tag(ha);

	qlnx_free_parent_dma_tag(ha);

	if (qlnx_vf_device(ha) != 0) {
		qlnx_destroy_error_recovery_taskqueue(ha);
	}

	for (i = 0; i < ha->num_rss; i++) {
		struct qlnx_fastpath *fp = &ha->fp_array[i];

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qlnx_free_tx_br(ha, fp);
	}
	qlnx_destroy_fp_taskqueues(ha);

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
				ha->sp_handle[i]);

		if (ha->sp_irq[i])
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->sp_irq_rid[i], ha->sp_irq[i]);
	}

	qlnx_destroy_sp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		sx_destroy(&ha->hw_lock);
	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

	if (ha->dbells_size && ha->pci_dbells)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
				ha->pci_dbells);

	if (ha->msix_bar)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
				ha->msix_bar);

	QL_DPRINT2(ha, "exit\n");
	return;
}

static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
	int	i;

	if (ha->ifp != NULL)
		if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));

	QL_DPRINT2(ha, "enter\n");

	if (qlnx_vf_device(ha) == 0)
		return;

	ha->error_recovery = 1;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
	}

	QL_DPRINT2(ha, "exit\n");

	return;
}

static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
	int		err, ret = 0;
	qlnx_host_t	*ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qlnx_host_t *)arg1;
		qlnx_trigger_dump(ha);
	}
	return (err);
}

static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
{
	int			err, i, ret = 0, usecs = 0;
	qlnx_host_t		*ha;
	struct ecore_hwfn	*p_hwfn;
	struct qlnx_fastpath	*fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);

	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	if (qlnx_vf_device(ha) == 0)
		return (-1);

	for (i = 0; i < ha->num_rss; i++) {
		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->txq[0]->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, 0,
					(uint16_t)usecs, fp->txq[0]->handle);
		}
	}

	if (!ret)
		ha->tx_coalesce_usecs = (uint8_t)usecs;

	return (err);
}

static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
{
	int			err, i, ret = 0, usecs = 0;
	qlnx_host_t		*ha;
	struct ecore_hwfn	*p_hwfn;
	struct qlnx_fastpath	*fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);

	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	if (qlnx_vf_device(ha) == 0)
		return (-1);

	for (i = 0; i < ha->num_rss; i++) {
		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->rxq->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
					0, fp->rxq->handle);
		}
	}

	if (!ret)
		ha->rx_coalesce_usecs = (uint8_t)usecs;

	return (err);
}

static void
qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	struct sysctl_oid	*ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "sp_interrupts",
		CTLFLAG_RD, &ha->sp_interrupts,
		"No. of slowpath interrupts");

	return;
}
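
/*
 * Illustrative usage note: the stats nodes created here and below hang off
 * the device's sysctl tree, so on a hypothetical first instance the
 * slowpath interrupt count can be read with
 * `sysctl dev.ql.0.spstat.sp_interrupts`.
 */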

static void
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	struct sysctl_oid_list	*node_children;
	struct sysctl_oid	*ctx_oid;
	int			i, j;
	uint8_t			name_str[16];

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	for (i = 0; i < ha->num_rss; i++) {
		bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
		snprintf(name_str, sizeof(name_str), "%d", i);

		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
		node_children = SYSCTL_CHILDREN(ctx_oid);

		/* Tx Related */

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_processed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
			"No. of packets processed for transmission");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_freed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
			"No. of freed packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_transmitted",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
			"No. of transmitted packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_completed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
			"No. of transmit completions");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_non_tso_pkts",
			CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
			"No. of non-LSO transmitted packets");

#ifdef QLNX_TRACE_PERF_DATA

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_trans_ctx",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
			"No. of transmitted packets in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_ctx",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
			"No. of transmit completions in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_trans_fp",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
			"No. of transmitted packets in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_fp",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
			"No. of transmit completions in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_intr",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
			"No. of transmit completions in interrupt ctx");
#endif

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_tso_pkts",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
			"No. of LSO transmitted packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_lso_wnd_min_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
			"tx_lso_wnd_min_len");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
			"tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
			"tx_nsegs_gt_elem_left");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
			ha->fp_array[i].tx_tso_max_pkt_len,
			"tx_tso_max_pkt_len");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
			ha->fp_array[i].tx_tso_min_pkt_len,
			"tx_tso_min_pkt_len");

		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_nseg_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts[j], name_str);
		}

#ifdef QLNX_TRACE_PERF_DATA
		for (j = 0; j < 18; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_hist_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts_hist[j], name_str);
		}
		for (j = 0; j < 5; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_comInt_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_comInt[j], name_str);
		}
		for (j = 0; j < 18; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_q_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts_q[j], name_str);
		}
#endif

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
			"err_tx_nsegs_gt_elem_left");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_create",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
			"err_tx_dmamap_create");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
			"err_tx_defrag_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_non_tso_max_seg",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
			"err_tx_non_tso_max_seg");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
			"err_tx_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
			"err_tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_free_pkt_null",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
			"err_tx_free_pkt_null");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_cons_idx_conflict",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
			"err_tx_cons_idx_conflict");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_64",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
			"lro_cnt_64");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_128",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
			"lro_cnt_128");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_256",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
			"lro_cnt_256");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_512",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
			"lro_cnt_512");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_1024",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
			"lro_cnt_1024");

		/* Rx Related */

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "rx_pkts",
			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
			"No. of received packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_start",
			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
			"No. of tpa_start packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_cont",
			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
			"No. of tpa_cont packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_end",
			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
			"No. of tpa_end packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
			"err_m_getcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getjcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
			"err_m_getjcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_hw_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
			"err_rx_hw_errors");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_alloc_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
			"err_rx_alloc_errors");
	}

	return;
}

static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	struct sysctl_oid	*ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "no_buff_discards",
		CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
		"No. of packets discarded due to lack of buffer");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "packet_too_big_discard",
		CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
		"No. of packets discarded because packet was too big");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "ttl0_discard",
		CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
		"ttl0_discard");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
		"rx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
		"rx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
		"rx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
		"rx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
		"rx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
		"rx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mftag_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
		"mftag_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mac_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
		"mac_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
		"tx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
		"tx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
		"tx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
		"tx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_pkts",
1860                 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1861                 "tx_mcast_pkts");
1862 
1863 	SYSCTL_ADD_QUAD(ctx, children,
1864                 OID_AUTO, "tx_bcast_pkts",
1865                 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1866                 "tx_bcast_pkts");
1867 
1868 	SYSCTL_ADD_QUAD(ctx, children,
1869                 OID_AUTO, "tx_err_drop_pkts",
1870                 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1871                 "tx_err_drop_pkts");
1872 
1873 	SYSCTL_ADD_QUAD(ctx, children,
1874                 OID_AUTO, "tpa_coalesced_pkts",
1875                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1876                 "tpa_coalesced_pkts");
1877 
1878 	SYSCTL_ADD_QUAD(ctx, children,
1879                 OID_AUTO, "tpa_coalesced_events",
1880                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1881                 "tpa_coalesced_events");
1882 
1883 	SYSCTL_ADD_QUAD(ctx, children,
1884                 OID_AUTO, "tpa_aborts_num",
1885                 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1886                 "tpa_aborts_num");
1887 
1888 	SYSCTL_ADD_QUAD(ctx, children,
1889                 OID_AUTO, "tpa_not_coalesced_pkts",
1890                 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1891                 "tpa_not_coalesced_pkts");
1892 
1893 	SYSCTL_ADD_QUAD(ctx, children,
1894                 OID_AUTO, "tpa_coalesced_bytes",
1895                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1896                 "tpa_coalesced_bytes");
1897 
1898 	SYSCTL_ADD_QUAD(ctx, children,
1899                 OID_AUTO, "rx_64_byte_packets",
1900                 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1901                 "rx_64_byte_packets");
1902 
1903 	SYSCTL_ADD_QUAD(ctx, children,
1904                 OID_AUTO, "rx_65_to_127_byte_packets",
1905                 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1906                 "rx_65_to_127_byte_packets");
1907 
1908 	SYSCTL_ADD_QUAD(ctx, children,
1909                 OID_AUTO, "rx_128_to_255_byte_packets",
1910                 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1911                 "rx_128_to_255_byte_packets");
1912 
1913 	SYSCTL_ADD_QUAD(ctx, children,
1914                 OID_AUTO, "rx_256_to_511_byte_packets",
1915                 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1916                 "rx_256_to_511_byte_packets");
1917 
1918 	SYSCTL_ADD_QUAD(ctx, children,
1919                 OID_AUTO, "rx_512_to_1023_byte_packets",
1920                 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1921                 "rx_512_to_1023_byte_packets");
1922 
1923 	SYSCTL_ADD_QUAD(ctx, children,
1924                 OID_AUTO, "rx_1024_to_1518_byte_packets",
1925                 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1926                 "rx_1024_to_1518_byte_packets");
1927 
1928 	SYSCTL_ADD_QUAD(ctx, children,
1929                 OID_AUTO, "rx_1519_to_1522_byte_packets",
1930                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1931                 "rx_1519_to_1522_byte_packets");
1932 
1933 	SYSCTL_ADD_QUAD(ctx, children,
1934                 OID_AUTO, "rx_1523_to_2047_byte_packets",
1935                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1936                 "rx_1523_to_2047_byte_packets");
1937 
1938 	SYSCTL_ADD_QUAD(ctx, children,
1939                 OID_AUTO, "rx_2048_to_4095_byte_packets",
1940                 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1941                 "rx_2048_to_4095_byte_packets");
1942 
1943 	SYSCTL_ADD_QUAD(ctx, children,
1944                 OID_AUTO, "rx_4096_to_9216_byte_packets",
1945                 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1946                 "rx_4096_to_9216_byte_packets");
1947 
1948 	SYSCTL_ADD_QUAD(ctx, children,
1949                 OID_AUTO, "rx_9217_to_16383_byte_packets",
1950                 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1951                 "rx_9217_to_16383_byte_packets");
1952 
1953 	SYSCTL_ADD_QUAD(ctx, children,
1954                 OID_AUTO, "rx_crc_errors",
1955                 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1956                 "rx_crc_errors");
1957 
1958 	SYSCTL_ADD_QUAD(ctx, children,
1959                 OID_AUTO, "rx_mac_crtl_frames",
1960                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1961                 "rx_mac_crtl_frames");
1962 
1963 	SYSCTL_ADD_QUAD(ctx, children,
1964                 OID_AUTO, "rx_pause_frames",
1965                 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
1966                 "rx_pause_frames");
1967 
1968 	SYSCTL_ADD_QUAD(ctx, children,
1969                 OID_AUTO, "rx_pfc_frames",
1970                 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
1971                 "rx_pfc_frames");
1972 
1973 	SYSCTL_ADD_QUAD(ctx, children,
1974                 OID_AUTO, "rx_align_errors",
1975                 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
1976                 "rx_align_errors");
1977 
1978 	SYSCTL_ADD_QUAD(ctx, children,
1979                 OID_AUTO, "rx_carrier_errors",
1980                 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
1981                 "rx_carrier_errors");
1982 
1983 	SYSCTL_ADD_QUAD(ctx, children,
1984                 OID_AUTO, "rx_oversize_packets",
1985                 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
1986                 "rx_oversize_packets");
1987 
1988 	SYSCTL_ADD_QUAD(ctx, children,
1989                 OID_AUTO, "rx_jabbers",
1990                 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
1991                 "rx_jabbers");
1992 
1993 	SYSCTL_ADD_QUAD(ctx, children,
1994                 OID_AUTO, "rx_undersize_packets",
1995                 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
1996                 "rx_undersize_packets");
1997 
1998 	SYSCTL_ADD_QUAD(ctx, children,
1999                 OID_AUTO, "rx_fragments",
2000                 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
2001                 "rx_fragments");
2002 
2003 	SYSCTL_ADD_QUAD(ctx, children,
2004                 OID_AUTO, "tx_64_byte_packets",
2005                 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
2006                 "tx_64_byte_packets");
2007 
2008 	SYSCTL_ADD_QUAD(ctx, children,
2009                 OID_AUTO, "tx_65_to_127_byte_packets",
2010                 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
2011                 "tx_65_to_127_byte_packets");
2012 
2013 	SYSCTL_ADD_QUAD(ctx, children,
2014                 OID_AUTO, "tx_128_to_255_byte_packets",
2015                 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
2016                 "tx_128_to_255_byte_packets");
2017 
2018 	SYSCTL_ADD_QUAD(ctx, children,
2019                 OID_AUTO, "tx_256_to_511_byte_packets",
2020                 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
2021                 "tx_256_to_511_byte_packets");
2022 
2023 	SYSCTL_ADD_QUAD(ctx, children,
2024                 OID_AUTO, "tx_512_to_1023_byte_packets",
2025                 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
2026                 "tx_512_to_1023_byte_packets");
2027 
2028 	SYSCTL_ADD_QUAD(ctx, children,
2029                 OID_AUTO, "tx_1024_to_1518_byte_packets",
2030                 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
2031                 "tx_1024_to_1518_byte_packets");
2032 
2033 	SYSCTL_ADD_QUAD(ctx, children,
2034                 OID_AUTO, "tx_1519_to_2047_byte_packets",
2035                 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
2036                 "tx_1519_to_2047_byte_packets");
2037 
2038 	SYSCTL_ADD_QUAD(ctx, children,
2039                 OID_AUTO, "tx_2048_to_4095_byte_packets",
2040                 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
2041                 "tx_2048_to_4095_byte_packets");
2042 
2043 	SYSCTL_ADD_QUAD(ctx, children,
2044                 OID_AUTO, "tx_4096_to_9216_byte_packets",
2045                 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
2046                 "tx_4096_to_9216_byte_packets");
2047 
2048 	SYSCTL_ADD_QUAD(ctx, children,
2049                 OID_AUTO, "tx_9217_to_16383_byte_packets",
2050                 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
2051                 "tx_9217_to_16383_byte_packets");
2052 
2053 	SYSCTL_ADD_QUAD(ctx, children,
2054                 OID_AUTO, "tx_pause_frames",
2055                 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
2056                 "tx_pause_frames");
2057 
2058 	SYSCTL_ADD_QUAD(ctx, children,
2059                 OID_AUTO, "tx_pfc_frames",
2060                 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
2061                 "tx_pfc_frames");
2062 
2063 	SYSCTL_ADD_QUAD(ctx, children,
2064                 OID_AUTO, "tx_lpi_entry_count",
2065                 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
2066                 "tx_lpi_entry_count");
2067 
2068 	SYSCTL_ADD_QUAD(ctx, children,
2069                 OID_AUTO, "tx_total_collisions",
2070                 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
2071                 "tx_total_collisions");
2072 
2073 	SYSCTL_ADD_QUAD(ctx, children,
2074                 OID_AUTO, "brb_truncates",
2075                 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
2076                 "brb_truncates");
2077 
2078 	SYSCTL_ADD_QUAD(ctx, children,
2079                 OID_AUTO, "brb_discards",
2080                 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
2081                 "brb_discards");
2082 
2083 	SYSCTL_ADD_QUAD(ctx, children,
2084                 OID_AUTO, "rx_mac_bytes",
2085                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
2086                 "rx_mac_bytes");
2087 
2088 	SYSCTL_ADD_QUAD(ctx, children,
2089                 OID_AUTO, "rx_mac_uc_packets",
2090                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
2091                 "rx_mac_uc_packets");
2092 
2093 	SYSCTL_ADD_QUAD(ctx, children,
2094                 OID_AUTO, "rx_mac_mc_packets",
2095                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
2096                 "rx_mac_mc_packets");
2097 
2098 	SYSCTL_ADD_QUAD(ctx, children,
2099                 OID_AUTO, "rx_mac_bc_packets",
2100                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
2101                 "rx_mac_bc_packets");
2102 
2103 	SYSCTL_ADD_QUAD(ctx, children,
2104                 OID_AUTO, "rx_mac_frames_ok",
2105                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
2106                 "rx_mac_frames_ok");
2107 
2108 	SYSCTL_ADD_QUAD(ctx, children,
2109                 OID_AUTO, "tx_mac_bytes",
2110                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
2111                 "tx_mac_bytes");
2112 
2113 	SYSCTL_ADD_QUAD(ctx, children,
2114                 OID_AUTO, "tx_mac_uc_packets",
2115                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
2116                 "tx_mac_uc_packets");
2117 
2118 	SYSCTL_ADD_QUAD(ctx, children,
2119                 OID_AUTO, "tx_mac_mc_packets",
2120                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
2121                 "tx_mac_mc_packets");
2122 
2123 	SYSCTL_ADD_QUAD(ctx, children,
2124                 OID_AUTO, "tx_mac_bc_packets",
2125                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
2126                 "tx_mac_bc_packets");
2127 
2128 	SYSCTL_ADD_QUAD(ctx, children,
2129                 OID_AUTO, "tx_mac_ctrl_frames",
2130                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
2131                 "tx_mac_ctrl_frames");
2132 	return;
2133 }
2134 
2135 static void
2136 qlnx_add_sysctls(qlnx_host_t *ha)
2137 {
2138         device_t		dev = ha->pci_dev;
2139 	struct sysctl_ctx_list	*ctx;
2140 	struct sysctl_oid_list	*children;
2141 
2142 	ctx = device_get_sysctl_ctx(dev);
2143 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2144 
2145 	qlnx_add_fp_stats_sysctls(ha);
2146 	qlnx_add_sp_stats_sysctls(ha);
2147 
2148 	if (qlnx_vf_device(ha) != 0)
2149 		qlnx_add_hw_stats_sysctls(ha);
2150 
2151 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
2152 		CTLFLAG_RD, qlnx_ver_str, 0,
2153 		"Driver Version");
2154 
2155 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
2156 		CTLFLAG_RD, ha->stormfw_ver, 0,
2157 		"STORM Firmware Version");
2158 
2159 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
2160 		CTLFLAG_RD, ha->mfw_ver, 0,
2161 		"Management Firmware Version");
2162 
2163         SYSCTL_ADD_UINT(ctx, children,
2164                 OID_AUTO, "personality", CTLFLAG_RD,
2165                 &ha->personality, ha->personality,
2166 		"\tpersonality = 0 => Ethernet Only\n"
2167 		"\tpersonality = 3 => Ethernet and RoCE\n"
2168 		"\tpersonality = 4 => Ethernet and iWARP\n"
2169 		"\tpersonality = 6 => Default in Shared Memory\n");
2170 
2171         ha->dbg_level = 0;
2172         SYSCTL_ADD_UINT(ctx, children,
2173                 OID_AUTO, "debug", CTLFLAG_RW,
2174                 &ha->dbg_level, ha->dbg_level, "Debug Level");
2175 
2176         ha->dp_level = 0x01;
2177         SYSCTL_ADD_UINT(ctx, children,
2178                 OID_AUTO, "dp_level", CTLFLAG_RW,
2179                 &ha->dp_level, ha->dp_level, "DP Level");
2180 
2181         ha->dbg_trace_lro_cnt = 0;
2182         SYSCTL_ADD_UINT(ctx, children,
2183                 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
2184                 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2185 		"Trace LRO Counts");
2186 
2187         ha->dbg_trace_tso_pkt_len = 0;
2188         SYSCTL_ADD_UINT(ctx, children,
2189                 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
2190                 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2191 		"Trace TSO packet lengths");
2192 
2193         ha->dp_module = 0;
2194         SYSCTL_ADD_UINT(ctx, children,
2195                 OID_AUTO, "dp_module", CTLFLAG_RW,
2196                 &ha->dp_module, ha->dp_module, "DP Module");
2197 
2198         ha->err_inject = 0;
2199 
2200         SYSCTL_ADD_UINT(ctx, children,
2201                 OID_AUTO, "err_inject", CTLFLAG_RW,
2202                 &ha->err_inject, ha->err_inject, "Error Inject");
2203 
2204 	ha->storm_stats_enable = 0;
2205 
2206 	SYSCTL_ADD_UINT(ctx, children,
2207 		OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
2208 		&ha->storm_stats_enable, ha->storm_stats_enable,
2209 		"Enable Storm Statistics Gathering");
2210 
2211 	ha->storm_stats_index = 0;
2212 
2213 	SYSCTL_ADD_UINT(ctx, children,
2214 		OID_AUTO, "storm_stats_index", CTLFLAG_RD,
2215 		&ha->storm_stats_index, ha->storm_stats_index,
2216 		"Current Storm Statistics Index");
2217 
2218 	ha->grcdump_taken = 0;
2219 	SYSCTL_ADD_UINT(ctx, children,
2220 		OID_AUTO, "grcdump_taken", CTLFLAG_RD,
2221 		&ha->grcdump_taken, ha->grcdump_taken,
2222 		"grcdump_taken");
2223 
2224 	ha->idle_chk_taken = 0;
2225 	SYSCTL_ADD_UINT(ctx, children,
2226 		OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
2227 		&ha->idle_chk_taken, ha->idle_chk_taken,
2228 		"idle_chk_taken");
2229 
2230 	SYSCTL_ADD_UINT(ctx, children,
2231 		OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
2232 		&ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2233 		"rx_coalesce_usecs");
2234 
2235 	SYSCTL_ADD_UINT(ctx, children,
2236 		OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
2237 		&ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2238 		"tx_coalesce_usecs");
2239 
2240 	SYSCTL_ADD_PROC(ctx, children,
2241 	    OID_AUTO, "trigger_dump",
2242 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2243 	    (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2244 
2245 	SYSCTL_ADD_PROC(ctx, children,
2246 	    OID_AUTO, "set_rx_coalesce_usecs",
2247 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2248 	    (void *)ha, 0, qlnx_set_rx_coalesce, "I",
2249 	    "rx interrupt coalesce period microseconds");
2250 
2251 	SYSCTL_ADD_PROC(ctx, children,
2252 	    OID_AUTO, "set_tx_coalesce_usecs",
2253 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2254 	    (void *)ha, 0, qlnx_set_tx_coalesce, "I",
2255 	    "tx interrupt coalesce period microseconds");
2256 
2257 	ha->rx_pkt_threshold = 128;
2258         SYSCTL_ADD_UINT(ctx, children,
2259                 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
2260                 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2261 		"No. of Rx Pkts to process at a time");
2262 
2263 	ha->rx_jumbo_buf_eq_mtu = 0;
2264         SYSCTL_ADD_UINT(ctx, children,
2265                 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
2266                 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2267 		"== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
2268 		"otherwise Rx Jumbo buffers are set to >= MTU size\n");
2269 
2270 	SYSCTL_ADD_QUAD(ctx, children,
2271                 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
2272 		&ha->err_illegal_intr, "err_illegal_intr");
2273 
2274 	SYSCTL_ADD_QUAD(ctx, children,
2275                 OID_AUTO, "err_fp_null", CTLFLAG_RD,
2276 		&ha->err_fp_null, "err_fp_null");
2277 
2278 	SYSCTL_ADD_QUAD(ctx, children,
2279                 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
2280 		&ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2281 	return;
2282 }
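
/*
 * Usage sketch for the knobs registered above (the device/unit name below
 * is hypothetical; substitute whatever name this driver instance actually
 * attaches with on your system):
 *
 *	sysctl dev.ql.0.debug=1		# raise the driver debug level
 *	sysctl dev.ql.0.hwstat		# dump the hardware counter subtree
 */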
2283 
2284 /*****************************************************************************
2285  * Operating System Network Interface Functions
2286  *****************************************************************************/
2287 
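/*
 * Name: qlnx_init_ifnet
 * Function: Allocates and initializes the ifnet: derives the baudrate from
 *      the PCI device id, installs the driver entry points, programs the
 *      MAC address (falling back to a locally generated 00:0e:1e address
 *      when NVRAM provides all zeroes), advertises the checksum/TSO/LRO/
 *      VLAN capabilities and registers the supported media types.
 */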
2288 static void
2289 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2290 {
2291 	uint16_t	device_id;
2292         if_t		ifp;
2293 
2294         ifp = ha->ifp = if_alloc(IFT_ETHER);
2295         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2296 
2297 	device_id = pci_get_device(ha->pci_dev);
2298 
2299         if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2300 		if_setbaudrate(ifp, IF_Gbps(40));
2301         else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2302 			(device_id == QLOGIC_PCI_DEVICE_ID_8070))
2303 		if_setbaudrate(ifp, IF_Gbps(25));
2304         else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2305 		if_setbaudrate(ifp, IF_Gbps(50));
2306         else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2307 		if_setbaudrate(ifp, IF_Gbps(100));
2308 
2309         if_setcapabilities(ifp, IFCAP_LINKSTATE);
2310 
2311         if_setinitfn(ifp, qlnx_init);
2312         if_setsoftc(ifp, ha);
2313         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2314         if_setioctlfn(ifp, qlnx_ioctl);
2315         if_settransmitfn(ifp, qlnx_transmit);
2316         if_setqflushfn(ifp, qlnx_qflush);
2317 
2318         if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha));
2319         if_setsendqready(ifp);
2320 
2321 	if_setgetcounterfn(ifp, qlnx_get_counter);
2322 
2323         ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2324 
2325         memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2326 
2327 	if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2328 		!ha->primary_mac[2] && !ha->primary_mac[3] &&
2329 		!ha->primary_mac[4] && !ha->primary_mac[5]) {
2330 		uint32_t rnd;
2331 
2332 		rnd = arc4random();
2333 
2334 		ha->primary_mac[0] = 0x00;
2335 		ha->primary_mac[1] = 0x0e;
2336 		ha->primary_mac[2] = 0x1e;
2337 		ha->primary_mac[3] = rnd & 0xFF;
2338 		ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2339 		ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2340 	}
2341 
2342 	ether_ifattach(ifp, ha->primary_mac);
2343 	bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2344 
2345 	if_setcapabilities(ifp, IFCAP_HWCSUM);
2346 	if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
2347 
2348 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
2349 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
2350 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
2351 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
2352 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
2353 	if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
2354 	if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
2355 	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
2356 
2357 	if_sethwtsomax(ifp,  QLNX_MAX_TSO_FRAME_SIZE -
2358 				(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2359 	if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */
2360 	if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE);
2361 
2362         if_setcapenable(ifp, if_getcapabilities(ifp));
2363 
2364 	if_sethwassist(ifp, CSUM_IP);
2365 	if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
2366 	if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
2367 	if_sethwassistbits(ifp, CSUM_TSO, 0);
2368 
2369 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2370 
2371         ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,
2372 		qlnx_media_status);
2373 
2374         if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2375 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2376 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2377 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2378         } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2379 			(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2380 		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2381 		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2382         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2383 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2384 		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2385         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2386 		ifmedia_add(&ha->media,
2387 			(IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2388 		ifmedia_add(&ha->media,
2389 			(IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2390 		ifmedia_add(&ha->media,
2391 			(IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2392 	}
2393 
2394         ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2395         ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2396 
2397         ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2398 
2399         QL_DPRINT2(ha, "exit\n");
2400 
2401         return;
2402 }
2403 
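/*
 * Name: qlnx_init_locked
 * Function: (Re)initializes the interface with the driver lock held:
 *      stops any running instance, reloads the device and, on success,
 *      marks the interface IFF_DRV_RUNNING (and opens the RDMA device
 *      when iWARP is enabled on a PF).
 */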
2404 static void
2405 qlnx_init_locked(qlnx_host_t *ha)
2406 {
2407 	if_t		ifp = ha->ifp;
2408 
2409 	QL_DPRINT1(ha, "Driver Initialization start \n");
2410 
2411 	qlnx_stop(ha);
2412 
2413 	if (qlnx_load(ha) == 0) {
2414 		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2415 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2416 
2417 #ifdef QLNX_ENABLE_IWARP
2418 		if (qlnx_vf_device(ha) != 0) {
2419 			qlnx_rdma_dev_open(ha);
2420 		}
2421 #endif /* #ifdef QLNX_ENABLE_IWARP */
2422 	}
2423 
2424 	return;
2425 }
2426 
2427 static void
2428 qlnx_init(void *arg)
2429 {
2430 	qlnx_host_t	*ha;
2431 
2432 	ha = (qlnx_host_t *)arg;
2433 
2434 	QL_DPRINT2(ha, "enter\n");
2435 
2436 	QLNX_LOCK(ha);
2437 	qlnx_init_locked(ha);
2438 	QLNX_UNLOCK(ha);
2439 
2440 	QL_DPRINT2(ha, "exit\n");
2441 
2442 	return;
2443 }
2444 
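/*
 * Name: qlnx_config_mcast_mac_addr
 * Function: Adds or removes a single multicast MAC filter in the device
 *      via an ecore multicast filter command.
 */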
2445 static int
2446 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2447 {
2448 	struct ecore_filter_mcast	*mcast;
2449 	struct ecore_dev		*cdev;
2450 	int				rc;
2451 
2452 	cdev = &ha->cdev;
2453 
2454 	mcast = &ha->ecore_mcast;
2455 	bzero(mcast, sizeof(struct ecore_filter_mcast));
2456 
2457 	if (add_mac)
2458 		mcast->opcode = ECORE_FILTER_ADD;
2459 	else
2460 		mcast->opcode = ECORE_FILTER_REMOVE;
2461 
2462 	mcast->num_mc_addrs = 1;
2463 	memcpy(mcast->mac, mac_addr, ETH_ALEN);
2464 
2465 	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2466 
2467 	return (rc);
2468 }
2469 
2470 static int
2471 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2472 {
2473         int	i;
2474 
2475         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2476                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2477                         return 0; /* it's already been added */
2478         }
2479 
2480         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2481                 if ((ha->mcast[i].addr[0] == 0) &&
2482                         (ha->mcast[i].addr[1] == 0) &&
2483                         (ha->mcast[i].addr[2] == 0) &&
2484                         (ha->mcast[i].addr[3] == 0) &&
2485                         (ha->mcast[i].addr[4] == 0) &&
2486                         (ha->mcast[i].addr[5] == 0)) {
2487                         if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2488                                 return (-1);
2489 
2490                         bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2491                         ha->nmcast++;
2492 
2493                         return 0;
2494                 }
2495         }
2496         return 0;
2497 }
2498 
2499 static int
2500 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2501 {
2502         int	i;
2503 
2504         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2505                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2506                         if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2507                                 return (-1);
2508 
2509                         ha->mcast[i].addr[0] = 0;
2510                         ha->mcast[i].addr[1] = 0;
2511                         ha->mcast[i].addr[2] = 0;
2512                         ha->mcast[i].addr[3] = 0;
2513                         ha->mcast[i].addr[4] = 0;
2514                         ha->mcast[i].addr[5] = 0;
2515 
2516                         ha->nmcast--;
2517 
2518                         return 0;
2519                 }
2520         }
2521         return 0;
2522 }
2523 
2524 /*
2525  * Name: qlnx_hw_set_multi
2526  * Function: Sets the multicast addresses provided by the host O.S. into
2527  *      the hardware (for the given interface)
2528  */
2529 static void
2530 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2531 	uint32_t add_mac)
2532 {
2533         int	i;
2534 
2535         for (i = 0; i < mcnt; i++) {
2536                 if (add_mac) {
2537                         if (qlnx_hw_add_mcast(ha, mta))
2538                                 break;
2539                 } else {
2540                         if (qlnx_hw_del_mcast(ha, mta))
2541                                 break;
2542                 }
2543 
2544                 mta += ETHER_HDR_LEN;
2545         }
2546         return;
2547 }
2548 
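/*
 * Name: qlnx_copy_maddr
 * Function: if_foreach_llmaddr() callback; copies each link-level
 *      multicast address into the caller's flat mta[] array (stride of
 *      ETHER_HDR_LEN bytes per entry, matching qlnx_hw_set_multi()) until
 *      QLNX_MAX_NUM_MULTICAST_ADDRS entries have been collected.
 */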
2549 static u_int
2550 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
2551 {
2552 	uint8_t *mta = arg;
2553 
2554 	if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2555 		return (0);
2556 
2557 	bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2558 
2559 	return (1);
2560 }
2561 
2562 static int
2563 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2564 {
2565 	uint8_t		mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
2566 	if_t		ifp = ha->ifp;
2567 	u_int		mcnt;
2568 
2569 	if (qlnx_vf_device(ha) == 0)
2570 		return (0);
2571 
2572 	mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta);
2573 
2574 	QLNX_LOCK(ha);
2575 	qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2576 	QLNX_UNLOCK(ha);
2577 
2578 	return (0);
2579 }
2580 
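/*
 * Name: qlnx_set_promisc / qlnx_set_allmulti
 * Function: Translate IFF_PROMISC / IFF_ALLMULTI into the ecore
 *      unmatched-unicast/multicast accept-filter bits; no-ops on a VF.
 */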
2581 static int
2582 qlnx_set_promisc(qlnx_host_t *ha, int enabled)
2583 {
2584 	int	rc = 0;
2585 	uint8_t	filter;
2586 
2587 	if (qlnx_vf_device(ha) == 0)
2588 		return (0);
2589 
2590 	filter = ha->filter;
2591 	if (enabled) {
2592 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2593 		filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2594 	} else {
2595 		filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
2596 		filter &= ~ECORE_ACCEPT_UCAST_UNMATCHED;
2597 	}
2598 
2599 	rc = qlnx_set_rx_accept_filter(ha, filter);
2600 	return (rc);
2601 }
2602 
2603 static int
2604 qlnx_set_allmulti(qlnx_host_t *ha, int enabled)
2605 {
2606 	int	rc = 0;
2607 	uint8_t	filter;
2608 
2609 	if (qlnx_vf_device(ha) == 0)
2610 		return (0);
2611 
2612 	filter = ha->filter;
2613 	if (enabled) {
2614 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2615 	} else {
2616 		filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
2617 	}
2618 	rc = qlnx_set_rx_accept_filter(ha, filter);
2619 
2620 	return (rc);
2621 }
2622 
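/*
 * Name: qlnx_ioctl
 * Function: Interface ioctl handler: address/MTU/flags/capability changes,
 *      multicast list updates, media queries and SFP i2c reads (SIOCGI2C);
 *      everything else is punted to ether_ioctl().
 */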
2623 static int
2624 qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
2625 {
2626 	int		ret = 0, mask;
2627 	int		flags;
2628 	struct ifreq	*ifr = (struct ifreq *)data;
2629 #ifdef INET
2630 	struct ifaddr	*ifa = (struct ifaddr *)data;
2631 #endif
2632 	qlnx_host_t	*ha;
2633 
2634 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2635 
2636 	switch (cmd) {
2637 	case SIOCSIFADDR:
2638 		QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2639 
2640 #ifdef INET
2641 		if (ifa->ifa_addr->sa_family == AF_INET) {
2642 			if_setflagbits(ifp, IFF_UP, 0);
2643 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
2644 				QLNX_LOCK(ha);
2645 				qlnx_init_locked(ha);
2646 				QLNX_UNLOCK(ha);
2647 			}
2648 			QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2649 				   cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2650 
2651 			arp_ifinit(ifp, ifa);
2652 			break;
2653 		}
2654 #endif
2655 		ether_ioctl(ifp, cmd, data);
2656 		break;
2657 
2658 	case SIOCSIFMTU:
2659 		QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2660 
2661 		if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2662 			ret = EINVAL;
2663 		} else {
2664 			QLNX_LOCK(ha);
2665 			if_setmtu(ifp, ifr->ifr_mtu);
2666 			ha->max_frame_size =
2667 				if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2668 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2669 				qlnx_init_locked(ha);
2670 			}
2671 
2672 			QLNX_UNLOCK(ha);
2673 		}
2674 
2675 		break;
2676 
2677 	case SIOCSIFFLAGS:
2678 		QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2679 
2680 		QLNX_LOCK(ha);
2681 		flags = if_getflags(ifp);
2682 
2683 		if (flags & IFF_UP) {
2684 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2685 				if ((flags ^ ha->if_flags) &
2686 					IFF_PROMISC) {
2687 					ret = qlnx_set_promisc(ha, flags & IFF_PROMISC);
2688 				} else if ((if_getflags(ifp) ^ ha->if_flags) &
2689 					IFF_ALLMULTI) {
2690 					ret = qlnx_set_allmulti(ha, flags & IFF_ALLMULTI);
2691 				}
2692 			} else {
2693 				ha->max_frame_size = if_getmtu(ifp) +
2694 					ETHER_HDR_LEN + ETHER_CRC_LEN;
2695 				qlnx_init_locked(ha);
2696 			}
2697 		} else {
2698 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2699 				qlnx_stop(ha);
2700 		}
2701 
2702 		ha->if_flags = if_getflags(ifp);
2703 		QLNX_UNLOCK(ha);
2704 		break;
2705 
2706 	case SIOCADDMULTI:
2707 		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2708 
2709 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2710 			if (qlnx_set_multi(ha, 1))
2711 				ret = EINVAL;
2712 		}
2713 		break;
2714 
2715 	case SIOCDELMULTI:
2716 		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2717 
2718 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2719 			if (qlnx_set_multi(ha, 0))
2720 				ret = EINVAL;
2721 		}
2722 		break;
2723 
2724 	case SIOCSIFMEDIA:
2725 	case SIOCGIFMEDIA:
2726 		QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2727 
2728 		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2729 		break;
2730 
2731 	case SIOCSIFCAP:
2732 
2733 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2734 
2735 		QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2736 
2737 		if (mask & IFCAP_HWCSUM)
2738 			if_togglecapenable(ifp, IFCAP_HWCSUM);
2739 		if (mask & IFCAP_TSO4)
2740 			if_togglecapenable(ifp, IFCAP_TSO4);
2741 		if (mask & IFCAP_TSO6)
2742 			if_togglecapenable(ifp, IFCAP_TSO6);
2743 		if (mask & IFCAP_VLAN_HWTAGGING)
2744 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2745 		if (mask & IFCAP_VLAN_HWTSO)
2746 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2747 		if (mask & IFCAP_LRO)
2748 			if_togglecapenable(ifp, IFCAP_LRO);
2749 
2750 		QLNX_LOCK(ha);
2751 
2752 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2753 			qlnx_init_locked(ha);
2754 
2755 		QLNX_UNLOCK(ha);
2756 
2757 		VLAN_CAPABILITIES(ifp);
2758 		break;
2759 
2760 	case SIOCGI2C:
2761 	{
2762 		struct ifi2creq i2c;
2763 		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2764 		struct ecore_ptt *p_ptt;
2765 
2766 		ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2767 
2768 		if (ret)
2769 			break;
2770 
2771 		if ((i2c.len > sizeof (i2c.data)) ||
2772 			(i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2773 			ret = EINVAL;
2774 			break;
2775 		}
2776 
2777 		p_ptt = ecore_ptt_acquire(p_hwfn);
2778 
2779 		if (!p_ptt) {
2780 			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2781 			ret = -1;
2782 			break;
2783 		}
2784 
2785 		ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2786 			(ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2787 			i2c.len, &i2c.data[0]);
2788 
2789 		ecore_ptt_release(p_hwfn, p_ptt);
2790 
2791 		if (ret) {
2792 			ret = -1;
2793 			break;
2794 		}
2795 
2796 		ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2797 
2798 		QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2799 			 len = %d addr = 0x%02x offset = 0x%04x \
2800 			 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2801 			 0x%02x 0x%02x 0x%02x\n",
2802 			ret, i2c.len, i2c.dev_addr, i2c.offset,
2803 			i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2804 			i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2805 		break;
2806 	}
2807 
2808 	default:
2809 		QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2810 		ret = ether_ioctl(ifp, cmd, data);
2811 		break;
2812 	}
2813 
2814 	return (ret);
2815 }
2816 
2817 static int
2818 qlnx_media_change(if_t ifp)
2819 {
2820 	qlnx_host_t	*ha;
2821 	struct ifmedia	*ifm;
2822 	int		ret = 0;
2823 
2824 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2825 
2826 	QL_DPRINT2(ha, "enter\n");
2827 
2828 	ifm = &ha->media;
2829 
2830 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2831 		ret = EINVAL;
2832 
2833 	QL_DPRINT2(ha, "exit\n");
2834 
2835 	return (ret);
2836 }
2837 
2838 static void
2839 qlnx_media_status(if_t ifp, struct ifmediareq *ifmr)
2840 {
2841 	qlnx_host_t		*ha;
2842 
2843 	ha = (qlnx_host_t *)if_getsoftc(ifp);
2844 
2845 	QL_DPRINT2(ha, "enter\n");
2846 
2847 	ifmr->ifm_status = IFM_AVALID;
2848 	ifmr->ifm_active = IFM_ETHER;
2849 
2850 	if (ha->link_up) {
2851 		ifmr->ifm_status |= IFM_ACTIVE;
2852 		ifmr->ifm_active |=
2853 			(IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2854 
2855 		if (ha->if_link.link_partner_caps &
2856 			(QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2857 			ifmr->ifm_active |=
2858 				(IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2859 	}
2860 
2861 	QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2862 
2863 	return;
2864 }
2865 
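/*
 * Name: qlnx_free_tx_pkt
 * Function: Completes the packet at the software tx consumer index:
 *      unloads its DMA map, frees the mbuf and consumes the packet's
 *      nbds buffer descriptors from the tx PBL chain. A NULL mbuf here
 *      indicates ring corruption and triggers a debug dump.
 */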
2866 static void
2867 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2868 	struct qlnx_tx_queue *txq)
2869 {
2870 	u16			idx;
2871 	struct mbuf		*mp;
2872 	bus_dmamap_t		map;
2873 	int			i;
2874 //	struct eth_tx_bd	*tx_data_bd;
2875 	struct eth_tx_1st_bd	*first_bd;
2876 	int			nbds = 0;
2877 
2878 	idx = txq->sw_tx_cons;
2879 	mp = txq->sw_tx_ring[idx].mp;
2880 	map = txq->sw_tx_ring[idx].map;
2881 
2882 	if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2883 		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2884 
2885 		QL_DPRINT1(ha, "(mp == NULL) "
2886 			" tx_idx = 0x%x"
2887 			" ecore_prod_idx = 0x%x"
2888 			" ecore_cons_idx = 0x%x"
2889 			" hw_bd_cons = 0x%x"
2890 			" txq_db_last = 0x%x"
2891 			" elem_left = 0x%x\n",
2892 			fp->rss_id,
2893 			ecore_chain_get_prod_idx(&txq->tx_pbl),
2894 			ecore_chain_get_cons_idx(&txq->tx_pbl),
2895 			le16toh(*txq->hw_cons_ptr),
2896 			txq->tx_db.raw,
2897 			ecore_chain_get_elem_left(&txq->tx_pbl));
2898 
2899 		fp->err_tx_free_pkt_null++;
2900 
2901 		//DEBUG
2902 		qlnx_trigger_dump(ha);
2903 
2904 		return;
2905 	} else {
2906 		QLNX_INC_OPACKETS((ha->ifp));
2907 		QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2908 
2909 		bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2910 		bus_dmamap_unload(ha->tx_tag, map);
2911 
2912 		fp->tx_pkts_freed++;
2913 		fp->tx_pkts_completed++;
2914 
2915 		m_freem(mp);
2916 	}
2917 
2918 	first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2919 	nbds = first_bd->data.nbds;
2920 
2921 //	BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2922 
2923 	for (i = 1; i < nbds; i++) {
2924 		/* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl);
2925 //		BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2926 	}
2927 	txq->sw_tx_ring[idx].flags = 0;
2928 	txq->sw_tx_ring[idx].mp = NULL;
2929 	txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2930 
2931 	return;
2932 }
2933 
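/*
 * Name: qlnx_tx_int
 * Function: Reaps completed tx packets until the chain consumer index
 *      catches up with the hardware BD consumer. A gap wider than
 *      TX_RING_SIZE means the consumer indices are in conflict and
 *      triggers a debug dump.
 */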
2934 static void
2935 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2936 	struct qlnx_tx_queue *txq)
2937 {
2938 	u16 hw_bd_cons;
2939 	u16 ecore_cons_idx;
2940 	uint16_t diff;
2941 	uint16_t idx, idx2;
2942 
2943 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2944 
2945 	while (hw_bd_cons !=
2946 		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2947 		diff = hw_bd_cons - ecore_cons_idx;
2948 		if ((diff > TX_RING_SIZE) ||
2949 			QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2950 			QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2951 
2952 			QL_DPRINT1(ha, "(diff = 0x%x) "
2953 				" tx_idx = 0x%x"
2954 				" ecore_prod_idx = 0x%x"
2955 				" ecore_cons_idx = 0x%x"
2956 				" hw_bd_cons = 0x%x"
2957 				" txq_db_last = 0x%x"
2958 				" elem_left = 0x%x\n",
2959 				diff,
2960 				fp->rss_id,
2961 				ecore_chain_get_prod_idx(&txq->tx_pbl),
2962 				ecore_chain_get_cons_idx(&txq->tx_pbl),
2963 				le16toh(*txq->hw_cons_ptr),
2964 				txq->tx_db.raw,
2965 				ecore_chain_get_elem_left(&txq->tx_pbl));
2966 
2967 			fp->err_tx_cons_idx_conflict++;
2968 
2969 			//DEBUG
2970 			qlnx_trigger_dump(ha);
2971 		}
2972 
2973 		idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2974 		idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
2975 		prefetch(txq->sw_tx_ring[idx].mp);
2976 		prefetch(txq->sw_tx_ring[idx2].mp);
2977 
2978 		qlnx_free_tx_pkt(ha, fp, txq);
2979 
2980 		txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2981 	}
2982 	return;
2983 }
2984 
2985 static int
2986 qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
2987 {
2988         int                     ret = 0;
2989         struct qlnx_tx_queue    *txq;
2990         qlnx_host_t *           ha;
2991         uint16_t elem_left;
2992 
2993         txq = fp->txq[0];
2994         ha = (qlnx_host_t *)fp->edev;
2995 
2996         if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) {
2997                 if(mp != NULL)
2998                         ret = drbr_enqueue(ifp, fp->tx_br, mp);
2999                 return (ret);
3000         }
3001 
3002         if(mp != NULL)
3003                 ret  = drbr_enqueue(ifp, fp->tx_br, mp);
3004 
3005         mp = drbr_peek(ifp, fp->tx_br);
3006 
3007         while (mp != NULL) {
3008                 if (qlnx_send(ha, fp, &mp)) {
3009                         if (mp != NULL) {
3010                                 drbr_putback(ifp, fp->tx_br, mp);
3011                         } else {
3012                                 fp->tx_pkts_processed++;
3013                                 drbr_advance(ifp, fp->tx_br);
3014                         }
3015                         goto qlnx_transmit_locked_exit;
3016 
3017                 } else {
3018                         drbr_advance(ifp, fp->tx_br);
3019                         fp->tx_pkts_transmitted++;
3020                         fp->tx_pkts_processed++;
3021                 }
3022 
3023                 mp = drbr_peek(ifp, fp->tx_br);
3024         }
3025 
3026 qlnx_transmit_locked_exit:
3027         if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3028                 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3029                                         < QLNX_TX_ELEM_MAX_THRESH))
3030                 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3031 
3032         QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3033         return ret;
3034 }
3035 
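/*
 * Name: qlnx_transmit
 * Function: if_transmit entry point; selects a fastpath from the mbuf's
 *      RSS flowid, then either transmits inline (tx lock acquired) or
 *      enqueues to the buf_ring and defers to the fastpath taskqueue.
 */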
3036 static int
3037 qlnx_transmit(if_t ifp, struct mbuf *mp)
3038 {
3039         qlnx_host_t		*ha = (qlnx_host_t *)if_getsoftc(ifp);
3040         struct qlnx_fastpath	*fp;
3041         int			rss_id = 0, ret = 0;
3042 
3043 #ifdef QLNX_TRACEPERF_DATA
3044         uint64_t tx_pkts = 0, tx_compl = 0;
3045 #endif
3046 
3047         QL_DPRINT2(ha, "enter\n");
3048 
3049         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
3050                 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3051 					ha->num_rss;
3052 
3053         fp = &ha->fp_array[rss_id];
3054 
3055         if (fp->tx_br == NULL) {
3056                 ret = EINVAL;
3057                 goto qlnx_transmit_exit;
3058         }
3059 
3060         if (mtx_trylock(&fp->tx_mtx)) {
3061 #ifdef QLNX_TRACEPERF_DATA
3062                         tx_pkts = fp->tx_pkts_transmitted;
3063                         tx_compl = fp->tx_pkts_completed;
3064 #endif
3065 
3066                         ret = qlnx_transmit_locked(ifp, fp, mp);
3067 
3068 #ifdef QLNX_TRACEPERF_DATA
3069                         fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3070                         fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3071 #endif
3072                         mtx_unlock(&fp->tx_mtx);
3073         } else {
3074                 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3075                         ret = drbr_enqueue(ifp, fp->tx_br, mp);
3076                         taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3077                 }
3078         }
3079 
3080 qlnx_transmit_exit:
3081 
3082         QL_DPRINT2(ha, "exit ret = %d\n", ret);
3083         return ret;
3084 }
3085 
3086 static void
3087 qlnx_qflush(if_t ifp)
3088 {
3089 	int			rss_id;
3090 	struct qlnx_fastpath	*fp;
3091 	struct mbuf		*mp;
3092 	qlnx_host_t		*ha;
3093 
3094 	ha = (qlnx_host_t *)if_getsoftc(ifp);
3095 
3096 	QL_DPRINT2(ha, "enter\n");
3097 
3098 	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3099 		fp = &ha->fp_array[rss_id];
3100 
3101 		if (fp == NULL)
3102 			continue;
3103 
3104 		if (fp->tx_br) {
3105 			mtx_lock(&fp->tx_mtx);
3106 
3107 			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3108 				fp->tx_pkts_freed++;
3109 				m_freem(mp);
3110 			}
3111 			mtx_unlock(&fp->tx_mtx);
3112 		}
3113 	}
3114 	QL_DPRINT2(ha, "exit\n");
3115 
3116 	return;
3117 }
3118 
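/*
 * Name: qlnx_txq_doorbell_wr32
 * Function: Writes a tx producer doorbell through the doorbell BAR and
 *      issues read barriers on both the register and doorbell BARs to
 *      flush the write to the device.
 */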
3119 static void
3120 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3121 {
3122 	uint32_t		offset;
3123 
3124 	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3125 
3126 	bus_write_4(ha->pci_dbells, offset, value);
3127 	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_READ);
3128 	bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
3129 
3130 	return;
3131 }
3132 
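/*
 * Name: qlnx_tcp_offset
 * Function: Returns the offset of the TCP payload (Ethernet + IP/IPv6 +
 *      TCP headers) for TSO framing. Uses the fixed IPv4/IPv6 header
 *      sizes and copies the network header out of the mbuf when it is
 *      not contiguous in the first segment.
 */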
3133 static uint32_t
3134 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3135 {
3136         struct ether_vlan_header	*eh = NULL;
3137         struct ip			*ip = NULL;
3138         struct ip6_hdr			*ip6 = NULL;
3139         struct tcphdr			*th = NULL;
3140         uint32_t			ehdrlen = 0, ip_hlen = 0, offset = 0;
3141         uint16_t			etype = 0;
3142         uint8_t				buf[sizeof(struct ip6_hdr)];
3143 
3144         eh = mtod(mp, struct ether_vlan_header *);
3145 
3146         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3147                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3148                 etype = ntohs(eh->evl_proto);
3149         } else {
3150                 ehdrlen = ETHER_HDR_LEN;
3151                 etype = ntohs(eh->evl_encap_proto);
3152         }
3153 
3154         switch (etype) {
3155                 case ETHERTYPE_IP:
3156                         ip = (struct ip *)(mp->m_data + ehdrlen);
3157 
3158                         ip_hlen = sizeof (struct ip);
3159 
3160                         if (mp->m_len < (ehdrlen + ip_hlen)) {
3161                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3162                                 ip = (struct ip *)buf;
3163                         }
3164 
3165                         th = (struct tcphdr *)(ip + 1);
3166 			offset = ip_hlen + ehdrlen + (th->th_off << 2);
3167                 break;
3168 
3169                 case ETHERTYPE_IPV6:
3170                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3171 
3172                         ip_hlen = sizeof(struct ip6_hdr);
3173 
3174                         if (mp->m_len < (ehdrlen + ip_hlen)) {
3175                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3176                                         buf);
3177                                 ip6 = (struct ip6_hdr *)buf;
3178                         }
3179                         th = (struct tcphdr *)(ip6 + 1);
3180 			offset = ip_hlen + ehdrlen + (th->th_off << 2);
3181                 break;
3182 
3183                 default:
3184                 break;
3185         }
3186 
3187         return (offset);
3188 }
3189 
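/*
 * Name: qlnx_tso_check
 * Function: Enforces the LSO window rule: any ETH_TX_LSO_WINDOW_BDS_NUM
 *      consecutive BDs (beyond those holding the headers) must carry at
 *      least ETH_TX_LSO_WINDOW_MIN_LEN bytes. Slides that window across
 *      the segment list and returns -1 when a window comes up short,
 *      forcing the caller to m_defrag() the chain.
 */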
3190 static __inline int
3191 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3192 	uint32_t offset)
3193 {
3194 	int			i;
3195 	uint32_t		sum, nbds_in_hdr = 1;
3196         uint32_t		window;
3197         bus_dma_segment_t	*s_seg;
3198 
3199         /* If the header spans multiple segments, skip those segments */
3200 
3201         if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3202                 return (0);
3203 
3204         i = 0;
3205 
3206         while ((i < nsegs) && (offset >= segs->ds_len)) {
3207                 offset = offset - segs->ds_len;
3208                 segs++;
3209                 i++;
3210                 nbds_in_hdr++;
3211         }
3212 
3213         window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3214 
3215         nsegs = nsegs - i;
3216 
3217         while (nsegs >= window) {
3218                 sum = 0;
3219                 s_seg = segs;
3220 
3221                 for (i = 0; i < window; i++){
3222                         sum += s_seg->ds_len;
3223                         s_seg++;
3224                 }
3225 
3226                 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3227                         fp->tx_lso_wnd_min_len++;
3228                         return (-1);
3229                 }
3230 
3231                 nsegs = nsegs - 1;
3232                 segs++;
3233         }
3234 
3235 	return (0);
3236 }
3237 
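/*
 * Name: qlnx_send
 * Function: Maps the mbuf chain for DMA in preparation for building the
 *      transmit BDs; falls back to m_defrag() when the mapping is too
 *      fragmented (EFBIG, too many segments for non-TSO, or a failed
 *      LSO window check).
 */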
3238 static int
3239 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3240 {
3241 	bus_dma_segment_t	*segs;
3242 	bus_dmamap_t		map = 0;
3243 	uint32_t		nsegs = 0;
3244 	int			ret = -1;
3245 	struct mbuf		*m_head = *m_headp;
3246 	uint16_t		idx = 0;
3247 	uint16_t		elem_left;
3248 
3249 	uint8_t			nbd = 0;
3250 	struct qlnx_tx_queue    *txq;
3251 
3252 	struct eth_tx_1st_bd    *first_bd;
3253 	struct eth_tx_2nd_bd    *second_bd;
3254 	struct eth_tx_3rd_bd    *third_bd;
3255 	struct eth_tx_bd        *tx_data_bd;
3256 
3257 	int			seg_idx = 0;
3258 	uint32_t		nbds_in_hdr = 0;
3259 	uint32_t		offset = 0;
3260 
3261 #ifdef QLNX_TRACE_PERF_DATA
3262         uint16_t                bd_used;
3263 #endif
3264 
3265 	QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3266 
3267 	if (!ha->link_up)
3268 		return (-1);
3269 
3270 	first_bd	= NULL;
3271 	second_bd	= NULL;
3272 	third_bd	= NULL;
3273 	tx_data_bd	= NULL;
3274 
3275 	txq = fp->txq[0];
3276 
3277         if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3278 		QLNX_TX_ELEM_MIN_THRESH) {
3279                 fp->tx_nsegs_gt_elem_left++;
3280                 fp->err_tx_nsegs_gt_elem_left++;
3281 
3282                 return (ENOBUFS);
3283         }
3284 
3285 	idx = txq->sw_tx_prod;
3286 
3287 	map = txq->sw_tx_ring[idx].map;
3288 	segs = txq->segs;
3289 
3290 	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3291 			BUS_DMA_NOWAIT);
3292 
3293 	if (ha->dbg_trace_tso_pkt_len) {
3294 		if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3295 			if (!fp->tx_tso_min_pkt_len) {
3296 				fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3297 				fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
3298 			} else {
3299 				if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3300 					fp->tx_tso_min_pkt_len =
3301 						m_head->m_pkthdr.len;
3302 				if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3303 					fp->tx_tso_max_pkt_len =
3304 						m_head->m_pkthdr.len;
3305 			}
3306 		}
3307 	}
3308 
3309 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3310 		offset = qlnx_tcp_offset(ha, m_head);
3311 
3312 	if ((ret == EFBIG) ||
3313 		((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3314 			(!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3315 		((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3316 			qlnx_tso_check(fp, segs, nsegs, offset))))) {
3317 		struct mbuf *m;
3318 
3319 		QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3320 
3321 		fp->tx_defrag++;
3322 
3323 		m = m_defrag(m_head, M_NOWAIT);
3324 		if (m == NULL) {
3325 			fp->err_tx_defrag++;
3326 			fp->tx_pkts_freed++;
3327 			m_freem(m_head);
3328 			*m_headp = NULL;
3329 			QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3330 			return (ENOBUFS);
3331 		}
3332 
3333 		m_head = m;
3334 		*m_headp = m_head;
3335 
3336 		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3337 				segs, &nsegs, BUS_DMA_NOWAIT))) {
3338 			fp->err_tx_defrag_dmamap_load++;
3339 
3340 			QL_DPRINT1(ha,
3341 				"bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3342 				ret, m_head->m_pkthdr.len);
3343 
3344 			fp->tx_pkts_freed++;
3345 			m_freem(m_head);
3346 			*m_headp = NULL;
3347 
3348 			return (ret);
3349 		}
3350 
3351 		if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3352 			!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3353 			fp->err_tx_non_tso_max_seg++;
3354 
3355 			QL_DPRINT1(ha,
3356 				"(%d) nsegs too many for non-TSO [%d, %d]\n",
3357 				ret, nsegs, m_head->m_pkthdr.len);
3358 
3359 			fp->tx_pkts_freed++;
3360 			m_freem(m_head);
3361 			*m_headp = NULL;
3362 
3363 			return (ret);
3364 		}
3365 		if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3366 			offset = qlnx_tcp_offset(ha, m_head);
3367 
3368 	} else if (ret) {
3369 		fp->err_tx_dmamap_load++;
3370 
3371 		QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3372 			   ret, m_head->m_pkthdr.len);
3373 		fp->tx_pkts_freed++;
3374 		m_freem(m_head);
3375 		*m_headp = NULL;
3376 		return (ret);
3377 	}
3378 
3379 	QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3380 
3381 	if (ha->dbg_trace_tso_pkt_len) {
3382 		if (nsegs < QLNX_FP_MAX_SEGS)
3383 			fp->tx_pkts[(nsegs - 1)]++;
3384 		else
3385 			fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3386 	}
3387 
3388 #ifdef QLNX_TRACE_PERF_DATA
3389         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3390                 if(m_head->m_pkthdr.len <= 2048)
3391                         fp->tx_pkts_hist[0]++;
3392                 else if((m_head->m_pkthdr.len > 2048) &&
3393 				(m_head->m_pkthdr.len <= 4096))
3394                         fp->tx_pkts_hist[1]++;
3395                 else if((m_head->m_pkthdr.len > 4096) &&
3396 				(m_head->m_pkthdr.len <= 8192))
3397                         fp->tx_pkts_hist[2]++;
3398                 else if((m_head->m_pkthdr.len > 8192) &&
3399 				(m_head->m_pkthdr.len <= 12288 ))
3400                         fp->tx_pkts_hist[3]++;
3401                 else if((m_head->m_pkthdr.len > 12288) &&
3402 				(m_head->m_pkthdr.len <= 16384))
3403                         fp->tx_pkts_hist[4]++;
3404                 else if((m_head->m_pkthdr.len > 16384) &&
3405 				(m_head->m_pkthdr.len <= 20480))
3406                         fp->tx_pkts_hist[5]++;
3407                 else if((m_head->m_pkthdr.len > 20480) &&
3408 				(m_head->m_pkthdr.len <= 24576))
3409                         fp->tx_pkts_hist[6]++;
3410                 else if((m_head->m_pkthdr.len > 24576) &&
3411 				(m_head->m_pkthdr.len <= 28672))
3412                         fp->tx_pkts_hist[7]++;
3413                 else if((m_head->m_pkthdr.len > 28672) &&
3414 				(m_head->m_pkthdr.len <= 32768))
3415                         fp->tx_pkts_hist[8]++;
3416                 else if((m_head->m_pkthdr.len > 32768) &&
3417 				(m_head->m_pkthdr.len <= 36864))
3418                         fp->tx_pkts_hist[9]++;
3419                 else if((m_head->m_pkthdr.len > 36864) &&
3420 				(m_head->m_pkthdr.len <= 40960))
3421                         fp->tx_pkts_hist[10]++;
3422                 else if((m_head->m_pkthdr.len > 40960) &&
3423 				(m_head->m_pkthdr.len <= 45056))
3424                         fp->tx_pkts_hist[11]++;
3425                 else if((m_head->m_pkthdr.len > 45056) &&
3426 				(m_head->m_pkthdr.len <= 49152))
3427                         fp->tx_pkts_hist[12]++;
3428                 else if((m_head->m_pkthdr.len > 49512) &&
3429 				m_head->m_pkthdr.len <= 53248))
3430                         fp->tx_pkts_hist[13]++;
3431                 else if((m_head->m_pkthdr.len > 53248) &&
3432 				(m_head->m_pkthdr.len <= 57344))
3433                         fp->tx_pkts_hist[14]++;
3434                 else if((m_head->m_pkthdr.len > 53248) &&
3435 				(m_head->m_pkthdr.len <= 57344))
3436                         fp->tx_pkts_hist[15]++;
3437                 else if((m_head->m_pkthdr.len > 57344) &&
3438 				(m_head->m_pkthdr.len <= 61440))
3439                         fp->tx_pkts_hist[16]++;
3440                 else
3441                         fp->tx_pkts_hist[17]++;
3442         }
3443 
3444         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3445                 elem_left =  ecore_chain_get_elem_left(&txq->tx_pbl);
3446                 bd_used = TX_RING_SIZE - elem_left;
3447 
3448                 if(bd_used <= 100)
3449                         fp->tx_pkts_q[0]++;
3450                 else if((bd_used > 100) && (bd_used <= 500))
3451                         fp->tx_pkts_q[1]++;
3452                 else if((bd_used > 500) && (bd_used <= 1000))
3453                         fp->tx_pkts_q[2]++;
3454                 else if((bd_used > 1000) && (bd_used <= 2000))
3455                         fp->tx_pkts_q[3]++;
3456                 else if ((bd_used > 2000) && (bd_used <= 4000))
3457                         fp->tx_pkts_q[4]++;
3458                 else if((bd_used > 4000) && (bd_used <= 5000))
3459                         fp->tx_pkts_q[5]++;
3460                 else if ((bd_used > 5000) && (bd_used <= 7000))
3461                         fp->tx_pkts_q[6]++;
3462                 else if((bd_used > 7000) && (bd_used <= 8000))
3463                         fp->tx_pkts_q[7]++;
3464                 else if((bd_used > 8000) && (bd_used <= 9000))
3465                         fp->tx_pkts_q[8]++;
3466                 else if((bd_used > 9000) && (bd_used <= 10000))
3467                         fp->tx_pkts_q[9]++;
3468                 else if((bd_used > 10000) && (bd_used <= 11000))
3469                         fp->tx_pkts_q[10]++;
3470                 else if((bd_used > 11000) && (bd_used <= 12000))
3471                         fp->tx_pkts_q[11]++;
3472                 else if((bd_used > 12000) && (bd_used <= 13000))
3473                         fp->tx_pkts_q[12]++;
3474                 else if((bd_used > 13000) && (bd_used <= 14000))
3475                         fp->tx_pkts_q[13]++;
3476                 else if((bd_used > 14000) && (bd_used <= 15000))
3477                         fp->tx_pkts_q[14]++;
3478                 else if ((bd_used > 15000) && (bd_used <= 16000))
3479                         fp->tx_pkts_q[15]++;
3480                 else
3481                         fp->tx_pkts_q[16]++;
3482         }
3483 
3484 #endif /* end of QLNX_TRACE_PERF_DATA */
3485 
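	/*
	 * Make sure the PBL has room for every segment plus the reserved
	 * elements; if not, reclaim completed descriptors once before
	 * failing with ENOBUFS and marking the ring full.
	 */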
3486 	if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3487 		(int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3488 		QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs"
3489 			" in chain[%d] trying to free packets\n",
3490 			nsegs, elem_left, fp->rss_id);
3491 
3492 		fp->tx_nsegs_gt_elem_left++;
3493 
3494 		(void)qlnx_tx_int(ha, fp, txq);
3495 
3496 		if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3497 			ecore_chain_get_elem_left(&txq->tx_pbl))) {
3498 			QL_DPRINT1(ha,
3499 				"(%d, 0x%x) insuffient BDs in chain[%d]\n",
3500 				nsegs, elem_left, fp->rss_id);
3501 
3502 			fp->err_tx_nsegs_gt_elem_left++;
3503 			fp->tx_ring_full = 1;
3504 			if (ha->storm_stats_enable)
3505 				ha->storm_stats_gather = 1;
3506 			return (ENOBUFS);
3507 		}
3508 	}
3509 
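	/*
	 * Build the BD chain: the 1st BD carries per-packet flags
	 * (IP/L4 checksum offload, VLAN insertion, LSO), the 2nd/3rd BDs
	 * carry TSO header and MSS metadata when needed, and each
	 * remaining DMA segment gets a plain data BD.
	 */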
3510 	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3511 
3512 	txq->sw_tx_ring[idx].mp = m_head;
3513 
3514 	first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3515 
3516 	memset(first_bd, 0, sizeof(*first_bd));
3517 
3518 	first_bd->data.bd_flags.bitfields =
3519 		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3520 
3521 	BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3522 
3523 	nbd++;
3524 
3525 	if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3526 		first_bd->data.bd_flags.bitfields |=
3527 			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3528 	}
3529 
3530 	if (m_head->m_pkthdr.csum_flags &
3531 		(CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3532 		first_bd->data.bd_flags.bitfields |=
3533 			(1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3534 	}
3535 
3536         if (m_head->m_flags & M_VLANTAG) {
3537                 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3538 		first_bd->data.bd_flags.bitfields |=
3539 			(1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3540         }
3541 
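	/*
	 * For LSO the 3rd BD's HDR_NBD field tells the device how many
	 * BDs hold the packet header, so the header must land at the
	 * front of the chain. Three layouts follow, keyed on where the
	 * header ends relative to the first DMA segment: exactly at its
	 * end, inside it, or somewhere in a later segment.
	 */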
3542 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3543                 first_bd->data.bd_flags.bitfields |=
3544 			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3545 		first_bd->data.bd_flags.bitfields |=
3546 			(1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3547 
3548 		nbds_in_hdr = 1;
3549 
3550 		if (offset == segs->ds_len) {
3551 			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3552 			segs++;
3553 			seg_idx++;
3554 
3555 			second_bd = (struct eth_tx_2nd_bd *)
3556 					ecore_chain_produce(&txq->tx_pbl);
3557 			memset(second_bd, 0, sizeof(*second_bd));
3558 			nbd++;
3559 
3560 			if (seg_idx < nsegs) {
3561 				BD_SET_UNMAP_ADDR_LEN(second_bd, \
3562 					(segs->ds_addr), (segs->ds_len));
3563 				segs++;
3564 				seg_idx++;
3565 			}
3566 
3567 			third_bd = (struct eth_tx_3rd_bd *)
3568 					ecore_chain_produce(&txq->tx_pbl);
3569 			memset(third_bd, 0, sizeof(*third_bd));
3570 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3571 			third_bd->data.bitfields |=
3572 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3573 			nbd++;
3574 
3575 			if (seg_idx < nsegs) {
3576 				BD_SET_UNMAP_ADDR_LEN(third_bd, \
3577 					(segs->ds_addr), (segs->ds_len));
3578 				segs++;
3579 				seg_idx++;
3580 			}
3581 
3582 			for (; seg_idx < nsegs; seg_idx++) {
3583 				tx_data_bd = (struct eth_tx_bd *)
3584 					ecore_chain_produce(&txq->tx_pbl);
3585 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3586 				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3587 					segs->ds_addr,\
3588 					segs->ds_len);
3589 				segs++;
3590 				nbd++;
3591 			}
3592 
3593 		} else if (offset < segs->ds_len) {
3594 			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3595 
3596 			second_bd = (struct eth_tx_2nd_bd *)
3597 					ecore_chain_produce(&txq->tx_pbl);
3598 			memset(second_bd, 0, sizeof(*second_bd));
3599 			BD_SET_UNMAP_ADDR_LEN(second_bd, \
3600 				(segs->ds_addr + offset),\
3601 				(segs->ds_len - offset));
3602 			nbd++;
3603 			segs++;
3604 
3605 			third_bd = (struct eth_tx_3rd_bd *)
3606 					ecore_chain_produce(&txq->tx_pbl);
3607 			memset(third_bd, 0, sizeof(*third_bd));
3608 
3609 			BD_SET_UNMAP_ADDR_LEN(third_bd, \
3610 					segs->ds_addr,\
3611 					segs->ds_len);
3612 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3613 			third_bd->data.bitfields |=
3614 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3615 			segs++;
3616 			nbd++;
3617 
3618 			for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3619 				tx_data_bd = (struct eth_tx_bd *)
3620 					ecore_chain_produce(&txq->tx_pbl);
3621 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3622 				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3623 					segs->ds_addr,\
3624 					segs->ds_len);
3625 				segs++;
3626 				nbd++;
3627 			}
3628 
3629 		} else {
3630 			offset = offset - segs->ds_len;
3631 			segs++;
3632 
3633 			for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3634 				if (offset)
3635 					nbds_in_hdr++;
3636 
3637 				tx_data_bd = (struct eth_tx_bd *)
3638 					ecore_chain_produce(&txq->tx_pbl);
3639 				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3640 
3641 				if (second_bd == NULL) {
3642 					second_bd = (struct eth_tx_2nd_bd *)
3643 								tx_data_bd;
3644 				} else if (third_bd == NULL) {
3645 					third_bd = (struct eth_tx_3rd_bd *)
3646 								tx_data_bd;
3647 				}
3648 
3649 				if (offset && (offset < segs->ds_len)) {
3650 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3651 						segs->ds_addr, offset);
3652 
3653 					tx_data_bd = (struct eth_tx_bd *)
3654 					ecore_chain_produce(&txq->tx_pbl);
3655 
3656 					memset(tx_data_bd, 0,
3657 						sizeof(*tx_data_bd));
3658 
3659 					if (second_bd == NULL) {
3660 						second_bd =
3661 					(struct eth_tx_2nd_bd *)tx_data_bd;
3662 					} else if (third_bd == NULL) {
3663 						third_bd =
3664 					(struct eth_tx_3rd_bd *)tx_data_bd;
3665 					}
3666 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3667 						(segs->ds_addr + offset), \
3668 						(segs->ds_len - offset));
3669 					nbd++;
3670 					offset = 0;
3671 				} else {
3672 					if (offset)
3673 						offset = offset - segs->ds_len;
3674 					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3675 						segs->ds_addr, segs->ds_len);
3676 				}
3677 				segs++;
3678 				nbd++;
3679 			}
3680 
3681 			if (third_bd == NULL) {
3682 				third_bd = (struct eth_tx_3rd_bd *)
3683 					ecore_chain_produce(&txq->tx_pbl);
3684 				memset(third_bd, 0, sizeof(*third_bd));
3685 			}
3686 
3687 			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3688 			third_bd->data.bitfields |=
3689 				(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3690 		}
3691 		fp->tx_tso_pkts++;
3692 	} else {
3693 		segs++;
3694 		for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3695 			tx_data_bd = (struct eth_tx_bd *)
3696 					ecore_chain_produce(&txq->tx_pbl);
3697 			memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3698 			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3699 				segs->ds_len);
3700 			segs++;
3701 			nbd++;
3702 		}
3703 		first_bd->data.bitfields =
3704 			(m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3705 				 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3706 		first_bd->data.bitfields =
3707 			htole16(first_bd->data.bitfields);
3708 		fp->tx_non_tso_pkts++;
3709 	}
3710 
3711 	first_bd->data.nbds = nbd;
3712 
3713 	if (ha->dbg_trace_tso_pkt_len) {
3714 		if (fp->tx_tso_max_nsegs < nsegs)
3715 			fp->tx_tso_max_nsegs = nsegs;
3716 
3717 		if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3718 			fp->tx_tso_min_nsegs = nsegs;
3719 	}
3720 
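	/*
	 * Record the mapping, advance the software producer, and ring
	 * the doorbell with the new PBL producer index.
	 */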
3721 	txq->sw_tx_ring[idx].nsegs = nsegs;
3722 	txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3723 
3724 	txq->tx_db.data.bd_prod =
3725 		htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3726 
3727 	qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3728 
3729 	QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3730 	return (0);
3731 }
3732 
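/*
 * Mark the interface down, cycle each fastpath's tx mutex and poke its
 * taskqueue so every tx thread observes the cleared IFF_DRV_RUNNING
 * flag, then tear the device down via qlnx_unload().
 */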
3733 static void
3734 qlnx_stop(qlnx_host_t *ha)
3735 {
3736 	if_t		ifp = ha->ifp;
3737 	int		i;
3738 
3739 	if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
3740 
3741 	/*
3742 	 * We simply lock and unlock each fp->tx_mtx to
3743 	 * propagate the if_drv_flags
3744 	 * state to each tx thread
3745 	 */
3746         QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3747 
3748 	if (ha->state == QLNX_STATE_OPEN) {
3749         	for (i = 0; i < ha->num_rss; i++) {
3750 			struct qlnx_fastpath *fp = &ha->fp_array[i];
3751 
3752 			mtx_lock(&fp->tx_mtx);
3753 			mtx_unlock(&fp->tx_mtx);
3754 
3755 			if (fp->fp_taskqueue != NULL)
3756 				taskqueue_enqueue(fp->fp_taskqueue,
3757 					&fp->fp_task);
3758 		}
3759 	}
3760 #ifdef QLNX_ENABLE_IWARP
3761 	if (qlnx_vf_device(ha) != 0) {
3762 		qlnx_rdma_dev_close(ha);
3763 	}
3764 #endif /* #ifdef QLNX_ENABLE_IWARP */
3765 
3766 	qlnx_unload(ha);
3767 
3768 	return;
3769 }
3770 
3771 static int
3772 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3773 {
3774         return(TX_RING_SIZE - 1);
3775 }
3776 
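/*
 * Return the port MAC address. A PF reports its hardware MAC directly;
 * a VF first consults its bulletin board and adopts a PF-forced MAC
 * when one is present.
 */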
3777 uint8_t *
3778 qlnx_get_mac_addr(qlnx_host_t *ha)
3779 {
3780 	struct ecore_hwfn	*p_hwfn;
3781 	unsigned char mac[ETHER_ADDR_LEN];
3782 	uint8_t			p_is_forced;
3783 
3784 	p_hwfn = &ha->cdev.hwfns[0];
3785 
3786 	if (qlnx_vf_device(ha) != 0)
3787 		return (p_hwfn->hw_info.hw_mac_addr);
3788 
3789 	ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3790 	if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3791 		true) {
3792 		device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3793 			" mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3794 			p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3795         	memcpy(ha->primary_mac, mac, ETH_ALEN);
3796 	}
3797 
3798 	return (ha->primary_mac);
3799 }
3800 
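/*
 * Translate the firmware-reported media type and link speed into an
 * ifmedia subtype (e.g. 100G fiber -> SR4, 25G twinax -> CR).
 */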
3801 static uint32_t
3802 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3803 {
3804 	uint32_t	ifm_type = 0;
3805 
3806 	switch (if_link->media_type) {
3807 	case MEDIA_MODULE_FIBER:
3808 	case MEDIA_UNSPECIFIED:
3809 		if (if_link->speed == (100 * 1000))
3810 			ifm_type = QLNX_IFM_100G_SR4;
3811 		else if (if_link->speed == (40 * 1000))
3812 			ifm_type = IFM_40G_SR4;
3813 		else if (if_link->speed == (25 * 1000))
3814 			ifm_type = QLNX_IFM_25G_SR;
3815 		else if (if_link->speed == (10 * 1000))
3816 			ifm_type = (IFM_10G_LR | IFM_10G_SR);
3817 		else if (if_link->speed == (1 * 1000))
3818 			ifm_type = (IFM_1000_SX | IFM_1000_LX);
3819 
3820 		break;
3821 
3822 	case MEDIA_DA_TWINAX:
3823 		if (if_link->speed == (100 * 1000))
3824 			ifm_type = QLNX_IFM_100G_CR4;
3825 		else if (if_link->speed == (40 * 1000))
3826 			ifm_type = IFM_40G_CR4;
3827 		else if (if_link->speed == (25 * 1000))
3828 			ifm_type = QLNX_IFM_25G_CR;
3829 		else if (if_link->speed == (10 * 1000))
3830 			ifm_type = IFM_10G_TWINAX;
3831 
3832 		break;
3833 
3834 	default:
3835 		ifm_type = IFM_UNKNOWN;
3836 		break;
3837 	}
3838 	return (ifm_type);
3839 }
3840 
3841 /*****************************************************************************
3842  * Interrupt Service Functions
3843  *****************************************************************************/
3844 
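/*
 * A frame longer than one receive buffer arrives scattered across
 * consecutive ring entries; chain the remaining buffers onto mp_head
 * as non-PKTHDR mbufs, replenishing the ring as each one is taken.
 * Returns 0 on success, -1 if a buffer is missing or can't be replaced.
 */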
3845 static int
3846 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3847 	struct mbuf *mp_head, uint16_t len)
3848 {
3849 	struct mbuf		*mp, *mpf, *mpl;
3850 	struct sw_rx_data	*sw_rx_data;
3851 	struct qlnx_rx_queue	*rxq;
3852 	uint16_t 		len_in_buffer;
3853 
3854 	rxq = fp->rxq;
3855 	mpf = mpl = mp = NULL;
3856 
3857 	while (len) {
3858         	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3859 
3860                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3861                 mp = sw_rx_data->data;
3862 
3863 		if (mp == NULL) {
3864                 	QL_DPRINT1(ha, "mp = NULL\n");
3865 			fp->err_rx_mp_null++;
3866         		rxq->sw_rx_cons  =
3867 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3868 
3869 			if (mpf != NULL)
3870 				m_freem(mpf);
3871 
3872 			return (-1);
3873 		}
3874 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3875 			BUS_DMASYNC_POSTREAD);
3876 
3877                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3878                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3879 				" incoming packet and reusing its buffer\n");
3880 
3881                         qlnx_reuse_rx_data(rxq);
3882                         fp->err_rx_alloc_errors++;
3883 
3884 			if (mpf != NULL)
3885 				m_freem(mpf);
3886 
3887 			return (-1);
3888 		}
3889                 ecore_chain_consume(&rxq->rx_bd_ring);
3890 
3891 		if (len > rxq->rx_buf_size)
3892 			len_in_buffer = rxq->rx_buf_size;
3893 		else
3894 			len_in_buffer = len;
3895 
3896 		len = len - len_in_buffer;
3897 
3898 		mp->m_flags &= ~M_PKTHDR;
3899 		mp->m_next = NULL;
3900 		mp->m_len = len_in_buffer;
3901 
3902 		if (mpf == NULL)
3903 			mpf = mpl = mp;
3904 		else {
3905 			mpl->m_next = mp;
3906 			mpl = mp;
3907 		}
3908 	}
3909 
3910 	if (mpf != NULL)
3911 		mp_head->m_next = mpf;
3912 
3913 	return (0);
3914 }
3915 
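/*
 * TPA (hardware LRO) start: the CQE opens aggregation context
 * agg_index. Claim the current ring buffer as the head mbuf, absorb
 * any buffers listed in ext_bd_len_list[], and park the chain in
 * rxq->tpa_info[] until the matching TPA_END CQE arrives. On any
 * failure the context is flipped to QLNX_AGG_STATE_ERROR so the
 * remaining CQEs of this aggregation are discarded.
 */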
3916 static void
3917 qlnx_tpa_start(qlnx_host_t *ha,
3918 	struct qlnx_fastpath *fp,
3919 	struct qlnx_rx_queue *rxq,
3920 	struct eth_fast_path_rx_tpa_start_cqe *cqe)
3921 {
3922 	uint32_t		agg_index;
3923         if_t ifp = ha->ifp;
3924 	struct mbuf		*mp;
3925 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
3926 	struct sw_rx_data	*sw_rx_data;
3927 	dma_addr_t		addr;
3928 	bus_dmamap_t		map;
3929 	struct eth_rx_bd	*rx_bd;
3930 	int			i;
3931 	uint8_t			hash_type;
3932 
3933 	agg_index = cqe->tpa_agg_index;
3934 
3935         QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3936                 \t type = 0x%x\n \
3937                 \t bitfields = 0x%x\n \
3938                 \t seg_len = 0x%x\n \
3939                 \t pars_flags = 0x%x\n \
3940                 \t vlan_tag = 0x%x\n \
3941                 \t rss_hash = 0x%x\n \
3942                 \t len_on_first_bd = 0x%x\n \
3943                 \t placement_offset = 0x%x\n \
3944                 \t tpa_agg_index = 0x%x\n \
3945                 \t header_len = 0x%x\n \
3946                 \t ext_bd_len_list[0] = 0x%x\n \
3947                 \t ext_bd_len_list[1] = 0x%x\n \
3948                 \t ext_bd_len_list[2] = 0x%x\n \
3949                 \t ext_bd_len_list[3] = 0x%x\n \
3950                 \t ext_bd_len_list[4] = 0x%x\n",
3951                 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3952                 cqe->pars_flags.flags, cqe->vlan_tag,
3953                 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3954                 cqe->tpa_agg_index, cqe->header_len,
3955                 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3956                 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3957                 cqe->ext_bd_len_list[4]);
3958 
3959 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3960 		fp->err_rx_tpa_invalid_agg_num++;
3961 		return;
3962 	}
3963 
3964 	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3965 	bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3966 	mp = sw_rx_data->data;
3967 
3968 	QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3969 
3970 	if (mp == NULL) {
3971                	QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3972 		fp->err_rx_mp_null++;
3973        		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3974 
3975 		return;
3976 	}
3977 
3978 	if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3979 		QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3980 			" flags = %x, dropping incoming packet\n", fp->rss_id,
3981 			rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3982 
3983 		fp->err_rx_hw_errors++;
3984 
3985 		qlnx_reuse_rx_data(rxq);
3986 
3987 		QLNX_INC_IERRORS(ifp);
3988 
3989 		return;
3990 	}
3991 
3992 	if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3993 		QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3994 			" dropping incoming packet and reusing its buffer\n",
3995 			fp->rss_id);
3996 
3997 		fp->err_rx_alloc_errors++;
3998 		QLNX_INC_IQDROPS(ifp);
3999 
4000 		/*
4001 		 * Load the tpa mbuf into the rx ring and save the
4002 		 * posted mbuf
4003 		 */
4004 
4005 		map = sw_rx_data->map;
4006 		addr = sw_rx_data->dma_addr;
4007 
4008 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4009 
4010 		sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4011 		sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4012 		sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4013 
4014 		rxq->tpa_info[agg_index].rx_buf.data = mp;
4015 		rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4016 		rxq->tpa_info[agg_index].rx_buf.map = map;
4017 
4018 		rx_bd = (struct eth_rx_bd *)
4019 				ecore_chain_produce(&rxq->rx_bd_ring);
4020 
4021 		rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4022 		rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4023 
4024 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4025 			BUS_DMASYNC_PREREAD);
4026 
4027 		rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4028 		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4029 
4030 		ecore_chain_consume(&rxq->rx_bd_ring);
4031 
4032 		/* Now reuse any buffers posted in ext_bd_len_list */
4033 		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4034 			if (cqe->ext_bd_len_list[i] == 0)
4035 				break;
4036 
4037 			qlnx_reuse_rx_data(rxq);
4038 		}
4039 
4040 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4041 		return;
4042 	}
4043 
4044 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4045 		QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4046 			" dropping incoming packet and reusing its buffer\n",
4047 			fp->rss_id);
4048 
4049 		QLNX_INC_IQDROPS(ifp);
4050 
4051 		/* if we already have mbuf head in aggregation free it */
4052 		if (rxq->tpa_info[agg_index].mpf) {
4053 			m_freem(rxq->tpa_info[agg_index].mpf);
4054 			rxq->tpa_info[agg_index].mpl = NULL;
4055 		}
4056 		rxq->tpa_info[agg_index].mpf = mp;
4057 		rxq->tpa_info[agg_index].mpl = NULL;
4058 
4059 		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4060 		ecore_chain_consume(&rxq->rx_bd_ring);
4061 
4062 		/* Now reuse any buffers posted in ext_bd_len_list */
4063 		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4064 			if (cqe->ext_bd_len_list[i] == 0)
4065 				break;
4066 
4067 			qlnx_reuse_rx_data(rxq);
4068 		}
4069 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4070 
4071 		return;
4072 	}
4073 
4074 	/*
4075 	 * first process the ext_bd_len_list
4076 	 * if this fails then we simply drop the packet
4077 	 */
4078 	ecore_chain_consume(&rxq->rx_bd_ring);
4079 	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4080 
4081 	for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4082 		QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4083 
4084 		if (cqe->ext_bd_len_list[i] == 0)
4085 			break;
4086 
4087 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4088 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4089 			BUS_DMASYNC_POSTREAD);
4090 
4091 		mpc = sw_rx_data->data;
4092 
4093 		if (mpc == NULL) {
4094 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4095 			fp->err_rx_mp_null++;
4096 			if (mpf != NULL)
4097 				m_freem(mpf);
4098 			mpf = mpl = NULL;
4099 			rxq->tpa_info[agg_index].agg_state =
4100 						QLNX_AGG_STATE_ERROR;
4101 			ecore_chain_consume(&rxq->rx_bd_ring);
4102 			rxq->sw_rx_cons =
4103 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4104 			continue;
4105 		}
4106 
4107 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4108 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4109 				" dropping incoming packet and reusing its"
4110 				" buffer\n", fp->rss_id);
4111 
4112 			qlnx_reuse_rx_data(rxq);
4113 
4114 			if (mpf != NULL)
4115 				m_freem(mpf);
4116 			mpf = mpl = NULL;
4117 
4118 			rxq->tpa_info[agg_index].agg_state =
4119 						QLNX_AGG_STATE_ERROR;
4120 
4121 			ecore_chain_consume(&rxq->rx_bd_ring);
4122 			rxq->sw_rx_cons =
4123 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4124 
4125 			continue;
4126 		}
4127 
4128 		mpc->m_flags &= ~M_PKTHDR;
4129 		mpc->m_next = NULL;
4130 		mpc->m_len = cqe->ext_bd_len_list[i];
4131 
4132 		if (mpf == NULL) {
4133 			mpf = mpl = mpc;
4134 		} else {
4135 			mpl->m_len = ha->rx_buf_size;
4136 			mpl->m_next = mpc;
4137 			mpl = mpc;
4138 		}
4139 
4140 		ecore_chain_consume(&rxq->rx_bd_ring);
4141 		rxq->sw_rx_cons =
4142 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4143 	}
4144 
4145 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4146 		QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4147 			" incoming packet and reusing its buffer\n",
4148 			fp->rss_id);
4149 
4150 		QLNX_INC_IQDROPS(ifp);
4151 
4152 		rxq->tpa_info[agg_index].mpf = mp;
4153 		rxq->tpa_info[agg_index].mpl = NULL;
4154 
4155 		return;
4156 	}
4157 
4158         rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4159 
4160         if (mpf != NULL) {
4161                 mp->m_len = ha->rx_buf_size;
4162                 mp->m_next = mpf;
4163                 rxq->tpa_info[agg_index].mpf = mp;
4164                 rxq->tpa_info[agg_index].mpl = mpl;
4165         } else {
4166                 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4167                 rxq->tpa_info[agg_index].mpf = mp;
4168                 rxq->tpa_info[agg_index].mpl = mp;
4169                 mp->m_next = NULL;
4170         }
4171 
4172 	mp->m_flags |= M_PKTHDR;
4173 
4174 	/* assign this packet to the receiving interface */
4175 	mp->m_pkthdr.rcvif = ifp;
4176 
4177 	/* assume no hardware checksum has been completed */
4178 	mp->m_pkthdr.csum_flags = 0;
4179 
4180 	//mp->m_pkthdr.flowid = fp->rss_id;
4181 	mp->m_pkthdr.flowid = cqe->rss_hash;
4182 
4183 	hash_type = cqe->bitfields &
4184 			(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4185 			ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4186 
4187 	switch (hash_type) {
4188 	case RSS_HASH_TYPE_IPV4:
4189 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4190 		break;
4191 
4192 	case RSS_HASH_TYPE_TCP_IPV4:
4193 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4194 		break;
4195 
4196 	case RSS_HASH_TYPE_IPV6:
4197 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4198 		break;
4199 
4200 	case RSS_HASH_TYPE_TCP_IPV6:
4201 		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4202 		break;
4203 
4204 	default:
4205 		M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4206 		break;
4207 	}
4208 
4209 	mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4210 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4211 
4212 	mp->m_pkthdr.csum_data = 0xFFFF;
4213 
4214 	if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4215 		mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4216 		mp->m_flags |= M_VLANTAG;
4217 	}
4218 
4219 	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4220 
4221         QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4222 		fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4223                 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
4224 
4225 	return;
4226 }
4227 
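/*
 * TPA continuation: append the buffers named in len_list[] to the
 * mbuf chain of the aggregation opened by the TPA_START CQE.
 */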
4228 static void
4229 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4230 	struct qlnx_rx_queue *rxq,
4231 	struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4232 {
4233 	struct sw_rx_data	*sw_rx_data;
4234 	int			i;
4235 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
4236 	struct mbuf		*mp;
4237 	uint32_t		agg_index;
4238 
4239         QL_DPRINT7(ha, "[%d]: enter\n \
4240                 \t type = 0x%x\n \
4241                 \t tpa_agg_index = 0x%x\n \
4242                 \t len_list[0] = 0x%x\n \
4243                 \t len_list[1] = 0x%x\n \
4244                 \t len_list[2] = 0x%x\n \
4245                 \t len_list[3] = 0x%x\n \
4246                 \t len_list[4] = 0x%x\n \
4247                 \t len_list[5] = 0x%x\n",
4248                 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4249                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4250                 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4251 
4252 	agg_index = cqe->tpa_agg_index;
4253 
4254 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4255 		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4256 		fp->err_rx_tpa_invalid_agg_num++;
4257 		return;
4258 	}
4259 
4260 	for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4261 		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4262 
4263 		if (cqe->len_list[i] == 0)
4264 			break;
4265 
4266 		if (rxq->tpa_info[agg_index].agg_state !=
4267 			QLNX_AGG_STATE_START) {
4268 			qlnx_reuse_rx_data(rxq);
4269 			continue;
4270 		}
4271 
4272 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4273 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4274 			BUS_DMASYNC_POSTREAD);
4275 
4276 		mpc = sw_rx_data->data;
4277 
4278 		if (mpc == NULL) {
4279 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4280 
4281 			fp->err_rx_mp_null++;
4282 			if (mpf != NULL)
4283 				m_freem(mpf);
4284 			mpf = mpl = NULL;
4285 			rxq->tpa_info[agg_index].agg_state =
4286 						QLNX_AGG_STATE_ERROR;
4287 			ecore_chain_consume(&rxq->rx_bd_ring);
4288 			rxq->sw_rx_cons =
4289 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4290 			continue;
4291 		}
4292 
4293 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4294 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4295 				" dropping incoming packet and reusing its"
4296 				" buffer\n", fp->rss_id);
4297 
4298 			qlnx_reuse_rx_data(rxq);
4299 
4300 			if (mpf != NULL)
4301 				m_freem(mpf);
4302 			mpf = mpl = NULL;
4303 
4304 			rxq->tpa_info[agg_index].agg_state =
4305 						QLNX_AGG_STATE_ERROR;
4306 
4307 			ecore_chain_consume(&rxq->rx_bd_ring);
4308 			rxq->sw_rx_cons =
4309 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4310 
4311 			continue;
4312 		}
4313 
4314 		mpc->m_flags &= ~M_PKTHDR;
4315 		mpc->m_next = NULL;
4316 		mpc->m_len = cqe->len_list[i];
4317 
4318 		if (mpf == NULL) {
4319 			mpf = mpl = mpc;
4320 		} else {
4321 			mpl->m_len = ha->rx_buf_size;
4322 			mpl->m_next = mpc;
4323 			mpl = mpc;
4324 		}
4325 
4326 		ecore_chain_consume(&rxq->rx_bd_ring);
4327 		rxq->sw_rx_cons =
4328 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4329 	}
4330 
4331         QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4332                   fp->rss_id, mpf, mpl);
4333 
4334 	if (mpf != NULL) {
4335 		mp = rxq->tpa_info[agg_index].mpl;
4336 		mp->m_len = ha->rx_buf_size;
4337 		mp->m_next = mpf;
4338 		rxq->tpa_info[agg_index].mpl = mpl;
4339 	}
4340 
4341 	return;
4342 }
4343 
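/*
 * TPA end: absorb any trailing buffers, trim the placement offset,
 * reconcile the chain length with total_packet_len, and hand the
 * coalesced frame to the stack. Returns the number of coalesced
 * segments so the caller can charge them against its rx budget.
 */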
4344 static int
4345 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4346 	struct qlnx_rx_queue *rxq,
4347 	struct eth_fast_path_rx_tpa_end_cqe *cqe)
4348 {
4349 	struct sw_rx_data	*sw_rx_data;
4350 	int			i;
4351 	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
4352 	struct mbuf		*mp;
4353 	uint32_t		agg_index;
4354 	uint32_t		len = 0;
4355         if_t ifp = ha->ifp;
4356 
4357         QL_DPRINT7(ha, "[%d]: enter\n \
4358                 \t type = 0x%x\n \
4359                 \t tpa_agg_index = 0x%x\n \
4360                 \t total_packet_len = 0x%x\n \
4361                 \t num_of_bds = 0x%x\n \
4362                 \t end_reason = 0x%x\n \
4363                 \t num_of_coalesced_segs = 0x%x\n \
4364                 \t ts_delta = 0x%x\n \
4365                 \t len_list[0] = 0x%x\n \
4366                 \t len_list[1] = 0x%x\n \
4367                 \t len_list[2] = 0x%x\n \
4368                 \t len_list[3] = 0x%x\n",
4369                  fp->rss_id, cqe->type, cqe->tpa_agg_index,
4370                 cqe->total_packet_len, cqe->num_of_bds,
4371                 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4372                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4373                 cqe->len_list[3]);
4374 
4375 	agg_index = cqe->tpa_agg_index;
4376 
4377 	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4378 		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4379 
4380 		fp->err_rx_tpa_invalid_agg_num++;
4381 		return (0);
4382 	}
4383 
4384 	for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4385 		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4386 
4387 		if (cqe->len_list[i] == 0)
4388 			break;
4389 
4390 		if (rxq->tpa_info[agg_index].agg_state !=
4391 			QLNX_AGG_STATE_START) {
4392 			QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4393 
4394 			qlnx_reuse_rx_data(rxq);
4395 			continue;
4396 		}
4397 
4398 		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4399 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4400 			BUS_DMASYNC_POSTREAD);
4401 
4402 		mpc = sw_rx_data->data;
4403 
4404 		if (mpc == NULL) {
4405 			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4406 
4407 			fp->err_rx_mp_null++;
4408 			if (mpf != NULL)
4409 				m_freem(mpf);
4410 			mpf = mpl = NULL;
4411 			rxq->tpa_info[agg_index].agg_state =
4412 						QLNX_AGG_STATE_ERROR;
4413 			ecore_chain_consume(&rxq->rx_bd_ring);
4414 			rxq->sw_rx_cons =
4415 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4416 			continue;
4417 		}
4418 
4419 		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4420 			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4421 				" dropping incoming packet and reusing its"
4422 				" buffer\n", fp->rss_id);
4423 
4424 			qlnx_reuse_rx_data(rxq);
4425 
4426 			if (mpf != NULL)
4427 				m_freem(mpf);
4428 			mpf = mpl = NULL;
4429 
4430 			rxq->tpa_info[agg_index].agg_state =
4431 						QLNX_AGG_STATE_ERROR;
4432 
4433 			ecore_chain_consume(&rxq->rx_bd_ring);
4434 			rxq->sw_rx_cons =
4435 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4436 
4437 			continue;
4438 		}
4439 
4440 		mpc->m_flags &= ~M_PKTHDR;
4441 		mpc->m_next = NULL;
4442 		mpc->m_len = cqe->len_list[i];
4443 
4444 		if (mpf == NULL) {
4445 			mpf = mpl = mpc;
4446 		} else {
4447 			mpl->m_len = ha->rx_buf_size;
4448 			mpl->m_next = mpc;
4449 			mpl = mpc;
4450 		}
4451 
4452 		ecore_chain_consume(&rxq->rx_bd_ring);
4453 		rxq->sw_rx_cons =
4454 			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4455 	}
4456 
4457 	QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4458 
4459 	if (mpf != NULL) {
4460 		QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4461 
4462 		mp = rxq->tpa_info[agg_index].mpl;
4463 		mp->m_len = ha->rx_buf_size;
4464 		mp->m_next = mpf;
4465 	}
4466 
4467 	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4468 		QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4469 
4470 		if (rxq->tpa_info[agg_index].mpf != NULL)
4471 			m_freem(rxq->tpa_info[agg_index].mpf);
4472 		rxq->tpa_info[agg_index].mpf = NULL;
4473 		rxq->tpa_info[agg_index].mpl = NULL;
4474 		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4475 		return (0);
4476 	}
4477 
4478 	mp = rxq->tpa_info[agg_index].mpf;
4479 	m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4480 	mp->m_pkthdr.len = cqe->total_packet_len;
4481 
4482 	if (mp->m_next  == NULL)
4483 		mp->m_len = mp->m_pkthdr.len;
4484 	else {
4485 		/* compute the total packet length */
4486 		mpf = mp;
4487 		while (mpf != NULL) {
4488 			len += mpf->m_len;
4489 			mpf = mpf->m_next;
4490 		}
4491 
4492 		if (cqe->total_packet_len > len) {
4493 			mpl = rxq->tpa_info[agg_index].mpl;
4494 			mpl->m_len += (cqe->total_packet_len - len);
4495 		}
4496 	}
4497 
4498 	QLNX_INC_IPACKETS(ifp);
4499 	QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4500 
4501         QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \
4502 		m_len = 0x%x m_pkthdr_len = 0x%x\n",
4503                 fp->rss_id, mp->m_pkthdr.csum_data,
4504                 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4505 
4506 	if_input(ifp, mp);
4507 
4508 	rxq->tpa_info[agg_index].mpf = NULL;
4509 	rxq->tpa_info[agg_index].mpl = NULL;
4510 	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4511 
4512 	return (cqe->num_of_coalesced_segs);
4513 }
4514 
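/*
 * Receive completion processing: walk the completion ring until the
 * software consumer index catches up with the hardware index or the
 * budget is spent, dispatching slow-path, TPA and regular CQEs.
 */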
4515 static int
4516 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4517 	int lro_enable)
4518 {
4519         uint16_t		hw_comp_cons, sw_comp_cons;
4520         int			rx_pkt = 0;
4521         struct qlnx_rx_queue	*rxq = fp->rxq;
4522         if_t ifp = ha->ifp;
4523 	struct ecore_dev	*cdev = &ha->cdev;
4524 	struct ecore_hwfn       *p_hwfn;
4525 
4526 #ifdef QLNX_SOFT_LRO
4527 	struct lro_ctrl		*lro;
4528 
4529 	lro = &rxq->lro;
4530 #endif /* #ifdef QLNX_SOFT_LRO */
4531 
4532         hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4533         sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4534 
4535 	p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4536 
4537         /* Memory barrier to prevent the CPU from doing speculative reads of CQE
4538          * / BD in the while-loop before reading hw_comp_cons. If the CQE is
4539          * read before it is written by FW, then FW writes CQE and SB, and then
4540          * the CPU reads the hw_comp_cons, it will use an old CQE.
4541          */
4542 
4543         /* Loop to complete all indicated BDs */
4544         while (sw_comp_cons != hw_comp_cons) {
4545                 union eth_rx_cqe		*cqe;
4546                 struct eth_fast_path_rx_reg_cqe	*fp_cqe;
4547                 struct sw_rx_data		*sw_rx_data;
4548 		register struct mbuf		*mp;
4549                 enum eth_rx_cqe_type		cqe_type;
4550                 uint16_t			len, pad, len_on_first_bd;
4551                 uint8_t				*data;
4552 		uint8_t				hash_type;
4553 
4554                 /* Get the CQE from the completion ring */
4555                 cqe = (union eth_rx_cqe *)
4556                         ecore_chain_consume(&rxq->rx_comp_ring);
4557                 cqe_type = cqe->fast_path_regular.type;
4558 
4559                 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4560                         QL_DPRINT3(ha, "Got a slowpath CQE\n");
4561 
4562                         ecore_eth_cqe_completion(p_hwfn,
4563                                         (struct eth_slow_path_rx_cqe *)cqe);
4564                         goto next_cqe;
4565                 }
4566 
4567 		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4568 			switch (cqe_type) {
4569 			case ETH_RX_CQE_TYPE_TPA_START:
4570 				qlnx_tpa_start(ha, fp, rxq,
4571 					&cqe->fast_path_tpa_start);
4572 				fp->tpa_start++;
4573 				break;
4574 
4575 			case ETH_RX_CQE_TYPE_TPA_CONT:
4576 				qlnx_tpa_cont(ha, fp, rxq,
4577 					&cqe->fast_path_tpa_cont);
4578 				fp->tpa_cont++;
4579 				break;
4580 
4581 			case ETH_RX_CQE_TYPE_TPA_END:
4582 				rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4583 						&cqe->fast_path_tpa_end);
4584 				fp->tpa_end++;
4585 				break;
4586 
4587 			default:
4588 				break;
4589 			}
4590 
4591                         goto next_cqe;
4592 		}
4593 
4594                 /* Get the data from the SW ring */
4595                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4596                 mp = sw_rx_data->data;
4597 
4598 		if (mp == NULL) {
4599                 	QL_DPRINT1(ha, "mp = NULL\n");
4600 			fp->err_rx_mp_null++;
4601         		rxq->sw_rx_cons  =
4602 				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4603 			goto next_cqe;
4604 		}
4605 		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4606 			BUS_DMASYNC_POSTREAD);
4607 
4608                 /* non GRO */
4609                 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4610                 len =  le16toh(fp_cqe->pkt_len);
4611                 pad = fp_cqe->placement_offset;
4612 #if 0
4613 		QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4614 			" len %u, parsing flags = %d pad  = %d\n",
4615 			cqe_type, fp_cqe->bitfields,
4616 			le16toh(fp_cqe->vlan_tag),
4617 			len, le16toh(fp_cqe->pars_flags.flags), pad);
4618 #endif
4619 		data = mtod(mp, uint8_t *);
4620 		data = data + pad;
4621 
4622 		if (0)
4623 			qlnx_dump_buf8(ha, __func__, data, len);
4624 
4625                 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4626                  * is always with a fixed size. If allocation fails, we take the
4627                  * consumed BD and return it to the ring in the PROD position.
4628                  * The packet that was received on that BD will be dropped (and
4629                  * not passed to the upper stack).
4630                  */
4631 		/* If this is an error packet then drop it */
4632 		if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4633 			CQE_FLAGS_ERR) {
4634 			QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4635 				" dropping incoming packet\n", sw_comp_cons,
4636 			le16toh(cqe->fast_path_regular.pars_flags.flags));
4637 			fp->err_rx_hw_errors++;
4638 
4639                         qlnx_reuse_rx_data(rxq);
4640 
4641 			QLNX_INC_IERRORS(ifp);
4642 
4643 			goto next_cqe;
4644 		}
4645 
4646                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4647                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4648 				" incoming packet and reusing its buffer\n");
4649                         qlnx_reuse_rx_data(rxq);
4650 
4651                         fp->err_rx_alloc_errors++;
4652 
4653 			QLNX_INC_IQDROPS(ifp);
4654 
4655                         goto next_cqe;
4656                 }
4657 
4658                 ecore_chain_consume(&rxq->rx_bd_ring);
4659 
4660 		len_on_first_bd = fp_cqe->len_on_first_bd;
4661 		m_adj(mp, pad);
4662 		mp->m_pkthdr.len = len;
4663 
4664 		if ((len > 60) && (len > len_on_first_bd)) {
4665 			mp->m_len = len_on_first_bd;
4666 
4667 			if (qlnx_rx_jumbo_chain(ha, fp, mp,
4668 				(len - len_on_first_bd)) != 0) {
4669 				m_freem(mp);
4670 
4671 				QLNX_INC_IQDROPS(ifp);
4672 
4673                         	goto next_cqe;
4674 			}
4675 
4676 		} else if (len_on_first_bd < len) {
4677 			fp->err_rx_jumbo_chain_pkts++;
4678 		} else {
4679 			mp->m_len = len;
4680 		}
4681 
4682 		mp->m_flags |= M_PKTHDR;
4683 
4684 		/* assign this packet to the receiving interface */
4685 		mp->m_pkthdr.rcvif = ifp;
4686 
4687 		/* assume no hardware checksum has been completed */
4688 		mp->m_pkthdr.csum_flags = 0;
4689 
4690 		mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4691 
4692 		hash_type = fp_cqe->bitfields &
4693 				(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4694 				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4695 
4696 		switch (hash_type) {
4697 		case RSS_HASH_TYPE_IPV4:
4698 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4699 			break;
4700 
4701 		case RSS_HASH_TYPE_TCP_IPV4:
4702 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4703 			break;
4704 
4705 		case RSS_HASH_TYPE_IPV6:
4706 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4707 			break;
4708 
4709 		case RSS_HASH_TYPE_TCP_IPV6:
4710 			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4711 			break;
4712 
4713 		default:
4714 			M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4715 			break;
4716 		}
4717 
4718 		if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4719 			mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4720 		}
4721 
4722 		if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4723 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4724 		}
4725 
4726 		if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4727 			mp->m_pkthdr.csum_data = 0xFFFF;
4728 			mp->m_pkthdr.csum_flags |=
4729 				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4730 		}
4731 
4732 		if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4733 			mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4734 			mp->m_flags |= M_VLANTAG;
4735 		}
4736 
4737 		QLNX_INC_IPACKETS(ifp);
4738 		QLNX_INC_IBYTES(ifp, len);
4739 
4740 #ifdef QLNX_SOFT_LRO
4741 		if (lro_enable)
4742 			tcp_lro_queue_mbuf(lro, mp);
4743 		else
4744 			if_input(ifp, mp);
4745 #else
4746 
4747 		if_input(ifp, mp);
4748 
4749 #endif /* #ifdef QLNX_SOFT_LRO */
4750 
4751                 rx_pkt++;
4752 
4753         	rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4754 
4755 next_cqe:	/* don't consume bd rx buffer */
4756                 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4757                 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4758 
4759 		/* CR TPA - revisit how to handle budget in TPA perhaps
4760 		   increase on "end" */
4761                 if (rx_pkt == budget)
4762                         break;
4763         } /* repeat while sw_comp_cons != hw_comp_cons... */
4764 
4765         /* Update producers */
4766         qlnx_update_rx_prod(p_hwfn, rxq);
4767 
4768         return rx_pkt;
4769 }
4770 
4771 /*
4772  * fast path interrupt
4773  */
4774 
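/*
 * Per-MSI-X-vector handler: disable the status block, reclaim tx
 * completions for each traffic class whenever the PBL runs low on
 * free elements, poll rx until no work remains, flush soft LRO, and
 * finally re-enable the IGU interrupt.
 */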
4775 static void
4776 qlnx_fp_isr(void *arg)
4777 {
4778         qlnx_ivec_t		*ivec = arg;
4779         qlnx_host_t		*ha;
4780         struct qlnx_fastpath	*fp = NULL;
4781         int			idx;
4782 
4783         ha = ivec->ha;
4784 
4785         if (ha->state != QLNX_STATE_OPEN) {
4786                 return;
4787         }
4788 
4789         idx = ivec->rss_idx;
4790 
4791         if (idx >= ha->num_rss) {
4792                 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4793                 ha->err_illegal_intr++;
4794                 return;
4795         }
4796         fp = &ha->fp_array[idx];
4797 
4798         if (fp == NULL) {
4799                 ha->err_fp_null++;
4800         } else {
4801 		int			rx_int = 0;
4802 #ifdef QLNX_SOFT_LRO
4803 		int			total_rx_count = 0;
4804 #endif
4805 		int 			lro_enable, tc;
4806 		struct qlnx_tx_queue	*txq;
4807 		uint16_t		elem_left;
#ifdef QLNX_TRACE_PERF_DATA
		uint64_t		tx_compl;	/* declaration assumed missing here */
#endif
4808 
4809 		lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO;
4810 
4811                 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4812 
4813                 do {
4814                         for (tc = 0; tc < ha->num_tc; tc++) {
4815 				txq = fp->txq[tc];
4816 
4817 				if((int)(elem_left =
4818 					ecore_chain_get_elem_left(&txq->tx_pbl)) <
4819 						QLNX_TX_ELEM_THRESH)  {
4820                                 	if (mtx_trylock(&fp->tx_mtx)) {
4821 #ifdef QLNX_TRACE_PERF_DATA
4822 						tx_compl = fp->tx_pkts_completed;
4823 #endif
4824 
4825 						qlnx_tx_int(ha, fp, fp->txq[tc]);
4826 #ifdef QLNX_TRACE_PERF_DATA
4827 						fp->tx_pkts_compl_intr +=
4828 							(fp->tx_pkts_completed - tx_compl);
4829 						if ((fp->tx_pkts_completed - tx_compl) <= 32)
4830 							fp->tx_comInt[0]++;
4831 						else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4832 							((fp->tx_pkts_completed - tx_compl) <= 64))
4833 							fp->tx_comInt[1]++;
4834 						else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4835 							((fp->tx_pkts_completed - tx_compl) <= 128))
4836 							fp->tx_comInt[2]++;
4837 						else if(((fp->tx_pkts_completed - tx_compl) > 128))
4838 							fp->tx_comInt[3]++;
4839 #endif
4840 						mtx_unlock(&fp->tx_mtx);
4841 					}
4842 				}
4843                         }
4844 
4845                         rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4846                                         lro_enable);
4847 
4848                         if (rx_int) {
4849                                 fp->rx_pkts += rx_int;
4850 #ifdef QLNX_SOFT_LRO
4851                                 total_rx_count += rx_int;
4852 #endif
4853                         }
4854 
4855                 } while (rx_int);
4856 
4857 #ifdef QLNX_SOFT_LRO
4858                 {
4859                         struct lro_ctrl *lro;
4860 
4861                         lro = &fp->rxq->lro;
4862 
4863                         if (lro_enable && total_rx_count) {
4864 
4865 #ifdef QLNX_TRACE_LRO_CNT
4866                                 if (lro->lro_mbuf_count & ~1023)
4867                                         fp->lro_cnt_1024++;
4868                                 else if (lro->lro_mbuf_count & ~511)
4869                                         fp->lro_cnt_512++;
4870                                 else if (lro->lro_mbuf_count & ~255)
4871                                         fp->lro_cnt_256++;
4872                                 else if (lro->lro_mbuf_count & ~127)
4873                                         fp->lro_cnt_128++;
4874                                 else if (lro->lro_mbuf_count & ~63)
4875                                         fp->lro_cnt_64++;
4876 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4877 
4878                                 tcp_lro_flush_all(lro);
4879                         }
4880                 }
4881 #endif /* #ifdef QLNX_SOFT_LRO */
4882 
4883                 ecore_sb_update_sb_idx(fp->sb_info);
4884                 rmb();
4885                 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4886         }
4887 
4888         return;
4889 }
4890 
4891 /*
4892  * slow path interrupt processing function
4893  * can be invoked in polled mode or in interrupt mode via taskqueue.
4894  */
4895 void
4896 qlnx_sp_isr(void *arg)
4897 {
4898 	struct ecore_hwfn	*p_hwfn;
4899 	qlnx_host_t		*ha;
4900 
4901 	p_hwfn = arg;
4902 
4903 	ha = (qlnx_host_t *)p_hwfn->p_dev;
4904 
4905 	ha->sp_interrupts++;
4906 
4907 	QL_DPRINT2(ha, "enter\n");
4908 
4909 	ecore_int_sp_dpc(p_hwfn);
4910 
4911 	QL_DPRINT2(ha, "exit\n");
4912 
4913 	return;
4914 }
4915 
4916 /*****************************************************************************
4917  * Support Functions for DMA'able Memory
4918  *****************************************************************************/
4919 
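/*
 * bus_dmamap_load() callback: store the single segment's bus address
 * in *arg, leaving it zero on error so callers can detect failure.
 */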
4920 static void
4921 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4922 {
4923         *((bus_addr_t *)arg) = 0;
4924 
4925         if (error) {
4926                 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4927                 return;
4928         }
4929 
4930         *((bus_addr_t *)arg) = segs[0].ds_addr;
4931 
4932         return;
4933 }
4934 
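/*
 * Create a single-segment DMA tag sized/aligned per dma_buf, allocate
 * zeroed coherent memory, and load the map to obtain the bus address.
 */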
4935 static int
4936 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4937 {
4938         int             ret = 0;
4939         bus_addr_t      b_addr;
4940 
4941         ret = bus_dma_tag_create(
4942                         ha->parent_tag,/* parent */
4943                         dma_buf->alignment,
4944                         ((bus_size_t)(1ULL << 32)),/* boundary */
4945                         BUS_SPACE_MAXADDR,      /* lowaddr */
4946                         BUS_SPACE_MAXADDR,      /* highaddr */
4947                         NULL, NULL,             /* filter, filterarg */
4948                         dma_buf->size,          /* maxsize */
4949                         1,                      /* nsegments */
4950                         dma_buf->size,          /* maxsegsize */
4951                         0,                      /* flags */
4952                         NULL, NULL,             /* lockfunc, lockarg */
4953                         &dma_buf->dma_tag);
4954 
4955         if (ret) {
4956                 QL_DPRINT1(ha, "could not create dma tag\n");
4957                 goto qlnx_alloc_dmabuf_exit;
4958         }
4959         ret = bus_dmamem_alloc(dma_buf->dma_tag,
4960                         (void **)&dma_buf->dma_b,
4961                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4962                         &dma_buf->dma_map);
4963         if (ret) {
4964                 bus_dma_tag_destroy(dma_buf->dma_tag);
4965                 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4966                 goto qlnx_alloc_dmabuf_exit;
4967         }
4968 
4969         ret = bus_dmamap_load(dma_buf->dma_tag,
4970                         dma_buf->dma_map,
4971                         dma_buf->dma_b,
4972                         dma_buf->size,
4973                         qlnx_dmamap_callback,
4974                         &b_addr, BUS_DMA_NOWAIT);
4975 
4976         if (ret || !b_addr) {
4977                 bus_dma_tag_destroy(dma_buf->dma_tag);
4978                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4979                         dma_buf->dma_map);
4980                 ret = -1;
4981                 goto qlnx_alloc_dmabuf_exit;
4982         }
4983 
4984         dma_buf->dma_addr = b_addr;
4985 
4986 qlnx_alloc_dmabuf_exit:
4987 
4988         return ret;
4989 }
4990 
4991 static void
4992 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4993 {
4994 	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4995         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4996         bus_dma_tag_destroy(dma_buf->dma_tag);
4997 	return;
4998 }
4999 
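/*
 * Coherent-memory allocator used by the ecore OSAL layer. It rounds
 * the request up to a page, allocates one extra page, and copies the
 * qlnx_dma_t bookkeeping (tag, map, address) just past the requested
 * size so qlnx_dma_free_coherent() can recover it from the virtual
 * address and size alone.
 */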
5000 void *
5001 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5002 {
5003 	qlnx_dma_t	dma_buf;
5004 	qlnx_dma_t	*dma_p;
5005 	qlnx_host_t	*ha __unused;
5006 
5007 	ha = (qlnx_host_t *)ecore_dev;
5008 
5009 	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5010 
5011 	memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5012 
5013 	dma_buf.size = size + PAGE_SIZE;
5014 	dma_buf.alignment = 8;
5015 
5016 	if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5017 		return (NULL);
5018 	bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5019 
5020 	*phys = dma_buf.dma_addr;
5021 
5022 	dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5023 
5024 	memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5025 
5026 	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5027 		(void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5028 		dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5029 
5030 	return (dma_buf.dma_b);
5031 }
5032 
5033 void
5034 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5035 	uint32_t size)
5036 {
5037 	qlnx_dma_t dma_buf, *dma_p;
5038 	qlnx_host_t	*ha;
5039 
5040 	ha = (qlnx_host_t *)ecore_dev;
5041 
5042 	if (v_addr == NULL)
5043 		return;
5044 
5045 	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5046 
5047 	dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5048 
5049 	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5050 		(void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5051 		dma_p->dma_b, (void *)dma_p->dma_addr, size);
5052 
5053 	dma_buf = *dma_p;
5054 
5055 	if (!ha->qlnxr_debug)
5056 		qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5057 	return;
5058 }
5059 
5060 static int
5061 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5062 {
5063         int             ret;
5064         device_t        dev;
5065 
5066         dev = ha->pci_dev;
5067 
5068         /*
5069          * Allocate parent DMA Tag
5070          */
5071         ret = bus_dma_tag_create(
5072                         bus_get_dma_tag(dev),   /* parent */
5073                         1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
5074                         BUS_SPACE_MAXADDR,      /* lowaddr */
5075                         BUS_SPACE_MAXADDR,      /* highaddr */
5076                         NULL, NULL,             /* filter, filterarg */
5077                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5078                         0,                      /* nsegments */
5079                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5080                         0,                      /* flags */
5081                         NULL, NULL,             /* lockfunc, lockarg */
5082                         &ha->parent_tag);
5083 
5084         if (ret) {
5085                 QL_DPRINT1(ha, "could not create parent dma tag\n");
5086                 return (-1);
5087         }
5088 
5089         ha->flags.parent_tag = 1;
5090 
5091         return (0);
5092 }
5093 
5094 static void
5095 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5096 {
5097         if (ha->parent_tag != NULL) {
5098                 bus_dma_tag_destroy(ha->parent_tag);
5099 		ha->parent_tag = NULL;
5100         }
5101 	return;
5102 }
5103 
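/*
 * Transmit mbuf DMA tag: a frame may map to at most QLNX_MAX_SEGMENTS
 * segments of up to QLNX_MAX_TX_MBUF_SIZE bytes each, within a
 * QLNX_MAX_TSO_FRAME_SIZE total.
 */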
5104 static int
5105 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5106 {
5107         if (bus_dma_tag_create(NULL,    /* parent */
5108                 1, 0,    /* alignment, bounds */
5109                 BUS_SPACE_MAXADDR,       /* lowaddr */
5110                 BUS_SPACE_MAXADDR,       /* highaddr */
5111                 NULL, NULL,      /* filter, filterarg */
5112                 QLNX_MAX_TSO_FRAME_SIZE,     /* maxsize */
5113                 QLNX_MAX_SEGMENTS,        /* nsegments */
5114                 QLNX_MAX_TX_MBUF_SIZE,	  /* maxsegsize */
5115                 0,        /* flags */
5116                 NULL,    /* lockfunc */
5117                 NULL,    /* lockfuncarg */
5118                 &ha->tx_tag)) {
5119                 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5120                 return (-1);
5121         }
5122 
5123 	return (0);
5124 }
5125 
5126 static void
5127 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5128 {
5129         if (ha->tx_tag != NULL) {
5130                 bus_dma_tag_destroy(ha->tx_tag);
5131 		ha->tx_tag = NULL;
5132         }
5133 	return;
5134 }
5135 
5136 static int
5137 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5138 {
5139         if (bus_dma_tag_create(NULL,    /* parent */
5140                         1, 0,    /* alignment, bounds */
5141                         BUS_SPACE_MAXADDR,       /* lowaddr */
5142                         BUS_SPACE_MAXADDR,       /* highaddr */
5143                         NULL, NULL,      /* filter, filterarg */
5144                         MJUM9BYTES,     /* maxsize */
5145                         1,        /* nsegments */
5146                         MJUM9BYTES,        /* maxsegsize */
5147                         0,        /* flags */
5148                         NULL,    /* lockfunc */
5149                         NULL,    /* lockfuncarg */
5150                         &ha->rx_tag)) {
5151                 QL_DPRINT1(ha, " rx_tag alloc failed\n");
5152 
5153                 return (-1);
5154         }
5155 	return (0);
5156 }
5157 
5158 static void
5159 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5160 {
5161         if (ha->rx_tag != NULL) {
5162                 bus_dma_tag_destroy(ha->rx_tag);
5163 		ha->rx_tag = NULL;
5164         }
5165 	return;
5166 }
5167 
5168 /*********************************
5169  * Exported functions
5170  *********************************/
5171 uint32_t
5172 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5173 {
5174 	uint32_t bar_size;
5175 
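	/*
	 * Each 64-bit memory BAR consumes two 32-bit BAR registers, so
	 * ecore's bar index N corresponds to config-space PCIR_BAR(2 * N).
	 */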
5176 	bar_id = bar_id * 2;
5177 
5178 	bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5179 				SYS_RES_MEMORY,
5180 				PCIR_BAR(bar_id));
5181 
5182 	return (bar_size);
5183 }
5184 
5185 uint32_t
5186 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5187 {
5188 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5189 				pci_reg, 1);
5190 	return 0;
5191 }
5192 
5193 uint32_t
5194 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5195 	uint16_t *reg_value)
5196 {
5197 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5198 				pci_reg, 2);
5199 	return 0;
5200 }
5201 
5202 uint32_t
5203 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5204 	uint32_t *reg_value)
5205 {
5206 	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5207 				pci_reg, 4);
5208 	return 0;
5209 }
5210 
5211 void
5212 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5213 {
5214 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5215 		pci_reg, reg_value, 1);
5216 	return;
5217 }
5218 
5219 void
5220 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5221 	uint16_t reg_value)
5222 {
5223 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5224 		pci_reg, reg_value, 2);
5225 	return;
5226 }
5227 
5228 void
5229 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5230 	uint32_t reg_value)
5231 {
5232 	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5233 		pci_reg, reg_value, 4);
5234 	return;
5235 }
5236 
5237 int
5238 qlnx_pci_find_capability(void *ecore_dev, int cap)
5239 {
5240 	int		reg;
5241 	qlnx_host_t	*ha;
5242 
5243 	ha = ecore_dev;
5244 
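	/* Note: the 'cap' argument is ignored; only PCIY_EXPRESS is looked up. */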
5245 	if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5246 		return reg;
5247 	else {
5248 		QL_DPRINT1(ha, "failed\n");
5249 		return 0;
5250 	}
5251 }
5252 
5253 int
5254 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5255 {
5256 	int		reg;
5257 	qlnx_host_t	*ha;
5258 
5259 	ha = ecore_dev;
5260 
5261 	if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5262 		return reg;
5263 	else {
5264 		QL_DPRINT1(ha, "failed\n");
5265 		return 0;
5266 	}
5267 }
5268 
5269 uint32_t
5270 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5271 {
5272 	uint32_t		data32;
5273 	struct ecore_hwfn	*p_hwfn;
5274 
5275 	p_hwfn = hwfn;
5276 
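	/* reg_offset positions this hwfn's register window within the shared BAR */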
5277 	data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5278 			(bus_size_t)(p_hwfn->reg_offset + reg_addr));
5279 
5280 	return (data32);
5281 }
5282 
5283 void
5284 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5285 {
5286 	struct ecore_hwfn	*p_hwfn = hwfn;
5287 
5288 	bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5289 		(bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5290 
5291 	return;
5292 }
5293 
5294 void
5295 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5296 {
5297 	struct ecore_hwfn	*p_hwfn = hwfn;
5298 
5299 	bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5300 		(bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5301 	return;
5302 }
5303 
5304 void
5305 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5306 {
5307 	struct ecore_dev	*cdev;
5308 	struct ecore_hwfn	*p_hwfn;
5309 	uint32_t	offset;
5310 
5311 	p_hwfn = hwfn;
5312 
5313 	cdev = p_hwfn->p_dev;
5314 
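	/*
	 * reg_addr points into the mapped doorbell space; convert it to an
	 * offset from this hwfn's doorbell base before writing through the
	 * doorbell bus resource.
	 */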
5315 	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5316 	bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5317 
5318 	return;
5319 }
5320 
5321 void
5322 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5323 {
5324 	struct ecore_hwfn	*p_hwfn = hwfn;
5325 
5326 	bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5327 		(bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5328 
5329 	return;
5330 }
5331 
5332 uint32_t
5333 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5334 {
5335 	uint32_t		data32;
5336 	bus_size_t		offset;
5337 	struct ecore_dev	*cdev;
5338 
5339 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5340 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5341 
5342 	data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5343 
5344 	return (data32);
5345 }
5346 
5347 void
5348 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5349 {
5350 	bus_size_t		offset;
5351 	struct ecore_dev	*cdev;
5352 
5353 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5354 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5355 
5356 	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5357 
5358 	return;
5359 }
5360 
5361 void
5362 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5363 {
5364 	bus_size_t		offset;
5365 	struct ecore_dev	*cdev;
5366 
5367 	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5368 	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5369 
5370 	bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5371 	return;
5372 }
5373 
5374 void *
5375 qlnx_zalloc(uint32_t size)
5376 {
5377 	caddr_t	va;
5378 
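	/* M_NOWAIT allocations can fail; callers must tolerate a NULL return. */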
5379 	va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT | M_ZERO);
5381 	return ((void *)va);
5382 }
5383 
5384 void
5385 qlnx_barrier(void *p_dev)
5386 {
5387 	qlnx_host_t	*ha;
5388 
5389 	ha = ((struct ecore_dev *) p_dev)->ha;
5390 	bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_WRITE);
5391 }
5392 
5393 void
5394 qlnx_link_update(void *p_hwfn)
5395 {
5396 	qlnx_host_t	*ha;
5397 	int		prev_link_state;
5398 
5399 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5400 
5401 	qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5402 
5403 	prev_link_state = ha->link_up;
5404 	ha->link_up = ha->if_link.link_up;
5405 
5406         if (prev_link_state !=  ha->link_up) {
5407                 if (ha->link_up) {
5408                         if_link_state_change(ha->ifp, LINK_STATE_UP);
5409                 } else {
5410                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5411                 }
5412         }
5413 #ifndef QLNX_VF
5414 #ifdef CONFIG_ECORE_SRIOV
5415 
5416 	if (qlnx_vf_device(ha) != 0) {
5417 		if (ha->sriov_initialized)
5418 			qlnx_inform_vf_link_state(p_hwfn, ha);
5419 	}
5420 
5421 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5422 #endif /* #ifdef QLNX_VF */
5423 
5424         return;
5425 }
5426 
5427 static void
5428 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5429 	struct ecore_vf_acquire_sw_info *p_sw_info)
5430 {
5431 	p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5432 					(QLNX_VERSION_MINOR << 16) |
5433 					 QLNX_VERSION_BUILD;
5434 	p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5435 
5436 	return;
5437 }
5438 
5439 void
5440 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5441 	void *p_sw_info)
5442 {
5443 	__qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
5444 
5445 	return;
5446 }
5447 
5448 void
5449 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5450 	struct qlnx_link_output *if_link)
5451 {
5452 	struct ecore_mcp_link_params    link_params;
5453 	struct ecore_mcp_link_state     link_state;
5454 	uint8_t				p_change;
5455 	struct ecore_ptt *p_ptt = NULL;
5456 
5457 	memset(if_link, 0, sizeof(*if_link));
5458 	memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5459 	memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5460 
5461 	ha = (qlnx_host_t *)hwfn->p_dev;
5462 
5463 	/* Prepare source inputs */
5464 	/* PFs query the MFW directly; VFs read the bulletin board instead */
5465 	if (qlnx_vf_device(ha) != 0) {
5466         	p_ptt = ecore_ptt_acquire(hwfn);
5467 
5468 	        if (p_ptt == NULL) {
5469 			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5470 			return;
5471 		}
5472 
5473 		ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5474 		ecore_ptt_release(hwfn, p_ptt);
5475 
5476 		memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5477 			sizeof(link_params));
5478 		memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5479 			sizeof(link_state));
5480 	} else {
5481 		ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5482 		ecore_vf_read_bulletin(hwfn, &p_change);
5483 		ecore_vf_get_link_params(hwfn, &link_params);
5484 		ecore_vf_get_link_state(hwfn, &link_state);
5485 	}
5486 
5487 	/* Set the link parameters to pass to protocol driver */
5488 	if (link_state.link_up) {
5489 		if_link->link_up = true;
5490 		if_link->speed = link_state.speed;
5491 	}
5492 
5493 	if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5494 
5495 	if (link_params.speed.autoneg)
5496 		if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5497 
5498 	if (link_params.pause.autoneg ||
5499 		(link_params.pause.forced_rx && link_params.pause.forced_tx))
5500 		if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5501 
5502 	if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5503 		link_params.pause.forced_tx)
5504 		if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5505 
5506 	if (link_params.speed.advertised_speeds &
5507 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5508 		if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5509                                            QLNX_LINK_CAP_1000baseT_Full;
5510 
5511 	if (link_params.speed.advertised_speeds &
5512 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5513 		if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5514 
5515 	if (link_params.speed.advertised_speeds &
5516 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5517 		if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5518 
5519 	if (link_params.speed.advertised_speeds &
5520 		NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5521 		if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5522 
5523 	if (link_params.speed.advertised_speeds &
5524 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5525 		if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5526 
5527 	if (link_params.speed.advertised_speeds &
5528 		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5529 		if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5530 
5531 	if_link->advertised_caps = if_link->supported_caps;
5532 
5533 	if_link->autoneg = link_params.speed.autoneg;
5534 	if_link->duplex = QLNX_LINK_DUPLEX;
5535 
5536 	/* Link partner capabilities */
5537 
5538 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5539 		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5540 
5541 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5542 		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5543 
5544 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5545 		if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5546 
5547 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5548 		if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5549 
5550 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5551 		if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5552 
5553 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5554 		if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5555 
5556 	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5557 		if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5558 
5559 	if (link_state.an_complete)
5560 		if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5561 
5562 	if (link_state.partner_adv_pause)
5563 		if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5564 
5565 	if ((link_state.partner_adv_pause ==
5566 		ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5567 		(link_state.partner_adv_pause ==
5568 			ECORE_LINK_PARTNER_BOTH_PAUSE))
5569 		if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5570 
5571 	return;
5572 }
5573 
5574 void
5575 qlnx_schedule_recovery(void *p_hwfn)
5576 {
5577 	qlnx_host_t	*ha;
5578 
5579 	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5580 
5581 	if (qlnx_vf_device(ha) != 0) {
5582 		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5583 	}
5584 
5585 	return;
5586 }
5587 
5588 static int
5589 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5590 {
5591         int	rc, i;
5592 
5593         for (i = 0; i < cdev->num_hwfns; i++) {
5594                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5595                 p_hwfn->pf_params = *func_params;
5596 
5597 #ifdef QLNX_ENABLE_IWARP
5598 		if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5599 			p_hwfn->using_ll2 = true;
5600 		}
5601 #endif /* #ifdef QLNX_ENABLE_IWARP */
5602         }
5603 
5604         rc = ecore_resc_alloc(cdev);
5605         if (rc)
5606                 goto qlnx_nic_setup_exit;
5607 
5608         ecore_resc_setup(cdev);
5609 
5610 qlnx_nic_setup_exit:
5611 
5612         return rc;
5613 }
5614 
5615 static int
5616 qlnx_nic_start(struct ecore_dev *cdev)
5617 {
5618         int				rc;
5619 	struct ecore_hw_init_params	params;
5620 
5621 	bzero(&params, sizeof (struct ecore_hw_init_params));
5622 
5623 	params.p_tunn = NULL;
5624 	params.b_hw_start = true;
5625 	params.int_mode = cdev->int_mode;
5626 	params.allow_npar_tx_switch = true;
5627 	params.bin_fw_data = NULL;
5628 
5629         rc = ecore_hw_init(cdev, &params);
5630         if (rc) {
5631                 ecore_resc_free(cdev);
5632                 return rc;
5633         }
5634 
5635         return 0;
5636 }
5637 
5638 static int
5639 qlnx_slowpath_start(qlnx_host_t *ha)
5640 {
5641 	struct ecore_dev	*cdev;
5642 	struct ecore_pf_params	pf_params;
5643 	int			rc;
5644 
5645 	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5646 	pf_params.eth_pf_params.num_cons  =
5647 		(ha->num_rss) * (ha->num_tc + 1);
5648 
5649 #ifdef QLNX_ENABLE_IWARP
5650 	if (qlnx_vf_device(ha) != 0) {
5651 		if (ha->personality == ECORE_PCI_ETH_IWARP) {
5652 			device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5653 			pf_params.rdma_pf_params.num_qps = 1024;
5654 			pf_params.rdma_pf_params.num_srqs = 1024;
5655 			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5656 			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5657 		} else if (ha->personality == ECORE_PCI_ETH_ROCE) {
5658 			device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5659 			pf_params.rdma_pf_params.num_qps = 8192;
5660 			pf_params.rdma_pf_params.num_srqs = 8192;
5661 			//pf_params.rdma_pf_params.min_dpis = 0;
5662 			pf_params.rdma_pf_params.min_dpis = 8;
5663 			pf_params.rdma_pf_params.roce_edpm_mode = 0;
5664 			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5665 			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5666 		}
5667 	}
5668 #endif /* #ifdef QLNX_ENABLE_IWARP */
5669 
5670 	cdev = &ha->cdev;
5671 
5672 	rc = qlnx_nic_setup(cdev, &pf_params);
5673         if (rc)
5674                 goto qlnx_slowpath_start_exit;
5675 
5676         cdev->int_mode = ECORE_INT_MODE_MSIX;
5677         cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5678 
5679 #ifdef QLNX_MAX_COALESCE
5680 	cdev->rx_coalesce_usecs = 255;
5681 	cdev->tx_coalesce_usecs = 255;
5682 #endif
5683 
5684 	rc = qlnx_nic_start(cdev);
5685 
5686 	ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5687 	ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5688 
5689 #ifdef QLNX_USER_LLDP
5690 	(void)qlnx_set_lldp_tlvx(ha, NULL);
5691 #endif /* #ifdef QLNX_USER_LLDP */
5692 
5693 qlnx_slowpath_start_exit:
5694 
5695 	return (rc);
5696 }
5697 
5698 static int
5699 qlnx_slowpath_stop(qlnx_host_t *ha)
5700 {
5701 	struct ecore_dev	*cdev;
5702 	device_t		dev = ha->pci_dev;
5703 	int			i;
5704 
5705 	cdev = &ha->cdev;
5706 
5707 	ecore_hw_stop(cdev);
5708 
5709  	for (i = 0; i < ha->cdev.num_hwfns; i++) {
5710         	if (ha->sp_handle[i])
5711                 	(void)bus_teardown_intr(dev, ha->sp_irq[i],
5712 				ha->sp_handle[i]);
5713 
5714 		ha->sp_handle[i] = NULL;
5715 
5716         	if (ha->sp_irq[i])
5717 			(void) bus_release_resource(dev, SYS_RES_IRQ,
5718 				ha->sp_irq_rid[i], ha->sp_irq[i]);
5719 		ha->sp_irq[i] = NULL;
5720 	}
5721 
5722         ecore_resc_free(cdev);
5723 
5724         return 0;
5725 }
5726 
5727 static void
5728 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5729 	char ver_str[VER_SIZE])
5730 {
5731         int	i;
5732 
5733         memcpy(cdev->name, name, NAME_SIZE);
5734 
5735         for_each_hwfn(cdev, i) {
5736                 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5737         }
5738 
5739         cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5740 
5741 	return;
5742 }
5743 
5744 void
5745 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5746 {
5747 	enum ecore_mcp_protocol_type	type;
5748 	union ecore_mcp_protocol_stats	*stats;
5749 	struct ecore_eth_stats		eth_stats;
5750 	qlnx_host_t			*ha;
5751 
5752 	ha = cdev;
5753 	stats = proto_stats;
5754 	type = proto_type;
5755 
5756         switch (type) {
5757         case ECORE_MCP_LAN_STATS:
5758                 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5759                 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5760                 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5761                 stats->lan_stats.fcs_err = -1;
5762                 break;
5763 
5764 	default:
5765 		ha->err_get_proto_invalid_type++;
5766 
5767 		QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5768 		break;
5769 	}
5770 	return;
5771 }
5772 
5773 static int
5774 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5775 {
5776 	struct ecore_hwfn	*p_hwfn;
5777 	struct ecore_ptt	*p_ptt;
5778 
5779 	p_hwfn = &ha->cdev.hwfns[0];
5780 	p_ptt = ecore_ptt_acquire(p_hwfn);
5781 
5782 	if (p_ptt ==  NULL) {
5783                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5784                 return (-1);
5785 	}
5786 	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5787 
5788 	ecore_ptt_release(p_hwfn, p_ptt);
5789 
5790 	return (0);
5791 }
5792 
5793 static int
5794 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5795 {
5796 	struct ecore_hwfn	*p_hwfn;
5797 	struct ecore_ptt	*p_ptt;
5798 
5799 	p_hwfn = &ha->cdev.hwfns[0];
5800 	p_ptt = ecore_ptt_acquire(p_hwfn);
5801 
5802 	if (p_ptt ==  NULL) {
5803                 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5804                 return (-1);
5805 	}
5806 	ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5807 
5808 	ecore_ptt_release(p_hwfn, p_ptt);
5809 
5810 	return (0);
5811 }
5812 
5813 static int
5814 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5815 {
5816 	bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5817 	bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5818 	bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5819 
5820         return 0;
5821 }
5822 
5823 static void
5824 qlnx_init_fp(qlnx_host_t *ha)
5825 {
5826 	int rss_id, txq_array_index, tc;
5827 
5828 	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5829 		struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5830 
5831 		fp->rss_id = rss_id;
5832 		fp->edev = ha;
5833 		fp->sb_info = &ha->sb_array[rss_id];
5834 		fp->rxq = &ha->rxq_array[rss_id];
5835 		fp->rxq->rxq_id = rss_id;
5836 
5837 		for (tc = 0; tc < ha->num_tc; tc++) {
5838                         txq_array_index = tc * ha->num_rss + rss_id;
5839                         fp->txq[tc] = &ha->txq_array[txq_array_index];
5840                         fp->txq[tc]->index = txq_array_index;
5841 		}
5842 
5843 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5844 			rss_id);
5845 
5846 		fp->tx_ring_full = 0;
5847 
5848 		/* reset all the statistics counters */
5849 
5850 		fp->tx_pkts_processed = 0;
5851 		fp->tx_pkts_freed = 0;
5852 		fp->tx_pkts_transmitted = 0;
5853 		fp->tx_pkts_completed = 0;
5854 
5855 #ifdef QLNX_TRACE_PERF_DATA
5856 		fp->tx_pkts_trans_ctx = 0;
5857 		fp->tx_pkts_compl_ctx = 0;
5858 		fp->tx_pkts_trans_fp = 0;
5859 		fp->tx_pkts_compl_fp = 0;
5860 		fp->tx_pkts_compl_intr = 0;
5861 #endif
5862 		fp->tx_lso_wnd_min_len = 0;
5863 		fp->tx_defrag = 0;
5864 		fp->tx_nsegs_gt_elem_left = 0;
5865 		fp->tx_tso_max_nsegs = 0;
5866 		fp->tx_tso_min_nsegs = 0;
5867 		fp->err_tx_nsegs_gt_elem_left = 0;
5868 		fp->err_tx_dmamap_create = 0;
5869 		fp->err_tx_defrag_dmamap_load = 0;
5870 		fp->err_tx_non_tso_max_seg = 0;
5871 		fp->err_tx_dmamap_load = 0;
5872 		fp->err_tx_defrag = 0;
5873 		fp->err_tx_free_pkt_null = 0;
5874 		fp->err_tx_cons_idx_conflict = 0;
5875 
5876 		fp->rx_pkts = 0;
5877 		fp->err_m_getcl = 0;
5878 		fp->err_m_getjcl = 0;
5879         }
5880 	return;
5881 }
5882 
5883 void
5884 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5885 {
5886 	struct ecore_dev	*cdev;
5887 
5888 	cdev = &ha->cdev;
5889 
5890         if (sb_info->sb_virt) {
5891                 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5892 			(sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5893 		sb_info->sb_virt = NULL;
5894 	}
5895 }
5896 
5897 static int
5898 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5899 	void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5900 {
5901         struct ecore_hwfn	*p_hwfn;
5902         int			hwfn_index, rc;
5903         u16			rel_sb_id;
5904 
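        /*
         * Status blocks are spread round-robin across the hw functions:
         * the remainder picks the hwfn, and the quotient is the status
         * block id relative to that hwfn.
         */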
5905         hwfn_index = sb_id % cdev->num_hwfns;
5906         p_hwfn = &cdev->hwfns[hwfn_index];
5907         rel_sb_id = sb_id / cdev->num_hwfns;
5908 
5909         QL_DPRINT2(((qlnx_host_t *)cdev),
5910                 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5911                 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5912                 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5913                 sb_virt_addr, (void *)sb_phy_addr);
5914 
5915         rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5916                              sb_virt_addr, sb_phy_addr, rel_sb_id);
5917 
5918         return rc;
5919 }
5920 
5921 /* This function allocates fast-path status block memory */
5922 int
5923 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5924 {
5925         struct status_block_e4	*sb_virt;
5926         bus_addr_t		sb_phys;
5927         int			rc;
5928 	uint32_t		size;
5929 	struct ecore_dev	*cdev;
5930 
5931 	cdev = &ha->cdev;
5932 
5933 	size = sizeof(*sb_virt);
5934 	sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5935 
5936         if (!sb_virt) {
5937                 QL_DPRINT1(ha, "Status block allocation failed\n");
5938                 return -ENOMEM;
5939         }
5940 
5941         rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5942         if (rc) {
5943                 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5944         }
5945 
5946 	return rc;
5947 }
5948 
5949 static void
5950 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5951 {
5952         int			i;
5953 	struct sw_rx_data	*rx_buf;
5954 
5955         for (i = 0; i < rxq->num_rx_buffers; i++) {
5956                 rx_buf = &rxq->sw_rx_ring[i];
5957 
5958 		if (rx_buf->data != NULL) {
5959 			if (rx_buf->map != NULL) {
5960 				bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5961 				bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5962 				rx_buf->map = NULL;
5963 			}
5964 			m_freem(rx_buf->data);
5965 			rx_buf->data = NULL;
5966 		}
5967         }
5968 	return;
5969 }
5970 
5971 static void
5972 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5973 {
5974 	struct ecore_dev	*cdev;
5975 	int			i;
5976 
5977 	cdev = &ha->cdev;
5978 
5979 	qlnx_free_rx_buffers(ha, rxq);
5980 
5981 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5982 		qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5983 		if (rxq->tpa_info[i].mpf != NULL)
5984 			m_freem(rxq->tpa_info[i].mpf);
5985 	}
5986 
5987 	bzero((void *)&rxq->sw_rx_ring[0],
5988 		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
5989 
5990         /* Free the real RQ ring used by FW */
5991 	if (rxq->rx_bd_ring.p_virt_addr) {
5992                 ecore_chain_free(cdev, &rxq->rx_bd_ring);
5993                 rxq->rx_bd_ring.p_virt_addr = NULL;
5994         }
5995 
5996         /* Free the real completion ring used by FW */
5997         if (rxq->rx_comp_ring.p_virt_addr &&
5998                         rxq->rx_comp_ring.pbl_sp.p_virt_table) {
5999                 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6000                 rxq->rx_comp_ring.p_virt_addr = NULL;
6001                 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6002         }
6003 
6004 #ifdef QLNX_SOFT_LRO
6005 	{
6006 		struct lro_ctrl *lro;
6007 
6008 		lro = &rxq->lro;
6009 		tcp_lro_free(lro);
6010 	}
6011 #endif /* #ifdef QLNX_SOFT_LRO */
6012 
6013 	return;
6014 }
6015 
6016 static int
6017 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6018 {
6019         register struct mbuf	*mp;
6020         uint16_t		rx_buf_size;
6021         struct sw_rx_data	*sw_rx_data;
6022         struct eth_rx_bd	*rx_bd;
6023         dma_addr_t		dma_addr;
6024 	bus_dmamap_t		map;
6025 	bus_dma_segment_t       segs[1];
6026 	int			nsegs;
6027 	int			ret;
6028 
6029         rx_buf_size = rxq->rx_buf_size;
6030 
6031 	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6032 
6033         if (mp == NULL) {
6034                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6035                 return -ENOMEM;
6036         }
6037 
6038 	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6039 
6040 	map = (bus_dmamap_t)0;
6041 
6042 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6043 			BUS_DMA_NOWAIT);
6044 	dma_addr = segs[0].ds_addr;
6045 
6046 	if (ret || !dma_addr || (nsegs != 1)) {
6047 		m_freem(mp);
6048 		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6049                            ret, (long long unsigned int)dma_addr, nsegs);
6050 		return -ENOMEM;
6051 	}
6052 
6053         sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6054         sw_rx_data->data = mp;
6055         sw_rx_data->dma_addr = dma_addr;
6056         sw_rx_data->map = map;
6057 
6058         /* Advance PROD and get BD pointer */
6059         rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6060         rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6061         rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6062 	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6063 
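        /* RX_RING_SIZE is a power of two, so the mask wraps the producer index */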
6064         rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6065 
6066         return 0;
6067 }
6068 
6069 static int
6070 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6071 	struct qlnx_agg_info *tpa)
6072 {
6073 	struct mbuf		*mp;
6074         dma_addr_t		dma_addr;
6075 	bus_dmamap_t		map;
6076 	bus_dma_segment_t       segs[1];
6077 	int			nsegs;
6078 	int			ret;
6079         struct sw_rx_data	*rx_buf;
6080 
6081 	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6082 
6083         if (mp == NULL) {
6084                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6085                 return -ENOMEM;
6086         }
6087 
6088 	mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6089 
6090 	map = (bus_dmamap_t)0;
6091 
6092 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6093 			BUS_DMA_NOWAIT);
6094 	dma_addr = segs[0].ds_addr;
6095 
6096 	if (ret || !dma_addr || (nsegs != 1)) {
6097 		m_freem(mp);
6098 		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6099 			ret, (long long unsigned int)dma_addr, nsegs);
6100 		return -ENOMEM;
6101 	}
6102 
6103         rx_buf = &tpa->rx_buf;
6104 
6105 	memset(rx_buf, 0, sizeof (struct sw_rx_data));
6106 
6107         rx_buf->data = mp;
6108         rx_buf->dma_addr = dma_addr;
6109         rx_buf->map = map;
6110 
6111 	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6112 
6113 	return (0);
6114 }
6115 
6116 static void
6117 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6118 {
6119         struct sw_rx_data	*rx_buf;
6120 
6121 	rx_buf = &tpa->rx_buf;
6122 
6123 	if (rx_buf->data != NULL) {
6124 		if (rx_buf->map != NULL) {
6125 			bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6126 			bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6127 			rx_buf->map = NULL;
6128 		}
6129 		m_freem(rx_buf->data);
6130 		rx_buf->data = NULL;
6131 	}
6132 	return;
6133 }
6134 
6135 /* This function allocates all memory needed per Rx queue */
6136 static int
6137 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6138 {
6139         int			i, rc, num_allocated;
6140 	struct ecore_dev	 *cdev;
6141 
6142 	cdev = &ha->cdev;
6143 
6144         rxq->num_rx_buffers = RX_RING_SIZE;
6145 
6146 	rxq->rx_buf_size = ha->rx_buf_size;
6147 
6148         /* Allocate the parallel driver ring for Rx buffers */
6149 	bzero((void *)&rxq->sw_rx_ring[0],
6150 		(sizeof (struct sw_rx_data) * RX_RING_SIZE));
6151 
6152         /* Allocate FW Rx ring  */
6153 
6154         rc = ecore_chain_alloc(cdev,
6155 			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6156 			ECORE_CHAIN_MODE_NEXT_PTR,
6157 			ECORE_CHAIN_CNT_TYPE_U16,
6158 			RX_RING_SIZE,
6159 			sizeof(struct eth_rx_bd),
6160 			&rxq->rx_bd_ring, NULL);
6161 
6162         if (rc)
6163                 goto err;
6164 
6165         /* Allocate FW completion ring */
6166         rc = ecore_chain_alloc(cdev,
6167                         ECORE_CHAIN_USE_TO_CONSUME,
6168                         ECORE_CHAIN_MODE_PBL,
6169 			ECORE_CHAIN_CNT_TYPE_U16,
6170                         RX_RING_SIZE,
6171                         sizeof(union eth_rx_cqe),
6172                         &rxq->rx_comp_ring, NULL);
6173 
6174         if (rc)
6175                 goto err;
6176 
6177         /* Allocate buffers for the Rx ring */
6178 
6179 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6180 		rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6181 			&rxq->tpa_info[i]);
6182                 if (rc)
6183                         break;
6184 	}
6185 
6186         for (i = 0; i < rxq->num_rx_buffers; i++) {
6187                 rc = qlnx_alloc_rx_buffer(ha, rxq);
6188                 if (rc)
6189                         break;
6190         }
6191         num_allocated = i;
6192         if (!num_allocated) {
6193 		QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6194                 goto err;
6195         } else if (num_allocated < rxq->num_rx_buffers) {
6196 		QL_DPRINT1(ha, "Allocated less buffers than"
6197 			" desired (%d allocated)\n", num_allocated);
6198         }
6199 
6200 #ifdef QLNX_SOFT_LRO
6201 
6202 	{
6203 		struct lro_ctrl *lro;
6204 
6205 		lro = &rxq->lro;
6206 
6207 		if (tcp_lro_init_args(lro, ha->ifp, 0, rxq->num_rx_buffers)) {
6208 			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6209 				   rxq->rxq_id);
6210 			goto err;
6211 		}
6212 
6213 		lro->ifp = ha->ifp;
6214 	}
6215 #endif /* #ifdef QLNX_SOFT_LRO */
6216         return 0;
6217 
6218 err:
6219         qlnx_free_mem_rxq(ha, rxq);
6220         return -ENOMEM;
6221 }
6222 
6223 static void
6224 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6225 	struct qlnx_tx_queue *txq)
6226 {
6227 	struct ecore_dev	*cdev;
6228 
6229 	cdev = &ha->cdev;
6230 
6231 	bzero((void *)&txq->sw_tx_ring[0],
6232 		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6233 
6234         /* Free the real RQ ring used by FW */
6235         if (txq->tx_pbl.p_virt_addr) {
6236                 ecore_chain_free(cdev, &txq->tx_pbl);
6237                 txq->tx_pbl.p_virt_addr = NULL;
6238         }
6239 	return;
6240 }
6241 
6242 /* This function allocates all memory needed per Tx queue */
6243 static int
6244 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6245 	struct qlnx_tx_queue *txq)
6246 {
6247         int			ret = ECORE_SUCCESS;
6248         union eth_tx_bd_types	*p_virt;
6249 	struct ecore_dev	*cdev;
6250 
6251 	cdev = &ha->cdev;
6252 
6253 	bzero((void *)&txq->sw_tx_ring[0],
6254 		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6255 
6256         /* Allocate the real Tx ring to be used by FW */
6257         ret = ecore_chain_alloc(cdev,
6258                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6259                         ECORE_CHAIN_MODE_PBL,
6260 			ECORE_CHAIN_CNT_TYPE_U16,
6261                         TX_RING_SIZE,
6262                         sizeof(*p_virt),
6263                         &txq->tx_pbl, NULL);
6264 
6265         if (ret != ECORE_SUCCESS) {
6266                 goto err;
6267         }
6268 
6269 	txq->num_tx_buffers = TX_RING_SIZE;
6270 
6271         return 0;
6272 
6273 err:
6274         qlnx_free_mem_txq(ha, fp, txq);
6275         return -ENOMEM;
6276 }
6277 
6278 static void
6279 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6280 {
6281 	struct mbuf	*mp;
6282 	if_t		ifp = ha->ifp;
6283 
6284 	if (mtx_initialized(&fp->tx_mtx)) {
6285 		if (fp->tx_br != NULL) {
6286 			mtx_lock(&fp->tx_mtx);
6287 
6288 			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6289 				fp->tx_pkts_freed++;
6290 				m_freem(mp);
6291 			}
6292 
6293 			mtx_unlock(&fp->tx_mtx);
6294 
6295 			buf_ring_free(fp->tx_br, M_DEVBUF);
6296 			fp->tx_br = NULL;
6297 		}
6298 		mtx_destroy(&fp->tx_mtx);
6299 	}
6300 	return;
6301 }
6302 
6303 static void
6304 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6305 {
6306         int	tc;
6307 
6308         qlnx_free_mem_sb(ha, fp->sb_info);
6309 
6310         qlnx_free_mem_rxq(ha, fp->rxq);
6311 
6312         for (tc = 0; tc < ha->num_tc; tc++)
6313                 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6314 
6315 	return;
6316 }
6317 
6318 static int
6319 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6320 {
6321 	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6322 		"qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6323 
6324 	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6325 
6326         fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6327                                    M_NOWAIT, &fp->tx_mtx);
6328         if (fp->tx_br == NULL) {
6329 		QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6330 			ha->dev_unit, fp->rss_id);
6331 		return -ENOMEM;
6332         }
6333 	return 0;
6334 }
6335 
6336 static int
6337 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6338 {
6339         int	rc, tc;
6340 
6341         rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6342         if (rc)
6343                 goto err;
6344 
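	/*
	 * Choose the smallest mbuf cluster that holds a full frame; with
	 * rx_jumbo_buf_eq_mtu set, jumbo frames get a single matching
	 * cluster rather than being capped at page-sized buffers.
	 */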
6345 	if (ha->rx_jumbo_buf_eq_mtu) {
6346 		if (ha->max_frame_size <= MCLBYTES)
6347 			ha->rx_buf_size = MCLBYTES;
6348 		else if (ha->max_frame_size <= MJUMPAGESIZE)
6349 			ha->rx_buf_size = MJUMPAGESIZE;
6350 		else if (ha->max_frame_size <= MJUM9BYTES)
6351 			ha->rx_buf_size = MJUM9BYTES;
6352 		else if (ha->max_frame_size <= MJUM16BYTES)
6353 			ha->rx_buf_size = MJUM16BYTES;
6354 	} else {
6355 		if (ha->max_frame_size <= MCLBYTES)
6356 			ha->rx_buf_size = MCLBYTES;
6357 		else
6358 			ha->rx_buf_size = MJUMPAGESIZE;
6359 	}
6360 
6361         rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6362         if (rc)
6363                 goto err;
6364 
6365         for (tc = 0; tc < ha->num_tc; tc++) {
6366                 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6367                 if (rc)
6368                         goto err;
6369         }
6370 
6371         return 0;
6372 
6373 err:
6374         qlnx_free_mem_fp(ha, fp);
6375         return -ENOMEM;
6376 }
6377 
6378 static void
6379 qlnx_free_mem_load(qlnx_host_t *ha)
6380 {
6381         int			i;
6382 
6383         for (i = 0; i < ha->num_rss; i++) {
6384                 struct qlnx_fastpath *fp = &ha->fp_array[i];
6385 
6386                 qlnx_free_mem_fp(ha, fp);
6387         }
6388 	return;
6389 }
6390 
6391 static int
6392 qlnx_alloc_mem_load(qlnx_host_t *ha)
6393 {
6394         int	rc = 0, rss_id;
6395 
6396         for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6397                 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6398 
6399                 rc = qlnx_alloc_mem_fp(ha, fp);
6400                 if (rc)
6401                         break;
6402         }
6403 	return (rc);
6404 }
6405 
6406 static int
6407 qlnx_start_vport(struct ecore_dev *cdev,
6408                 u8 vport_id,
6409                 u16 mtu,
6410                 u8 drop_ttl0_flg,
6411                 u8 inner_vlan_removal_en_flg,
6412 		u8 tx_switching,
6413 		u8 hw_lro_enable)
6414 {
6415         int					rc, i;
6416 	struct ecore_sp_vport_start_params	vport_start_params = { 0 };
6417 	qlnx_host_t				*ha __unused;
6418 
6419 	ha = (qlnx_host_t *)cdev;
6420 
6421 	vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6422 	vport_start_params.tx_switching = 0;
6423 	vport_start_params.handle_ptp_pkts = 0;
6424 	vport_start_params.only_untagged = 0;
6425 	vport_start_params.drop_ttl0 = drop_ttl0_flg;
6426 
6427 	vport_start_params.tpa_mode =
6428 		(hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6429 	vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6430 
6431 	vport_start_params.vport_id = vport_id;
6432 	vport_start_params.mtu = mtu;
6433 
6434 	QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6435 
6436         for_each_hwfn(cdev, i) {
6437                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6438 
6439 		vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6440 		vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6441 
6442                 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6443 
6444                 if (rc) {
6445 			QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6446 				" with MTU %d\n" , vport_id, mtu);
6447                         return -ENOMEM;
6448                 }
6449 
6450                 ecore_hw_start_fastpath(p_hwfn);
6451 
6452 		QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6453 			vport_id, mtu);
6454         }
6455         return 0;
6456 }
6457 
6458 static int
6459 qlnx_update_vport(struct ecore_dev *cdev,
6460 	struct qlnx_update_vport_params *params)
6461 {
6462         struct ecore_sp_vport_update_params	sp_params;
6463         int					rc, i, j, fp_index;
6464 	struct ecore_hwfn			*p_hwfn;
6465         struct ecore_rss_params			*rss;
6466 	qlnx_host_t				*ha = (qlnx_host_t *)cdev;
6467         struct qlnx_fastpath			*fp;
6468 
6469         memset(&sp_params, 0, sizeof(sp_params));
6470         /* Translate protocol params into sp params */
6471         sp_params.vport_id = params->vport_id;
6472 
6473         sp_params.update_vport_active_rx_flg =
6474 		params->update_vport_active_rx_flg;
6475         sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6476 
6477         sp_params.update_vport_active_tx_flg =
6478 		params->update_vport_active_tx_flg;
6479         sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6480 
6481         sp_params.update_inner_vlan_removal_flg =
6482                 params->update_inner_vlan_removal_flg;
6483         sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6484 
6485 	sp_params.sge_tpa_params = params->sge_tpa_params;
6486 
6487         /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
6488          * We need to re-fix the rss values per engine for CMT.
6489          */
6490 	if (params->rss_params->update_rss_config)
6491 		sp_params.rss_params = params->rss_params;
6492 	else
6493 		sp_params.rss_params = NULL;
6494 
6495         for_each_hwfn(cdev, i) {
6496 		p_hwfn = &cdev->hwfns[i];
6497 
6498 		if ((cdev->num_hwfns > 1) &&
6499 			params->rss_params->update_rss_config &&
6500 			params->rss_params->rss_enable) {
6501 			rss = params->rss_params;
6502 
6503 			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
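			/*
			 * For engine i, table entry j is re-pointed at
			 * fastpath ((num_hwfns * j) + i) % num_rss, keeping
			 * the spread even across queues on both engines.
			 */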
6504 				fp_index = ((cdev->num_hwfns * j) + i) %
6505 						ha->num_rss;
6506 
6507                 		fp = &ha->fp_array[fp_index];
6508                         	rss->rss_ind_table[j] = fp->rxq->handle;
6509 			}
6510 
6511 			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6512 				QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6513 					rss->rss_ind_table[j],
6514 					rss->rss_ind_table[j+1],
6515 					rss->rss_ind_table[j+2],
6516 					rss->rss_ind_table[j+3],
6517 					rss->rss_ind_table[j+4],
6518 					rss->rss_ind_table[j+5],
6519 					rss->rss_ind_table[j+6],
6520 					rss->rss_ind_table[j+7]);
6521 				j += 8;
6522 			}
6523 		}
6524 
6525                 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6526 
6527 		QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6528 
6529                 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6530                                            ECORE_SPQ_MODE_EBLOCK, NULL);
6531                 if (rc) {
6532 			QL_DPRINT1(ha, "Failed to update VPORT\n");
6533                         return rc;
6534                 }
6535 
6536                 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6537 			rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6538 			params->vport_id, params->vport_active_tx_flg,
6539 			params->vport_active_rx_flg,
6540 			params->update_vport_active_tx_flg,
6541 			params->update_vport_active_rx_flg);
6542         }
6543 
6544         return 0;
6545 }
6546 
6547 static void
6548 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6549 {
6550         struct eth_rx_bd	*rx_bd_cons =
6551 					ecore_chain_consume(&rxq->rx_bd_ring);
6552         struct eth_rx_bd	*rx_bd_prod =
6553 					ecore_chain_produce(&rxq->rx_bd_ring);
6554         struct sw_rx_data	*sw_rx_data_cons =
6555 					&rxq->sw_rx_ring[rxq->sw_rx_cons];
6556         struct sw_rx_data	*sw_rx_data_prod =
6557 					&rxq->sw_rx_ring[rxq->sw_rx_prod];
6558 
6559         sw_rx_data_prod->data = sw_rx_data_cons->data;
6560         memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6561 
6562         rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6563         rxq->sw_rx_prod  = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6564 
6565 	return;
6566 }
6567 
6568 static void
6569 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6570 {
6571 
6572         uint16_t	 	bd_prod;
6573         uint16_t		cqe_prod;
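	/*
	 * Both producers are packed into a single 32-bit image so they
	 * reach internal RAM in one write.
	 */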
6574 	union {
6575 		struct eth_rx_prod_data rx_prod_data;
6576 		uint32_t		data32;
6577 	} rx_prods;
6578 
6579         bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6580         cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6581 
6582         /* Update producers */
6583         rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6584         rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6585 
6586         /* Make sure that the BD and SGE data is updated before updating the
6587          * producers since FW might read the BD/SGE right after the producer
6588          * is updated.
6589          */
6590 	wmb();
6591 
6592 #ifdef ECORE_CONFIG_DIRECT_HWFN
6593 	internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6594 		sizeof(rx_prods), &rx_prods.data32);
6595 #else
6596 	internal_ram_wr(rxq->hw_rxq_prod_addr,
6597 		sizeof(rx_prods), &rx_prods.data32);
6598 #endif
6599 
6600         /* mmiowb is needed to synchronize doorbell writes from more than one
6601          * processor. It guarantees that the write arrives to the device before
6602          * the napi lock is released and another qlnx_poll is called (possibly
6603          * on another CPU). Without this barrier, the next doorbell can bypass
6604          * this doorbell. This is applicable to IA64/Altix systems.
6605          */
6606         wmb();
6607 
6608 	return;
6609 }
6610 
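/*
 * Default Toeplitz RSS hash key (the well-known Microsoft verification key),
 * packed here into 32-bit words.
 */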
6611 static uint32_t qlnx_hash_key[] = {
6612                 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6613                 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6614                 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6615                 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6616                 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6617                 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6618                 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6619                 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6620                 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6621                 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6622 
6623 static int
6624 qlnx_start_queues(qlnx_host_t *ha)
6625 {
6626         int				rc, tc, i, vport_id = 0,
6627 					drop_ttl0_flg = 1, vlan_removal_en = 1,
6628 					tx_switching = 0, hw_lro_enable = 0;
6629         struct ecore_dev		*cdev = &ha->cdev;
6630         struct ecore_rss_params		*rss_params = &ha->rss_params;
6631         struct qlnx_update_vport_params	vport_update_params;
6632         if_t				ifp;
6633         struct ecore_hwfn		*p_hwfn;
6634 	struct ecore_sge_tpa_params	tpa_params;
6635 	struct ecore_queue_start_common_params qparams;
6636         struct qlnx_fastpath		*fp;
6637 
6638 	ifp = ha->ifp;
6639 
6640 	QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6641 
6642         if (!ha->num_rss) {
6643 		QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6644 			" are no Rx queues\n");
6645                 return -EINVAL;
6646         }
6647 
6648 #ifndef QLNX_SOFT_LRO
6649         hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO;
6650 #endif /* #ifndef QLNX_SOFT_LRO */
6651 
6652         rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg,
6653 			vlan_removal_en, tx_switching, hw_lro_enable);
6654 
6655         if (rc) {
6656                 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6657                 return rc;
6658         }
6659 
6660 	QL_DPRINT2(ha, "Start vport ramrod passed, "
6661 		"vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6662 		vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en);
6663 
6664         for_each_rss(i) {
6665 		struct ecore_rxq_start_ret_params rx_ret_params;
6666 		struct ecore_txq_start_ret_params tx_ret_params;
6667 
6668                 fp = &ha->fp_array[i];
6669         	p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6670 
6671 		bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6672 		bzero(&rx_ret_params,
6673 			sizeof (struct ecore_rxq_start_ret_params));
6674 
6675 		qparams.queue_id = i;
6676 		qparams.vport_id = vport_id;
6677 		qparams.stats_id = vport_id;
6678 		qparams.p_sb = fp->sb_info;
6679 		qparams.sb_idx = RX_PI;
6680 
6682 		rc = ecore_eth_rx_queue_start(p_hwfn,
6683 			p_hwfn->hw_info.opaque_fid,
6684 			&qparams,
6685 			fp->rxq->rx_buf_size,	/* bd_max_bytes */
6686 			/* bd_chain_phys_addr */
6687 			fp->rxq->rx_bd_ring.p_phys_addr,
6688 			/* cqe_pbl_addr */
6689 			ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6690 			/* cqe_pbl_size */
6691 			ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6692 			&rx_ret_params);
6693 
6694                 if (rc) {
6695                 	QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6696                         return rc;
6697                 }
6698 
6699 		fp->rxq->hw_rxq_prod_addr	= rx_ret_params.p_prod;
6700 		fp->rxq->handle			= rx_ret_params.p_handle;
6701                 fp->rxq->hw_cons_ptr		=
6702 				&fp->sb_info->sb_virt->pi_array[RX_PI];
6703 
6704                 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6705 
6706                 for (tc = 0; tc < ha->num_tc; tc++) {
6707                         struct qlnx_tx_queue *txq = fp->txq[tc];
6708 
6709 			bzero(&qparams,
6710 				sizeof(struct ecore_queue_start_common_params));
6711 			bzero(&tx_ret_params,
6712 				sizeof (struct ecore_txq_start_ret_params));
6713 
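			/*
			 * Tx queues alternate across hw functions, so the id
			 * handed to a hwfn is relative to that hwfn.
			 */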
6714 			qparams.queue_id = txq->index / cdev->num_hwfns;
6715 			qparams.vport_id = vport_id;
6716 			qparams.stats_id = vport_id;
6717 			qparams.p_sb = fp->sb_info;
6718 			qparams.sb_idx = TX_PI(tc);
6719 
6720 			rc = ecore_eth_tx_queue_start(p_hwfn,
6721 				p_hwfn->hw_info.opaque_fid,
6722 				&qparams, tc,
6723 				/* bd_chain_phys_addr */
6724 				ecore_chain_get_pbl_phys(&txq->tx_pbl),
6725 				ecore_chain_get_page_cnt(&txq->tx_pbl),
6726 				&tx_ret_params);
6727 
6728                         if (rc) {
6729                 		QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6730 					   txq->index, rc);
6731                                 return rc;
6732                         }
6733 
6734 			txq->doorbell_addr = tx_ret_params.p_doorbell;
6735 			txq->handle = tx_ret_params.p_handle;
6736 
6737                         txq->hw_cons_ptr =
6738                                 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
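                        /*
                         * Pre-build the doorbell data template: destination
                         * XCM, aggregate-command SET, BD producer as the
                         * aggregated value; the fast path only updates the
                         * producer before ringing.
                         */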
6739                         SET_FIELD(txq->tx_db.data.params,
6740                                   ETH_DB_DATA_DEST, DB_DEST_XCM);
6741                         SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6742                                   DB_AGG_CMD_SET);
6743                         SET_FIELD(txq->tx_db.data.params,
6744                                   ETH_DB_DATA_AGG_VAL_SEL,
6745                                   DQ_XCM_ETH_TX_BD_PROD_CMD);
6746 
6747                         txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6748                 }
6749         }
6750 
6751         /* Fill struct with RSS params */
6752         if (ha->num_rss > 1) {
6753                 rss_params->update_rss_config = 1;
6754                 rss_params->rss_enable = 1;
6755                 rss_params->update_rss_capabilities = 1;
6756                 rss_params->update_rss_ind_table = 1;
6757                 rss_params->update_rss_key = 1;
6758                 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6759                                        ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6760                 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6761 
6762                 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6763                 	fp = &ha->fp_array[(i % ha->num_rss)];
6764                         rss_params->rss_ind_table[i] = fp->rxq->handle;
6765 		}
6766 
6767                 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6768 			rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6769 
6770         } else {
6771                 memset(rss_params, 0, sizeof(*rss_params));
6772         }
6773 
6774         /* Prepare and send the vport enable */
6775         memset(&vport_update_params, 0, sizeof(vport_update_params));
6776         vport_update_params.vport_id = vport_id;
6777         vport_update_params.update_vport_active_tx_flg = 1;
6778         vport_update_params.vport_active_tx_flg = 1;
6779         vport_update_params.update_vport_active_rx_flg = 1;
6780         vport_update_params.vport_active_rx_flg = 1;
6781         vport_update_params.rss_params = rss_params;
6782         vport_update_params.update_inner_vlan_removal_flg = 1;
6783         vport_update_params.inner_vlan_removal_flg = 1;
6784 
6785 	if (hw_lro_enable) {
6786 		memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6787 
6788 		tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6789 
6790 		tpa_params.update_tpa_en_flg = 1;
6791 		tpa_params.tpa_ipv4_en_flg = 1;
6792 		tpa_params.tpa_ipv6_en_flg = 1;
6793 
6794 		tpa_params.update_tpa_param_flg = 1;
6795 		tpa_params.tpa_pkt_split_flg = 0;
6796 		tpa_params.tpa_hdr_data_split_flg = 0;
6797 		tpa_params.tpa_gro_consistent_flg = 0;
6798 		tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6799 		tpa_params.tpa_max_size = (uint16_t)(-1);
6800 		tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2;
6801 		tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2;
6802 
6803 		vport_update_params.sge_tpa_params = &tpa_params;
6804 	}
6805 
6806         rc = qlnx_update_vport(cdev, &vport_update_params);
6807         if (rc) {
6808 		QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6809                 return rc;
6810         }
6811 
6812         return 0;
6813 }
6814 
6815 static int
6816 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6817 	struct qlnx_tx_queue *txq)
6818 {
6819 	uint16_t	hw_bd_cons;
6820 	uint16_t	ecore_cons_idx;
6821 
6822 	QL_DPRINT2(ha, "enter\n");
6823 
6824 	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6825 
6826 	while (hw_bd_cons !=
6827 		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6828 		mtx_lock(&fp->tx_mtx);
6829 
6830 		(void)qlnx_tx_int(ha, fp, txq);
6831 
6832 		mtx_unlock(&fp->tx_mtx);
6833 
6834 		qlnx_mdelay(__func__, 2);
6835 
6836 		hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6837 	}
6838 
6839 	QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6840 
6841         return 0;
6842 }
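/*
 * Note: the loop above polls the hardware Tx consumer index against the
 * chain's consumer index, reaping completions under tx_mtx and sleeping
 * 2 ms between polls; it returns only once the two indices agree (ring
 * empty).  The wait is unbounded, so callers are expected to have
 * quiesced the vport first, as qlnx_stop_queues() below does.
 */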
6843 
6844 static int
6845 qlnx_stop_queues(qlnx_host_t *ha)
6846 {
6847         struct qlnx_update_vport_params	vport_update_params;
6848         struct ecore_dev		*cdev;
6849         struct qlnx_fastpath		*fp;
6850         int				rc, tc, i;
6851 
6852         cdev = &ha->cdev;
6853 
6854         /* Disable the vport */
6855 
6856         memset(&vport_update_params, 0, sizeof(vport_update_params));
6857 
6858         vport_update_params.vport_id = 0;
6859         vport_update_params.update_vport_active_tx_flg = 1;
6860         vport_update_params.vport_active_tx_flg = 0;
6861         vport_update_params.update_vport_active_rx_flg = 1;
6862         vport_update_params.vport_active_rx_flg = 0;
6863         vport_update_params.rss_params = &ha->rss_params;
6864         vport_update_params.rss_params->update_rss_config = 0;
6865         vport_update_params.rss_params->rss_enable = 0;
6866         vport_update_params.update_inner_vlan_removal_flg = 0;
6867         vport_update_params.inner_vlan_removal_flg = 0;
6868 
6869 	QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6870 
6871         rc = qlnx_update_vport(cdev, &vport_update_params);
6872         if (rc) {
6873 		QL_DPRINT1(ha, "Failed to update vport\n");
6874                 return rc;
6875         }
6876 
6877         /* Flush Tx queues. If needed, request drain from MCP */
6878         for_each_rss(i) {
6879                 fp = &ha->fp_array[i];
6880 
6881                 for (tc = 0; tc < ha->num_tc; tc++) {
6882                         struct qlnx_tx_queue *txq = fp->txq[tc];
6883 
6884                         rc = qlnx_drain_txq(ha, fp, txq);
6885                         if (rc)
6886                                 return rc;
6887                 }
6888         }
6889 
6890         /* Stop all Queues in reverse order*/
6891         for (i = ha->num_rss - 1; i >= 0; i--) {
6892 		struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6893 
6894                 fp = &ha->fp_array[i];
6895 
6896                 /* Stop the Tx Queue(s)*/
6897                 for (tc = 0; tc < ha->num_tc; tc++) {
6898 			int tx_queue_id __unused;
6899 
6900 			tx_queue_id = tc * ha->num_rss + i;
6901 			rc = ecore_eth_tx_queue_stop(p_hwfn,
6902 					fp->txq[tc]->handle);
6903 
6904                         if (rc) {
6905 				QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6906 					   tx_queue_id);
6907                                 return rc;
6908                         }
6909                 }
6910 
6911                 /* Stop the Rx Queue*/
6912 		rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6913 				false);
6914                 if (rc) {
6915                         QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6916                         return rc;
6917                 }
6918         }
6919 
6920         /* Stop the vport */
6921 	for_each_hwfn(cdev, i) {
6922 		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6923 
6924 		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6925 
6926 		if (rc) {
6927                         QL_DPRINT1(ha, "Failed to stop VPORT\n");
6928 			return rc;
6929 		}
6930 	}
6931 
6932         return rc;
6933 }
6934 
6935 static int
6936 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6937 	enum ecore_filter_opcode opcode,
6938 	unsigned char mac[ETH_ALEN])
6939 {
6940 	struct ecore_filter_ucast	ucast;
6941 	struct ecore_dev		*cdev;
6942 	int				rc;
6943 
6944 	cdev = &ha->cdev;
6945 
6946 	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6947 
6948         ucast.opcode = opcode;
6949         ucast.type = ECORE_FILTER_MAC;
6950         ucast.is_rx_filter = 1;
6951         ucast.vport_to_add_to = 0;
6952         memcpy(&ucast.mac[0], mac, ETH_ALEN);
6953 
6954 	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6955 
6956         return (rc);
6957 }
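/*
 * Usage sketch (illustrative; both opcodes appear later in this file,
 * and "lladdr" stands in for the interface link-level address):
 *
 *	qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, lladdr);
 *	qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
 *
 * ECORE_FILTER_REPLACE programs the unicast MAC filter in vport 0 and
 * ECORE_FILTER_FLUSH clears the previously programmed entries.
 */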
6958 
6959 static int
6960 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6961 {
6962 	struct ecore_filter_ucast	ucast;
6963 	struct ecore_dev		*cdev;
6964 	int				rc;
6965 
6966 	bzero(&ucast, sizeof(struct ecore_filter_ucast));
6967 
6968 	ucast.opcode = ECORE_FILTER_REPLACE;
6969 	ucast.type = ECORE_FILTER_MAC;
6970 	ucast.is_rx_filter = 1;
6971 
6972 	cdev = &ha->cdev;
6973 
6974 	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6975 
6976 	return (rc);
6977 }
6978 
6979 static int
6980 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6981 {
6982 	struct ecore_filter_mcast	*mcast;
6983 	struct ecore_dev		*cdev;
6984 	int				rc, i;
6985 
6986 	cdev = &ha->cdev;
6987 
6988 	mcast = &ha->ecore_mcast;
6989 	bzero(mcast, sizeof(struct ecore_filter_mcast));
6990 
6991 	mcast->opcode = ECORE_FILTER_REMOVE;
6992 
6993 	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
6994 		if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6995 			ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6996 			ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6997 			memcpy(&mcast->mac[mcast->num_mc_addrs][0], &ha->mcast[i].addr[0], ETH_ALEN);
6998 			mcast->num_mc_addrs++;
6999 		}
7000 	}
7002 
7003 	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7004 
7005 	bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7006 	ha->nmcast = 0;
7007 
7008 	return (rc);
7009 }
7010 
7011 static int
7012 qlnx_clean_filters(qlnx_host_t *ha)
7013 {
7014         int	rc = 0;
7015 
7016 	/* Remove all unicast macs */
7017 	rc = qlnx_remove_all_ucast_mac(ha);
7018 	if (rc)
7019 		return rc;
7020 
7021 	/* Remove all multicast macs */
7022 	rc = qlnx_remove_all_mcast_mac(ha);
7023 	if (rc)
7024 		return rc;
7025 
7026         rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7027 
7028         return (rc);
7029 }
7030 
7031 static int
7032 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7033 {
7034 	struct ecore_filter_accept_flags	accept;
7035 	int					rc = 0;
7036 	struct ecore_dev			*cdev;
7037 
7038 	cdev = &ha->cdev;
7039 
7040 	bzero(&accept, sizeof(struct ecore_filter_accept_flags));
7041 
7042 	accept.update_rx_mode_config = 1;
7043 	accept.rx_accept_filter = filter;
7044 
7045 	accept.update_tx_mode_config = 1;
7046 	accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
7047 		ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
7048 
7049 	rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7050 			ECORE_SPQ_MODE_CB, NULL);
7051 
7052 	return (rc);
7053 }
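/*
 * Illustrative example (mirrors qlnx_set_rx_mode() below): the filter
 * argument is a bitwise OR of ECORE_ACCEPT_* flags, e.g.
 *
 *	uint8_t f = ECORE_ACCEPT_UCAST_MATCHED |
 *		    ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
 *	rc = qlnx_set_rx_accept_filter(ha, f);
 *
 * The Tx side is always set to matched unicast/multicast plus broadcast.
 */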
7054 
7055 static int
7056 qlnx_set_rx_mode(qlnx_host_t *ha)
7057 {
7058 	int	rc = 0;
7059 	uint8_t	filter;
7060 	const if_t ifp = ha->ifp;
7061 	const struct ifaddr *ifa;
7062 	struct sockaddr_dl *sdl;
7063 
7064 	ifa = if_getifaddr(ifp);
7065 	if (if_gettype(ifp) == IFT_ETHER && ifa != NULL &&
7066 			ifa->ifa_addr != NULL) {
7067 		sdl = (struct sockaddr_dl *) ifa->ifa_addr;
7068 
7069 		rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, LLADDR(sdl));
7070 	} else {
7071 		rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7072 	}
7073         if (rc)
7074                 return rc;
7075 
7076 	rc = qlnx_remove_all_mcast_mac(ha);
7077         if (rc)
7078                 return rc;
7079 
7080 	filter = ECORE_ACCEPT_UCAST_MATCHED |
7081 			ECORE_ACCEPT_MCAST_MATCHED |
7082 			ECORE_ACCEPT_BCAST;
7083 
7084 	if (qlnx_vf_device(ha) == 0 || (if_getflags(ha->ifp) & IFF_PROMISC)) {
7085 		filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
7086 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7087 	} else if (if_getflags(ha->ifp) & IFF_ALLMULTI) {
7088 		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7089 	}
7090 	ha->filter = filter;
7091 
7092 	rc = qlnx_set_rx_accept_filter(ha, filter);
7093 
7094 	return (rc);
7095 }
7096 
7097 static int
7098 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7099 {
7100         int			i, rc = 0;
7101 	struct ecore_dev	*cdev;
7102 	struct ecore_hwfn	*hwfn;
7103 	struct ecore_ptt	*ptt;
7104 
7105 	if (qlnx_vf_device(ha) == 0)
7106 		return (0);
7107 
7108 	cdev = &ha->cdev;
7109 
7110         for_each_hwfn(cdev, i) {
7111                 hwfn = &cdev->hwfns[i];
7112 
7113                 ptt = ecore_ptt_acquire(hwfn);
7114                 if (!ptt)
7115                         return -EBUSY;
7116 
7117                 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7118 
7119                 ecore_ptt_release(hwfn, ptt);
7120 
7121                 if (rc)
7122                         return rc;
7123         }
7124         return (rc);
7125 }
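/*
 * Note: the acquire/use/release sequence above is the standard PTT
 * pattern in this driver.  A PTT window must be held around per-hwfn
 * register and management-FW accesses (ecore_rd(), ecore_mcp_*()), and
 * ecore_ptt_acquire() can fail under contention, hence the -EBUSY
 * return.  The same pattern recurs in the functions below.
 */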
7126 
7127 static uint64_t
7128 qlnx_get_counter(if_t ifp, ift_counter cnt)
7129 {
7130 	qlnx_host_t *ha;
7131 	uint64_t count;
7132 
7133         ha = (qlnx_host_t *)if_getsoftc(ifp);
7134 
7135         switch (cnt) {
7136         case IFCOUNTER_IPACKETS:
7137 		count = ha->hw_stats.common.rx_ucast_pkts +
7138 			ha->hw_stats.common.rx_mcast_pkts +
7139 			ha->hw_stats.common.rx_bcast_pkts;
7140 		break;
7141 
7142         case IFCOUNTER_IERRORS:
7143 		count = ha->hw_stats.common.rx_crc_errors +
7144 			ha->hw_stats.common.rx_align_errors +
7145 			ha->hw_stats.common.rx_oversize_packets +
7146 			ha->hw_stats.common.rx_undersize_packets;
7147 		break;
7148 
7149         case IFCOUNTER_OPACKETS:
7150 		count = ha->hw_stats.common.tx_ucast_pkts +
7151 			ha->hw_stats.common.tx_mcast_pkts +
7152 			ha->hw_stats.common.tx_bcast_pkts;
7153 		break;
7154 
7155         case IFCOUNTER_OERRORS:
7156                 count = ha->hw_stats.common.tx_err_drop_pkts;
7157 		break;
7158 
7159         case IFCOUNTER_COLLISIONS:
7160                 return (0);
7161 
7162         case IFCOUNTER_IBYTES:
7163 		count = ha->hw_stats.common.rx_ucast_bytes +
7164 			ha->hw_stats.common.rx_mcast_bytes +
7165 			ha->hw_stats.common.rx_bcast_bytes;
7166 		break;
7167 
7168         case IFCOUNTER_OBYTES:
7169 		count = ha->hw_stats.common.tx_ucast_bytes +
7170 			ha->hw_stats.common.tx_mcast_bytes +
7171 			ha->hw_stats.common.tx_bcast_bytes;
7172 		break;
7173 
7174         case IFCOUNTER_IMCASTS:
7175 		count = ha->hw_stats.common.rx_mcast_pkts;
7176 		break;
7177 
7178         case IFCOUNTER_OMCASTS:
7179 		count = ha->hw_stats.common.tx_mcast_pkts;
7180 		break;
7181 
7182         case IFCOUNTER_IQDROPS:
7183         case IFCOUNTER_OQDROPS:
7184         case IFCOUNTER_NOPROTO:
7185 
7186         default:
7187                 return (if_get_counter_default(ifp, cnt));
7188         }
7189 	return (count);
7190 }
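/*
 * Note (assumption: this routine is installed as the ifnet get-counter
 * method during interface setup): the stack calls it to read aggregate
 * statistics, e.g. IFCOUNTER_IPACKETS is the sum of the unicast,
 * multicast and broadcast Rx packet counters; anything the hardware does
 * not track falls through to if_get_counter_default().
 */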
7191 
7192 static void
7193 qlnx_timer(void *arg)
7194 {
7195 	qlnx_host_t	*ha;
7196 
7197 	ha = (qlnx_host_t *)arg;
7198 
7199 	if (ha->error_recovery) {
7200 		ha->error_recovery = 0;
7201 		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7202 		return;
7203 	}
7204 
7205 	ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7206 
7207 	if (ha->storm_stats_gather)
7208 		qlnx_sample_storm_stats(ha);
7209 
7210 	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7211 
7212 	return;
7213 }
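/*
 * Note: callout_reset(..., hz, ...) re-arms the callout one second out,
 * so vport statistics (and storm statistics, when gathering is enabled)
 * are sampled at roughly 1 Hz until qlnx_unload() drains the callout.
 */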
7214 
7215 static int
7216 qlnx_load(qlnx_host_t *ha)
7217 {
7218 	int			i;
7219 	int			rc = 0;
7220         device_t		dev;
7221 
7222         dev = ha->pci_dev;
7223 
7224 	QL_DPRINT2(ha, "enter\n");
7225 
7226         rc = qlnx_alloc_mem_arrays(ha);
7227         if (rc)
7228                 goto qlnx_load_exit0;
7229 
7230         qlnx_init_fp(ha);
7231 
7232         rc = qlnx_alloc_mem_load(ha);
7233         if (rc)
7234                 goto qlnx_load_exit1;
7235 
7236         QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7237 		   ha->num_rss, ha->num_tc);
7238 
7239 	for (i = 0; i < ha->num_rss; i++) {
7240 		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7241                         (INTR_TYPE_NET | INTR_MPSAFE),
7242                         NULL, qlnx_fp_isr, &ha->irq_vec[i],
7243                         &ha->irq_vec[i].handle))) {
7244                         QL_DPRINT1(ha, "could not setup interrupt\n");
7245                         goto qlnx_load_exit2;
7246 		}
7247 
7248 		QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
7249 			 irq %p handle %p\n", i,
7250 			ha->irq_vec[i].irq_rid,
7251 			ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7252 
7253 		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7254 	}
7255 
7256         rc = qlnx_start_queues(ha);
7257         if (rc)
7258                 goto qlnx_load_exit2;
7259 
7260         QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7261 
7262         /* Add primary mac and set Rx filters */
7263         rc = qlnx_set_rx_mode(ha);
7264         if (rc)
7265                 goto qlnx_load_exit2;
7266 
7267         /* Ask for link-up using current configuration */
7268 	qlnx_set_link(ha, true);
7269 
7270 	if (qlnx_vf_device(ha) == 0)
7271 		qlnx_link_update(&ha->cdev.hwfns[0]);
7272 
7273         ha->state = QLNX_STATE_OPEN;
7274 
7275 	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7276 
7277 	if (ha->flags.callout_init)
7278         	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7279 
7280         goto qlnx_load_exit0;
7281 
7282 qlnx_load_exit2:
7283         qlnx_free_mem_load(ha);
7284 
7285 qlnx_load_exit1:
7286         ha->num_rss = 0;
7287 
7288 qlnx_load_exit0:
7289 	QL_DPRINT2(ha, "exit [%d]\n", rc);
7290         return rc;
7291 }
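/*
 * Summary of the bring-up sequence in qlnx_load() above:
 *
 *	1. allocate the fastpath arrays and per-queue memory
 *	2. hook and CPU-bind one interrupt per RSS queue
 *	3. start the vport, Rx and Tx queues via qlnx_start_queues()
 *	4. program the primary MAC and Rx filters
 *	5. request link-up and arm the 1 Hz stats timer
 *
 * The error labels unwind in reverse: free queue memory, clear num_rss.
 */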
7292 
7293 static void
7294 qlnx_drain_soft_lro(qlnx_host_t *ha)
7295 {
7296 #ifdef QLNX_SOFT_LRO
7297 
7298 	if_t		ifp;
7299 	int		i;
7300 
7301 	ifp = ha->ifp;
7302 
7303 	if (if_getcapenable(ifp) & IFCAP_LRO) {
7304 	        for (i = 0; i < ha->num_rss; i++) {
7305 			struct qlnx_fastpath *fp = &ha->fp_array[i];
7306 			struct lro_ctrl *lro;
7307 
7308 			lro = &fp->rxq->lro;
7309 
7310 			tcp_lro_flush_all(lro);
7311                 }
7312 	}
7313 
7314 #endif /* #ifdef QLNX_SOFT_LRO */
7315 
7316 	return;
7317 }
7318 
7319 static void
7320 qlnx_unload(qlnx_host_t *ha)
7321 {
7322 	struct ecore_dev	*cdev;
7323         device_t		dev;
7324 	int			i;
7325 
7326 	cdev = &ha->cdev;
7327         dev = ha->pci_dev;
7328 
7329 	QL_DPRINT2(ha, "enter\n");
7330         QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
7331 
7332 	if (ha->state == QLNX_STATE_OPEN) {
7333 		qlnx_set_link(ha, false);
7334 		qlnx_clean_filters(ha);
7335 		qlnx_stop_queues(ha);
7336 		ecore_hw_stop_fastpath(cdev);
7337 
7338 		for (i = 0; i < ha->num_rss; i++) {
7339 			if (ha->irq_vec[i].handle) {
7340 				(void)bus_teardown_intr(dev,
7341 					ha->irq_vec[i].irq,
7342 					ha->irq_vec[i].handle);
7343 				ha->irq_vec[i].handle = NULL;
7344 			}
7345 		}
7346 
7347 		qlnx_drain_fp_taskqueues(ha);
7348 		qlnx_drain_soft_lro(ha);
7349         	qlnx_free_mem_load(ha);
7350 	}
7351 
7352 	if (ha->flags.callout_init)
7353 		callout_drain(&ha->qlnx_callout);
7354 
7355 	qlnx_mdelay(__func__, 1000);
7356 
7357         ha->state = QLNX_STATE_CLOSED;
7358 
7359 	QL_DPRINT2(ha, "exit\n");
7360 	return;
7361 }
7362 
7363 static int
7364 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7365 {
7366 	int			rval = -1;
7367 	struct ecore_hwfn	*p_hwfn;
7368 	struct ecore_ptt	*p_ptt;
7369 
7370 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7371 
7372 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7373 	p_ptt = ecore_ptt_acquire(p_hwfn);
7374 
7375         if (!p_ptt) {
7376 		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7377                 return (rval);
7378         }
7379 
7380         rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7381 
7382 	if (rval == DBG_STATUS_OK)
7383                 rval = 0;
7384         else {
7385 		QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7386 			"[0x%x]\n", rval);
7387 	}
7388 
7389         ecore_ptt_release(p_hwfn, p_ptt);
7390 
7391         return (rval);
7392 }
7393 
7394 static int
7395 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7396 {
7397 	int			rval = -1;
7398 	struct ecore_hwfn	*p_hwfn;
7399 	struct ecore_ptt	*p_ptt;
7400 
7401 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7402 
7403 	p_hwfn = &ha->cdev.hwfns[hwfn_index];
7404 	p_ptt = ecore_ptt_acquire(p_hwfn);
7405 
7406         if (!p_ptt) {
7407 		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7408                 return (rval);
7409         }
7410 
7411         rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7412 
7413 	if (rval == DBG_STATUS_OK)
7414                 rval = 0;
7415         else {
7416 		QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7417 			" [0x%x]\n", rval);
7418 	}
7419 
7420         ecore_ptt_release(p_hwfn, p_ptt);
7421 
7422         return (rval);
7423 }
7424 
7425 static void
7426 qlnx_sample_storm_stats(qlnx_host_t *ha)
7427 {
7428         int			i, index;
7429         struct ecore_dev	*cdev;
7430 	qlnx_storm_stats_t	*s_stats;
7431 	uint32_t		reg;
7432         struct ecore_ptt	*p_ptt;
7433         struct ecore_hwfn	*hwfn;
7434 
7435 	if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7436 		ha->storm_stats_gather = 0;
7437 		return;
7438 	}
7439 
7440         cdev = &ha->cdev;
7441 
7442         for_each_hwfn(cdev, i) {
7443                 hwfn = &cdev->hwfns[i];
7444 
7445                 p_ptt = ecore_ptt_acquire(hwfn);
7446                 if (!p_ptt)
7447                         return;
7448 
7449 		index = ha->storm_stats_index +
7450 				(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7451 
7452 		s_stats = &ha->storm_stats[index];
7453 
7454 		/* XSTORM */
7455 		reg = XSEM_REG_FAST_MEMORY +
7456 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7457 		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7458 
7459 		reg = XSEM_REG_FAST_MEMORY +
7460 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7461 		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7462 
7463 		reg = XSEM_REG_FAST_MEMORY +
7464 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7465 		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7466 
7467 		reg = XSEM_REG_FAST_MEMORY +
7468 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7469 		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7470 
7471 		/* YSTORM */
7472 		reg = YSEM_REG_FAST_MEMORY +
7473 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7474 		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7475 
7476 		reg = YSEM_REG_FAST_MEMORY +
7477 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7478 		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7479 
7480 		reg = YSEM_REG_FAST_MEMORY +
7481 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7482 		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7483 
7484 		reg = YSEM_REG_FAST_MEMORY +
7485 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7486 		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7487 
7488 		/* PSTORM */
7489 		reg = PSEM_REG_FAST_MEMORY +
7490 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7491 		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7492 
7493 		reg = PSEM_REG_FAST_MEMORY +
7494 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7495 		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7496 
7497 		reg = PSEM_REG_FAST_MEMORY +
7498 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7499 		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7500 
7501 		reg = PSEM_REG_FAST_MEMORY +
7502 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7503 		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7504 
7505 		/* TSTORM */
7506 		reg = TSEM_REG_FAST_MEMORY +
7507 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7508 		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7509 
7510 		reg = TSEM_REG_FAST_MEMORY +
7511 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7512 		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7513 
7514 		reg = TSEM_REG_FAST_MEMORY +
7515 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7516 		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7517 
7518 		reg = TSEM_REG_FAST_MEMORY +
7519 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7520 		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7521 
7522 		/* MSTORM */
7523 		reg = MSEM_REG_FAST_MEMORY +
7524 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7525 		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7526 
7527 		reg = MSEM_REG_FAST_MEMORY +
7528 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7529 		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7530 
7531 		reg = MSEM_REG_FAST_MEMORY +
7532 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7533 		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7534 
7535 		reg = MSEM_REG_FAST_MEMORY +
7536 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7537 		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7538 
7539 		/* USTORM */
7540 		reg = USEM_REG_FAST_MEMORY +
7541 				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7542 		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7543 
7544 		reg = USEM_REG_FAST_MEMORY +
7545 				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7546 		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7547 
7548 		reg = USEM_REG_FAST_MEMORY +
7549 				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7550 		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7551 
7552 		reg = USEM_REG_FAST_MEMORY +
7553 				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7554 		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7555 
7556                 ecore_ptt_release(hwfn, p_ptt);
7557         }
7558 
7559 	ha->storm_stats_index++;
7560 
7561         return;
7562 }
7563 
7564 /*
7565  * Name: qlnx_dump_buf8
7566  * Function: dumps a buffer as bytes
7567  */
7568 static void
7569 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
7570 {
7571         device_t	dev;
7572         uint32_t	i = 0;
7573         uint8_t		*buf;
7574 
7575         dev = ha->pci_dev;
7576         buf = dbuf;
7577 
7578         device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
7579 
7580         while (len >= 16) {
7581                 device_printf(dev,"0x%08x:"
7582                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7583                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7584                         buf[0], buf[1], buf[2], buf[3],
7585                         buf[4], buf[5], buf[6], buf[7],
7586                         buf[8], buf[9], buf[10], buf[11],
7587                         buf[12], buf[13], buf[14], buf[15]);
7588                 i += 16;
7589                 len -= 16;
7590                 buf += 16;
7591         }
7592         switch (len) {
7593         case 1:
7594                 device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
7595                 break;
7596         case 2:
7597                 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
7598                 break;
7599         case 3:
7600                 device_printf(dev,"0x%08x: %02x %02x %02x\n",
7601                         i, buf[0], buf[1], buf[2]);
7602                 break;
7603         case 4:
7604                 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
7605                         buf[0], buf[1], buf[2], buf[3]);
7606                 break;
7607         case 5:
7608                 device_printf(dev,"0x%08x:"
7609                         " %02x %02x %02x %02x %02x\n", i,
7610                         buf[0], buf[1], buf[2], buf[3], buf[4]);
7611                 break;
7612         case 6:
7613                 device_printf(dev,"0x%08x:"
7614                         " %02x %02x %02x %02x %02x %02x\n", i,
7615                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
7616                 break;
7617         case 7:
7618                 device_printf(dev,"0x%08x:"
7619                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
7620                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
7621                 break;
7622         case 8:
7623                 device_printf(dev,"0x%08x:"
7624                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7625                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7626                         buf[7]);
7627                 break;
7628         case 9:
7629                 device_printf(dev,"0x%08x:"
7630                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7631                         " %02x\n", i,
7632                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7633                         buf[7], buf[8]);
7634                 break;
7635         case 10:
7636                 device_printf(dev,"0x%08x:"
7637                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7638                         " %02x %02x\n", i,
7639                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7640                         buf[7], buf[8], buf[9]);
7641                 break;
7642         case 11:
7643                 device_printf(dev,"0x%08x:"
7644                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7645                         " %02x %02x %02x\n", i,
7646                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7647                         buf[7], buf[8], buf[9], buf[10]);
7648                 break;
7649         case 12:
7650                 device_printf(dev,"0x%08x:"
7651                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7652                         " %02x %02x %02x %02x\n", i,
7653                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7654                         buf[7], buf[8], buf[9], buf[10], buf[11]);
7655                 break;
7656         case 13:
7657                 device_printf(dev,"0x%08x:"
7658                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7659                         " %02x %02x %02x %02x %02x\n", i,
7660                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7661                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
7662                 break;
7663         case 14:
7664                 device_printf(dev,"0x%08x:"
7665                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7666                         " %02x %02x %02x %02x %02x %02x\n", i,
7667                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7668                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7669                         buf[13]);
7670                 break;
7671         case 15:
7672                 device_printf(dev,"0x%08x:"
7673                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7674                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
7675                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7676                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7677                         buf[13], buf[14]);
7678                 break;
7679         default:
7680                 break;
7681         }
7682 
7683         device_printf(dev, "%s: %s dump end\n", __func__, msg);
7684 
7685         return;
7686 }
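/*
 * Usage sketch (illustrative; "sample" is a hypothetical buffer):
 *
 *	uint8_t sample[20] = { 0 };
 *	qlnx_dump_buf8(ha, "sample", sample, sizeof(sample));
 *
 * This prints one line for the first 16 bytes and hits "case 4" above
 * for the remaining tail.
 */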
7687 
7688 #ifdef CONFIG_ECORE_SRIOV
7689 
7690 static void
7691 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
7692 {
7693         struct ecore_public_vf_info *vf_info;
7694 
7695         vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);
7696 
7697         if (!vf_info)
7698                 return;
7699 
7700         /* Clear the VF mac */
7701         memset(vf_info->forced_mac, 0, ETH_ALEN);
7702 
7703         vf_info->forced_vlan = 0;
7704 
7705 	return;
7706 }
7707 
7708 void
7709 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
7710 {
7711 	__qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);
7712 	return;
7713 }
7714 
7715 static int
7716 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
7717 	struct ecore_filter_ucast *params)
7718 {
7719         struct ecore_public_vf_info *vf;
7720 
7721 	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
7722 		QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
7723 			"VF[%d] vport not initialized\n", vfid);
7724 		return ECORE_INVAL;
7725 	}
7726 
7727         vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
7728         if (!vf)
7729                 return -EINVAL;
7730 
7731         /* No real decision to make; Store the configured MAC */
7732         if (params->type == ECORE_FILTER_MAC ||
7733             params->type == ECORE_FILTER_MAC_VLAN)
7734                 memcpy(params->mac, vf->forced_mac, ETH_ALEN);
7735 
7736         return 0;
7737 }
7738 
7739 int
7740 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
7741 {
7742 	return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
7743 }
7744 
7745 static int
7746 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
7747         struct ecore_sp_vport_update_params *params, uint16_t *tlvs)
7748 {
7749 	if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
7750 		QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
7751 			"VF[%d] vport not initialized\n", vfid);
7752 		return ECORE_INVAL;
7753 	}
7754 
7755         /* Untrusted VFs can't even be trusted to know that fact.
7756          * Simply indicate everything is configured fine, and trace
7757          * configuration 'behind their back'.
7758          */
7759         if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
7760                 return 0;
7761 
7762         return 0;
7763 
7764 }
7765 int
7766 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
7767 {
7768 	return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
7769 }
7770 
7771 static int
7772 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
7773 {
7774 	int			i;
7775 	struct ecore_dev	*cdev;
7776 
7777 	cdev = p_hwfn->p_dev;
7778 
7779 	for (i = 0; i < cdev->num_hwfns; i++) {
7780 		if (&cdev->hwfns[i] == p_hwfn)
7781 			break;
7782 	}
7783 
7784 	if (i >= cdev->num_hwfns)
7785 		return (-1);
7786 
7787 	return (i);
7788 }
7789 
7790 static int
7791 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
7792 {
7793 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7794 	int i;
7795 
7796 	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
7797 		ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
7798 
7799 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7800 		return (-1);
7801 
7802 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7803 		atomic_testandset_32(&ha->sriov_task[i].flags,
7804 			QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);
7805 
7806 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7807 			&ha->sriov_task[i].pf_task);
7808 	}
7809 
7810 	return (ECORE_SUCCESS);
7811 }
7812 
7813 int
7814 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
7815 {
7816 	return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
7817 }
7818 
7819 static void
7820 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
7821 {
7822 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7823 	int i;
7824 
7825 	if (!ha->sriov_initialized)
7826 		return;
7827 
7828 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p \n",
7829 		ha, p_hwfn->p_dev, p_hwfn);
7830 
7831 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7832 		return;
7833 
7834 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7835 		atomic_testandset_32(&ha->sriov_task[i].flags,
7836 			QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);
7837 
7838 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7839 			&ha->sriov_task[i].pf_task);
7840 	}
7841 
7842 	return;
7843 }
7844 
7845 void
7846 qlnx_vf_flr_update(void *p_hwfn)
7847 {
7848 	__qlnx_vf_flr_update(p_hwfn);
7849 
7850 	return;
7851 }
7852 
7853 #ifndef QLNX_VF
7854 
7855 static void
7856 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
7857 {
7858 	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7859 	int i;
7860 
7861 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p \n",
7862 		ha, p_hwfn->p_dev, p_hwfn);
7863 
7864 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7865 		return;
7866 
7867 	QL_DPRINT2(ha,  "ha = %p cdev = %p p_hwfn = %p i = %d\n",
7868 		ha, p_hwfn->p_dev, p_hwfn, i);
7869 
7870 	if (ha->sriov_task[i].pf_taskqueue != NULL) {
7871 		atomic_testandset_32(&ha->sriov_task[i].flags,
7872 			QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);
7873 
7874 		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7875 			&ha->sriov_task[i].pf_task);
7876 	}
7877 }
7878 
7879 static void
7880 qlnx_initialize_sriov(qlnx_host_t *ha)
7881 {
7882 	device_t	dev;
7883 	nvlist_t	*pf_schema, *vf_schema;
7884 	int		iov_error;
7885 
7886 	dev = ha->pci_dev;
7887 
7888 	pf_schema = pci_iov_schema_alloc_node();
7889 	vf_schema = pci_iov_schema_alloc_node();
7890 
7891 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
7892 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
7893 		IOV_SCHEMA_HASDEFAULT, FALSE);
7894 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
7895 		IOV_SCHEMA_HASDEFAULT, FALSE);
7896 	pci_iov_schema_add_uint16(vf_schema, "num-queues",
7897 		IOV_SCHEMA_HASDEFAULT, 1);
7898 
7899 	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
7900 
7901 	if (iov_error != 0) {
7902 		ha->sriov_initialized = 0;
7903 	} else {
7904 		device_printf(dev, "SRIOV initialized\n");
7905 		ha->sriov_initialized = 1;
7906 	}
7907 
7908 	return;
7909 }
7910 
7911 static void
7912 qlnx_sriov_disable(qlnx_host_t *ha)
7913 {
7914 	struct ecore_dev *cdev;
7915 	int i, j;
7916 
7917 	cdev = &ha->cdev;
7918 
7919 	ecore_iov_set_vfs_to_disable(cdev, true);
7920 
7921 	for_each_hwfn(cdev, i) {
7922 		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
7923 		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
7924 
7925 		if (!ptt) {
7926 			QL_DPRINT1(ha, "Failed to acquire ptt\n");
7927 			return;
7928 		}
7929 		/* Clean WFQ db and configure equal weight for all vports */
7930 		ecore_clean_wfq_db(hwfn, ptt);
7931 
7932 		ecore_for_each_vf(hwfn, j) {
7933 			int k = 0;
7934 
7935 			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
7936 				continue;
7937 
7938 			if (ecore_iov_is_vf_started(hwfn, j)) {
7939 				/* Wait until VF is disabled before releasing */
7940 
7941 				for (k = 0; k < 100; k++) {
7942 					if (!ecore_iov_is_vf_stopped(hwfn, j)) {
7943 						qlnx_mdelay(__func__, 10);
7944 					} else
7945 						break;
7946 				}
7947 			}
7948 
7949 			if (k < 100)
7950 				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
7951                                                           ptt, j);
7952 			else {
7953 				QL_DPRINT1(ha,
7954 					"Timeout waiting for VF's FLR to end\n");
7955 			}
7956 		}
7957 		ecore_ptt_release(hwfn, ptt);
7958 	}
7959 
7960 	ecore_iov_set_vfs_to_disable(cdev, false);
7961 
7962 	return;
7963 }
7964 
7965 static void
7966 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
7967 	struct ecore_iov_vf_init_params *params)
7968 {
7969         u16 base, i;
7970 
7971         /* Since we have an equal resource distribution per-VF, and we assume
7972          * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
7973          * sequentially from there.
7974          */
7975         base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;
7976 
7977         params->rel_vf_id = vfid;
7978 
7979         for (i = 0; i < params->num_queues; i++) {
7980                 params->req_rx_queue[i] = base + i;
7981                 params->req_tx_queue[i] = base + i;
7982         }
7983 
7984         /* PF uses indices 0 for itself; Set vport/RSS afterwards */
7985         params->vport_id = vfid + 1;
7986         params->rss_eng_id = vfid + 1;
7987 
7988 	return;
7989 }
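/*
 * Worked example (illustrative values): if the PF owns the first 16 L2
 * queues (FEAT_NUM(hwfn, ECORE_PF_L2_QUE) == 16) and each VF is given
 * params->num_queues == 4, then VF 0 is assigned queues 16..19, VF 1
 * queues 20..23, and so on.  vport_id and rss_eng_id are vfid + 1
 * because id 0 belongs to the PF.
 */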
7990 
7991 static int
7992 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
7993 {
7994 	qlnx_host_t		*ha;
7995 	struct ecore_dev	*cdev;
7996 	struct ecore_iov_vf_init_params params;
7997 	int ret, j, i;
7998 	uint32_t max_vfs;
7999 
8000 	if ((ha = device_get_softc(dev)) == NULL) {
8001 		device_printf(dev, "%s: cannot get softc\n", __func__);
8002 		return (-1);
8003 	}
8004 
8005 	if (qlnx_create_pf_taskqueues(ha) != 0)
8006 		goto qlnx_iov_init_err0;
8007 
8008 	cdev = &ha->cdev;
8009 
8010 	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);
8011 
8012 	QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
8013 		dev, num_vfs, max_vfs);
8014 
8015         if (num_vfs >= max_vfs) {
8016                 QL_DPRINT1(ha, "Can start at most %d VFs\n",
8017                           (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
8018 		goto qlnx_iov_init_err0;
8019         }
8020 
8021 	ha->vf_attr = malloc(sizeof(qlnx_vf_attr_t) * num_vfs, M_QLNXBUF,
8022 				M_NOWAIT);
8023 
8024 	if (ha->vf_attr == NULL)
8025 		goto qlnx_iov_init_err0;
8026 
8027         memset(&params, 0, sizeof(params));
8028 
8029         /* Initialize HW for VF access */
8030         for_each_hwfn(cdev, j) {
8031                 struct ecore_hwfn *hwfn = &cdev->hwfns[j];
8032                 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
8033 
8034                 /* Make sure not to use more than 16 queues per VF */
8035                 params.num_queues = min_t(int,
8036                                           (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
8037                                           16);
8038 
8039                 if (!ptt) {
8040                         QL_DPRINT1(ha, "Failed to acquire ptt\n");
8041                         goto qlnx_iov_init_err1;
8042                 }
8043 
8044                 for (i = 0; i < num_vfs; i++) {
8045                         if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
8046                                 continue;
8047 
8048                         qlnx_sriov_enable_qid_config(hwfn, i, &params);
8049 
8050                         ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);
8051 
8052                         if (ret) {
8053                                 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
8054                                 ecore_ptt_release(hwfn, ptt);
8055                                 goto qlnx_iov_init_err1;
8056                         }
8057                 }
8058 
8059                 ecore_ptt_release(hwfn, ptt);
8060         }
8061 
8062 	ha->num_vfs = num_vfs;
8063 	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);
8064 
8065 	QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);
8066 
8067 	return (0);
8068 
8069 qlnx_iov_init_err1:
8070 	qlnx_sriov_disable(ha);
8071 
8072 qlnx_iov_init_err0:
8073 	qlnx_destroy_pf_taskqueues(ha);
8074 	ha->num_vfs = 0;
8075 
8076 	return (-1);
8077 }
8078 
8079 static void
8080 qlnx_iov_uninit(device_t dev)
8081 {
8082 	qlnx_host_t	*ha;
8083 
8084 	if ((ha = device_get_softc(dev)) == NULL) {
8085 		device_printf(dev, "%s: cannot get softc\n", __func__);
8086 		return;
8087 	}
8088 
8089 	QL_DPRINT2(ha," dev = %p enter\n", dev);
8090 
8091 	qlnx_sriov_disable(ha);
8092 	qlnx_destroy_pf_taskqueues(ha);
8093 
8094 	free(ha->vf_attr, M_QLNXBUF);
8095 	ha->vf_attr = NULL;
8096 
8097 	ha->num_vfs = 0;
8098 
8099 	QL_DPRINT2(ha," dev = %p exit\n", dev);
8100 	return;
8101 }
8102 
8103 static int
8104 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
8105 {
8106 	qlnx_host_t	*ha;
8107 	qlnx_vf_attr_t	*vf_attr;
8108 	unsigned const char *mac;
8109 	size_t size;
8110 	struct ecore_hwfn *p_hwfn;
8111 
8112 	if ((ha = device_get_softc(dev)) == NULL) {
8113 		device_printf(dev, "%s: cannot get softc\n", __func__);
8114 		return (-1);
8115 	}
8116 
8117 	QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);
8118 
8119 	if (vfnum > (ha->num_vfs - 1)) {
8120 		QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
8121 			vfnum, (ha->num_vfs - 1));
		return (-1);
8122 	}
8123 
8124 	vf_attr = &ha->vf_attr[vfnum];
8125 
8126         if (nvlist_exists_binary(params, "mac-addr")) {
8127                 mac = nvlist_get_binary(params, "mac-addr", &size);
8128                 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
8129 		device_printf(dev,
8130 			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
8131 			__func__, vf_attr->mac_addr[0],
8132 			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
8133 			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
8134 			vf_attr->mac_addr[5]);
8135 		p_hwfn = &ha->cdev.hwfns[0];
8136 		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
8137 			vfnum);
8138 	}
8139 
8140 	QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);
8141 	return (0);
8142 }
8143 
8144 static void
8145 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8146 {
8147         uint64_t events[ECORE_VF_ARRAY_LENGTH];
8148         struct ecore_ptt *ptt;
8149         int i;
8150 
8151         ptt = ecore_ptt_acquire(p_hwfn);
8152         if (!ptt) {
8153                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8154 		__qlnx_pf_vf_msg(p_hwfn, 0);
8155                 return;
8156         }
8157 
8158         ecore_iov_pf_get_pending_events(p_hwfn, events);
8159 
8160         QL_DPRINT2(ha, "Event mask of VF events:"
8161 		"0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n",
8162                    events[0], events[1], events[2]);
8163 
8164         ecore_for_each_vf(p_hwfn, i) {
8165                 /* Skip VFs with no pending messages */
8166                 if (!(events[i / 64] & (1ULL << (i % 64))))
8167                         continue;
8168 
8169 		QL_DPRINT2(ha,
8170                            "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
8171                            i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
8172 
8173                 /* Copy VF's message to PF's request buffer for that VF */
8174                 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
8175                         continue;
8176 
8177                 ecore_iov_process_mbx_req(p_hwfn, ptt, i);
8178         }
8179 
8180         ecore_ptt_release(p_hwfn, ptt);
8181 
8182 	return;
8183 }
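/*
 * Note: events[] is a bitmap packing 64 VFs per 64-bit word, so VF i has
 * a pending mailbox message iff (events[i / 64] & (1ULL << (i % 64))) is
 * set; e.g. VF 70 maps to bit 6 of events[1].
 */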
8184 
8185 static void
8186 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8187 {
8188         struct ecore_ptt *ptt;
8189 	int ret;
8190 
8191 	ptt = ecore_ptt_acquire(p_hwfn);
8192 
8193 	if (!ptt) {
8194                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8195 		__qlnx_vf_flr_update(p_hwfn);
8196                 return;
8197 	}
8198 
8199 	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);
8200 
8201 	if (ret) {
8202                 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
8203 	}
8204 
8205 	ecore_ptt_release(p_hwfn, ptt);
8206 
8207 	return;
8208 }
8209 
8210 static void
8211 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8212 {
8213         struct ecore_ptt *ptt;
8214 	int i;
8215 
8216 	ptt = ecore_ptt_acquire(p_hwfn);
8217 
8218 	if (!ptt) {
8219                 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8220 		qlnx_vf_bulleting_update(p_hwfn);
8221                 return;
8222 	}
8223 
8224 	ecore_for_each_vf(p_hwfn, i) {
8225 		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
8226 			p_hwfn, i);
8227 		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
8228 	}
8229 
8230 	ecore_ptt_release(p_hwfn, ptt);
8231 
8232 	return;
8233 }
8234 
8235 static void
8236 qlnx_pf_taskqueue(void *context, int pending)
8237 {
8238 	struct ecore_hwfn	*p_hwfn;
8239 	qlnx_host_t		*ha;
8240 	int			i;
8241 
8242 	p_hwfn = context;
8243 
8244 	if (p_hwfn == NULL)
8245 		return;
8246 
8247 	ha = (qlnx_host_t *)(p_hwfn->p_dev);
8248 
8249 	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
8250 		return;
8251 
8252 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8253 		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
8254 		qlnx_handle_vf_msg(ha, p_hwfn);
8255 
8256 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8257 		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
8258 		qlnx_handle_vf_flr_update(ha, p_hwfn);
8259 
8260 	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8261 		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
8262 		qlnx_handle_bulletin_update(ha, p_hwfn);
8263 
8264 	return;
8265 }
8266 
8267 static int
8268 qlnx_create_pf_taskqueues(qlnx_host_t *ha)
8269 {
8270 	int	i;
8271 	char	tq_name[32];
8272 
8273 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
8274                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
8275 
8276 		bzero(tq_name, sizeof (tq_name));
8277 		snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);
8278 
8279 		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);
8280 
8281 		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
8282 			 taskqueue_thread_enqueue,
8283 			&ha->sriov_task[i].pf_taskqueue);
8284 
8285 		if (ha->sriov_task[i].pf_taskqueue == NULL)
8286 			return (-1);
8287 
8288 		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
8289 			PI_NET, "%s", tq_name);
8290 
8291 		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
8292 	}
8293 
8294 	return (0);
8295 }
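/*
 * Note: one single-threaded taskqueue is created per hwfn so that VF
 * mailbox, FLR and bulletin work raised by __qlnx_pf_vf_msg() and
 * friends is deferred into a PI_NET kernel thread; qlnx_pf_taskqueue()
 * then demultiplexes on the per-hwfn flags word with
 * atomic_testandclear_32().
 */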
8296 
8297 static void
8298 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
8299 {
8300 	int	i;
8301 
8302 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
8303 		if (ha->sriov_task[i].pf_taskqueue != NULL) {
8304 			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
8305 				&ha->sriov_task[i].pf_task);
8306 			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
8307 			ha->sriov_task[i].pf_taskqueue = NULL;
8308 		}
8309 	}
8310 	return;
8311 }
8312 
8313 static void
8314 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
8315 {
8316 	struct ecore_mcp_link_capabilities caps;
8317 	struct ecore_mcp_link_params params;
8318 	struct ecore_mcp_link_state link;
8319 	int i;
8320 
8321 	if (!p_hwfn->pf_iov_info)
8322 		return;
8323 
8324 	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
8325 	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
8326 	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));
8327 
8328 	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
8329         memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
8330         memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
8331 
8332 	QL_DPRINT2(ha, "called\n");
8333 
8334         /* Update bulletin of all future possible VFs with link configuration */
8335         for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
8336                 /* Modify link according to the VF's configured link state */
8337 
8338                 link.link_up = false;
8339 
8340                 if (ha->link_up) {
8341                         link.link_up = true;
8342                         /* Set speed according to maximum supported by HW.
8343                          * that is 40G for regular devices and 100G for CMT
8344                          * mode devices.
8345                          */
8346                         link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
8347 						100000 : link.speed;
8348 		}
8349 		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);
8350                 ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
8351         }
8352 
8353 	qlnx_vf_bulleting_update(p_hwfn);
8354 
8355 	return;
8356 }
8357 #endif /* #ifndef QLNX_VF */
8358 #endif /* #ifdef CONFIG_ECORE_SRIOV */
8359