xref: /freebsd/sys/dev/liquidio/lio_main.c (revision 40427cca7a9ae77b095936fb1954417c290cfb17)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Cavium, Inc. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 /*$FreeBSD$*/
34 
35 #include "lio_bsd.h"
36 #include "lio_common.h"
37 
38 #include "lio_droq.h"
39 #include "lio_iq.h"
40 #include "lio_response_manager.h"
41 #include "lio_device.h"
42 #include "lio_ctrl.h"
43 #include "lio_main.h"
44 #include "lio_network.h"
45 #include "cn23xx_pf_device.h"
46 #include "lio_image.h"
47 #include "lio_ioctl.h"
48 #include "lio_rxtx.h"
49 #include "lio_rss.h"
50 
51 /* Number of milliseconds to wait for DDR initialization */
52 #define LIO_DDR_TIMEOUT	10000
53 #define LIO_MAX_FW_TYPE_LEN	8
54 
55 static char fw_type[LIO_MAX_FW_TYPE_LEN];
56 TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type));
57 
58 /*
59  * Integers that specify number of queues per PF.
60  * Valid range is 0 to 64.
61  * Use 0 to derive from CPU count.
62  */
63 static int	num_queues_per_pf0;
64 static int	num_queues_per_pf1;
65 TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0);
66 TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1);
67 
68 #ifdef RSS
69 static int	lio_rss = 1;
70 TUNABLE_INT("hw.lio.rss", &lio_rss);
71 #endif	/* RSS */
72 
73 /* Hardware LRO */
74 unsigned int	lio_hwlro = 0;
75 TUNABLE_INT("hw.lio.hwlro", &lio_hwlro);
76 
77 /*
78  * Bitmask indicating which consoles have debug
79  * output redirected to syslog.
80  */
81 static unsigned long	console_bitmask;
82 TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask);
83 
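/*
 * All of the hw.lio.* knobs above are loader tunables.  As a purely
 * illustrative (hypothetical) example, they could be set from
 * /boot/loader.conf before this module is loaded:
 *
 *	hw.lio.fw_type="nic"
 *	hw.lio.num_queues_per_pf0=8
 *	hw.lio.hwlro=1
 *	hw.lio.console_bitmask=1
 */
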
84 /*
85  * \brief determines if a given console has debug enabled.
86  * @param console console to check
87  * @returns  1 = enabled. 0 otherwise
88  */
89 int
90 lio_console_debug_enabled(uint32_t console)
91 {
92 
93 	return (console_bitmask >> (console)) & 0x1;
94 }
95 
96 static int	lio_detach(device_t dev);
97 
98 static int	lio_device_init(struct octeon_device *octeon_dev);
99 static int	lio_chip_specific_setup(struct octeon_device *oct);
100 static void	lio_watchdog(void *param);
101 static int	lio_load_firmware(struct octeon_device *oct);
102 static int	lio_nic_starter(struct octeon_device *oct);
103 static int	lio_init_nic_module(struct octeon_device *oct);
104 static int	lio_setup_nic_devices(struct octeon_device *octeon_dev);
105 static int	lio_link_info(struct lio_recv_info *recv_info, void *ptr);
106 static void	lio_if_cfg_callback(struct octeon_device *oct, uint32_t status,
107 				    void *buf);
108 static int	lio_set_rxcsum_command(struct ifnet *ifp, int command,
109 				       uint8_t rx_cmd);
110 static int	lio_setup_glists(struct octeon_device *oct, struct lio *lio,
111 				 int num_iqs);
112 static void	lio_destroy_nic_device(struct octeon_device *oct, int ifidx);
113 static inline void	lio_update_link_status(struct ifnet *ifp,
114 					       union octeon_link_status *ls);
115 static void	lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop);
116 static int	lio_stop_nic_module(struct octeon_device *oct);
117 static void	lio_destroy_resources(struct octeon_device *oct);
118 static int	lio_setup_rx_oom_poll_fn(struct ifnet *ifp);
119 
120 static void	lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid);
121 static void	lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp,
122 				     uint16_t vid);
123 static struct octeon_device *
124 	lio_get_other_octeon_device(struct octeon_device *oct);
125 
126 static int	lio_wait_for_oq_pkts(struct octeon_device *oct);
127 
128 int	lio_send_rss_param(struct lio *lio);
129 static int	lio_dbg_console_print(struct octeon_device *oct,
130 				      uint32_t console_num, char *prefix,
131 				      char *suffix);
132 
133 /* Polling interval for determining when NIC application is alive */
134 #define LIO_STARTER_POLL_INTERVAL_MS	100
135 
136 /*
137  * lio_pci_tbl.
138  * This array contains the list of PCI IDs on which the driver should load.
139  */
140 struct lio_vendor_info {
141 	uint16_t	vendor_id;
142 	uint16_t	device_id;
143 	uint16_t	subdevice_id;
144 	uint8_t		revision_id;
145 	uint8_t		index;
146 };
147 
148 static struct lio_vendor_info lio_pci_tbl[] = {
149 	/* CN2350 10G */
150 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE,
151 		0x02, 0},
152 
153 	/* CN2350 10G */
154 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1,
155 		0x02, 0},
156 
157 	/* CN2360 10G */
158 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE,
159 		0x02, 1},
160 
161 	/* CN2350 25G */
162 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE,
163 		0x02, 2},
164 
165 	/* CN2360 25G */
166 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE,
167 		0x02, 3},
168 
169 	{0, 0, 0, 0, 0}
170 };
171 
172 static char *lio_strings[] = {
173 	"LiquidIO 2350 10GbE Server Adapter",
174 	"LiquidIO 2360 10GbE Server Adapter",
175 	"LiquidIO 2350 25GbE Server Adapter",
176 	"LiquidIO 2360 25GbE Server Adapter",
177 };
178 
179 struct lio_if_cfg_resp {
180 	uint64_t	rh;
181 	struct octeon_if_cfg_info cfg_info;
182 	uint64_t	status;
183 };
184 
185 struct lio_if_cfg_context {
186 	int		octeon_id;
187 	volatile int	cond;
188 };
189 
190 struct lio_rx_ctl_context {
191 	int		octeon_id;
192 	volatile int	cond;
193 };
194 
195 static int
196 lio_probe(device_t dev)
197 {
198 	struct lio_vendor_info	*tbl;
199 
200 	uint16_t	vendor_id;
201 	uint16_t	device_id;
202 	uint16_t	subdevice_id;
203 	uint8_t		revision_id;
204 	char		device_ver[256];
205 
206 	vendor_id = pci_get_vendor(dev);
207 	if (vendor_id != PCI_VENDOR_ID_CAVIUM)
208 		return (ENXIO);
209 
210 	device_id = pci_get_device(dev);
211 	subdevice_id = pci_get_subdevice(dev);
212 	revision_id = pci_get_revid(dev);
213 
214 	tbl = lio_pci_tbl;
215 	while (tbl->vendor_id) {
216 		if ((vendor_id == tbl->vendor_id) &&
217 		    (device_id == tbl->device_id) &&
218 		    (subdevice_id == tbl->subdevice_id) &&
219 		    (revision_id == tbl->revision_id)) {
220 			sprintf(device_ver, "%s, Version - %s",
221 				lio_strings[tbl->index], LIO_VERSION);
222 			device_set_desc_copy(dev, device_ver);
223 			return (BUS_PROBE_DEFAULT);
224 		}
225 
226 		tbl++;
227 	}
228 
229 	return (ENXIO);
230 }
231 
232 static int
233 lio_attach(device_t device)
234 {
235 	struct octeon_device	*oct_dev = NULL;
236 	uint64_t	scratch1;
237 	uint32_t	error;
238 	int		timeout, ret = 1;
239 	uint8_t		bus, dev, function;
240 
241 	oct_dev = lio_allocate_device(device);
242 	if (oct_dev == NULL) {
243 		device_printf(device, "Error: Unable to allocate device\n");
244 		return (-ENOMEM);
245 	}
246 
247 	oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET;
248 	oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET;
249 	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
250 
251 	oct_dev->device = device;
252 	bus = pci_get_bus(device);
253 	dev = pci_get_slot(device);
254 	function = pci_get_function(device);
255 
256 	lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n",
257 		     pci_get_vendor(device), pci_get_device(device), bus, dev,
258 		     function);
259 
260 	if (lio_device_init(oct_dev)) {
261 		lio_dev_err(oct_dev, "Failed to init device\n");
262 		lio_detach(device);
263 		return (-ENOMEM);
264 	}
265 
266 	scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
267 	if (!(scratch1 & 4ULL)) {
268 		/*
269 		 * Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
270 		 * the lio watchdog kernel thread is running for this
271 		 * NIC.  Each NIC gets one watchdog kernel thread.
272 		 */
273 		scratch1 |= 4ULL;
274 		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
275 
276 		error = kproc_create(lio_watchdog, oct_dev,
277 				     &oct_dev->watchdog_task, 0, 0,
278 				     "liowd/%02hhx:%02hhx.%hhx", bus,
279 				     dev, function);
280 		if (!error) {
281 			kproc_resume(oct_dev->watchdog_task);
282 		} else {
283 			oct_dev->watchdog_task = NULL;
284 			lio_dev_err(oct_dev,
285 				    "failed to create kernel_thread\n");
286 			lio_detach(device);
287 			return (-1);
288 		}
289 	}
290 	oct_dev->rx_pause = 1;
291 	oct_dev->tx_pause = 1;
292 
293 	timeout = 0;
294 	while (timeout < LIO_NIC_STARTER_TIMEOUT) {
295 		lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS);
296 		timeout += LIO_STARTER_POLL_INTERVAL_MS;
297 
298 		/*
299 		 * During the boot process interrupts are not available,
300 		 * so poll for the first control message from the firmware.
301 		 */
302 		if (cold)
303 			lio_droq_bh(oct_dev->droq[0], 0);
304 
305 		if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) {
306 			ret = lio_nic_starter(oct_dev);
307 			break;
308 		}
309 	}
310 
311 	if (ret) {
312 		lio_dev_err(oct_dev, "Firmware failed to start\n");
313 		lio_detach(device);
314 		return (-EIO);
315 	}
316 
317 	lio_dev_dbg(oct_dev, "Device is ready\n");
318 
319 	return (0);
320 }
321 
322 static int
323 lio_detach(device_t dev)
324 {
325 	struct octeon_device	*oct_dev = device_get_softc(dev);
326 
327 	lio_dev_dbg(oct_dev, "Stopping device\n");
328 	if (oct_dev->watchdog_task) {
329 		uint64_t	scratch1;
330 
331 		kproc_suspend(oct_dev->watchdog_task, 0);
332 
333 		scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
334 		scratch1 &= ~4ULL;
335 		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
336 	}
337 
338 	if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP))
339 		lio_stop_nic_module(oct_dev);
340 
341 	/*
342 	 * Reset the octeon device and clean up all memory allocated for
343 	 * the octeon device by the driver.
344 	 */
345 	lio_destroy_resources(oct_dev);
346 
347 	lio_dev_info(oct_dev, "Device removed\n");
348 
349 	/*
350 	 * This octeon device has been removed. Update the global
351 	 * data structure to reflect this. Free the device structure.
352 	 */
353 	lio_free_device_mem(oct_dev);
354 	return (0);
355 }
356 
357 static int
358 lio_shutdown(device_t dev)
359 {
360 	struct octeon_device	*oct_dev = device_get_softc(dev);
361 	struct lio	*lio = if_getsoftc(oct_dev->props.ifp);
362 
363 	lio_send_rx_ctrl_cmd(lio, 0);
364 
365 	return (0);
366 }
367 
368 static int
369 lio_suspend(device_t dev)
370 {
371 
372 	return (ENXIO);
373 }
374 
375 static int
376 lio_resume(device_t dev)
377 {
378 
379 	return (ENXIO);
380 }
381 
382 static int
383 lio_event(struct module *mod, int event, void *junk)
384 {
385 
386 	switch (event) {
387 	case MOD_LOAD:
388 		lio_init_device_list(LIO_CFG_TYPE_DEFAULT);
389 		break;
390 	default:
391 		break;
392 	}
393 
394 	return (0);
395 }
396 
397 /*********************************************************************
398  *  FreeBSD Device Interface Entry Points
399  * *******************************************************************/
400 static device_method_t lio_methods[] = {
401 	/* Device interface */
402 	DEVMETHOD(device_probe, lio_probe),
403 	DEVMETHOD(device_attach, lio_attach),
404 	DEVMETHOD(device_detach, lio_detach),
405 	DEVMETHOD(device_shutdown, lio_shutdown),
406 	DEVMETHOD(device_suspend, lio_suspend),
407 	DEVMETHOD(device_resume, lio_resume),
408 	DEVMETHOD_END
409 };
410 
411 static driver_t lio_driver = {
412 	LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device),
413 };
414 
415 devclass_t lio_devclass;
416 DRIVER_MODULE(lio, pci, lio_driver, lio_devclass, lio_event, 0);
417 
418 MODULE_DEPEND(lio, pci, 1, 1, 1);
419 MODULE_DEPEND(lio, ether, 1, 1, 1);
420 MODULE_DEPEND(lio, firmware, 1, 1, 1);
421 
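/*
 * \brief Check whether the hw.lio.fw_type tunable selects
 * LIO_FW_NAME_TYPE_NONE.  When it does, the driver assumes firmware is
 * already present on the adapter and skips the soft reset and firmware
 * download in lio_device_init().
 */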
422 static bool
423 fw_type_is_none(void)
424 {
425 	return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
426 		       sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
427 }
428 
429 /*
430  * \brief Device initialization for each Octeon device that is probed
431  * @param octeon_dev  octeon device
432  */
433 static int
434 lio_device_init(struct octeon_device *octeon_dev)
435 {
436 	unsigned long	ddr_timeout = LIO_DDR_TIMEOUT;
437 	char	*dbg_enb = NULL;
438 	int	fw_loaded = 0;
439 	int	i, j, ret;
440 	uint8_t	bus, dev, function;
441 	char	bootcmd[] = "\n";
442 
443 	bus = pci_get_bus(octeon_dev->device);
444 	dev = pci_get_slot(octeon_dev->device);
445 	function = pci_get_function(octeon_dev->device);
446 
447 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE);
448 
449 	/* Enable access to the octeon device */
450 	if (pci_enable_busmaster(octeon_dev->device)) {
451 		lio_dev_err(octeon_dev, "pci_enable_device failed\n");
452 		return (1);
453 	}
454 
455 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE);
456 
457 	/* Identify the Octeon type and map the BAR address space. */
458 	if (lio_chip_specific_setup(octeon_dev)) {
459 		lio_dev_err(octeon_dev, "Chip specific setup failed\n");
460 		return (1);
461 	}
462 
463 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE);
464 
465 	/*
466 	 * Only add a reference after setting status 'LIO_DEV_PCI_MAP_DONE',
467 	 * since that is what is required for the reference to be removed
468 	 * during de-initialization (see 'lio_destroy_resources').
469 	 */
470 	lio_register_device(octeon_dev, bus, dev, function, true);
471 
472 
473 	octeon_dev->app_mode = LIO_DRV_INVALID_APP;
474 
475 	if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) {
476 		fw_loaded = 0;
477 		/* Do a soft reset of the Octeon device. */
478 		if (octeon_dev->fn_list.soft_reset(octeon_dev))
479 			return (1);
480 
481 		/* things might have changed */
482 		if (!lio_cn23xx_pf_fw_loaded(octeon_dev))
483 			fw_loaded = 0;
484 		else
485 			fw_loaded = 1;
486 	} else {
487 		fw_loaded = 1;
488 	}
489 
490 	/*
491 	 * Initialize the dispatch mechanism used to push packets arriving on
492 	 * Octeon Output queues.
493 	 */
494 	if (lio_init_dispatch_list(octeon_dev))
495 		return (1);
496 
497 	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
498 				 LIO_OPCODE_NIC_CORE_DRV_ACTIVE,
499 				 lio_core_drv_init, octeon_dev);
500 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE);
501 
502 	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
503 	if (ret) {
504 		lio_dev_err(octeon_dev,
505 			    "Failed to configure device registers\n");
506 		return (ret);
507 	}
508 
509 	/* Initialize soft command buffer pool */
510 	if (lio_setup_sc_buffer_pool(octeon_dev)) {
511 		lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n");
512 		return (1);
513 	}
514 
515 	atomic_store_rel_int(&octeon_dev->status,
516 			     LIO_DEV_SC_BUFF_POOL_INIT_DONE);
517 
518 	if (lio_allocate_ioq_vector(octeon_dev)) {
519 		lio_dev_err(octeon_dev,
520 			    "IOQ vector allocation failed\n");
521 		return (1);
522 	}
523 
524 	atomic_store_rel_int(&octeon_dev->status,
525 			     LIO_DEV_MSIX_ALLOC_VECTOR_DONE);
526 
527 	for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
528 		octeon_dev->instr_queue[i] =
529 			malloc(sizeof(struct lio_instr_queue),
530 			       M_DEVBUF, M_NOWAIT | M_ZERO);
531 		if (octeon_dev->instr_queue[i] == NULL)
532 			return (1);
533 	}
534 
535 	/* Setup the data structures that manage this Octeon's Input queues. */
536 	if (lio_setup_instr_queue0(octeon_dev)) {
537 		lio_dev_err(octeon_dev,
538 			    "Instruction queue initialization failed\n");
539 		return (1);
540 	}
541 
542 	atomic_store_rel_int(&octeon_dev->status,
543 			     LIO_DEV_INSTR_QUEUE_INIT_DONE);
544 
545 	/*
546 	 * Initialize lists to manage the requests of different types that
547 	 * arrive from user & kernel applications for this octeon device.
548 	 */
549 
550 	if (lio_setup_response_list(octeon_dev)) {
551 		lio_dev_err(octeon_dev, "Response list allocation failed\n");
552 		return (1);
553 	}
554 
555 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE);
556 
557 	for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
558 		octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]),
559 					     M_DEVBUF, M_NOWAIT | M_ZERO);
560 		if (octeon_dev->droq[i] == NULL)
561 			return (1);
562 	}
563 
564 	if (lio_setup_output_queue0(octeon_dev)) {
565 		lio_dev_err(octeon_dev, "Output queue initialization failed\n");
566 		return (1);
567 	}
568 
569 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE);
570 
571 	/*
572 	 * Setup the interrupt handler and record the INT SUM register address
573 	 */
574 	if (lio_setup_interrupt(octeon_dev,
575 				octeon_dev->sriov_info.num_pf_rings))
576 		return (1);
577 
578 	/* Enable Octeon device interrupts */
579 	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
580 
581 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE);
582 
583 	/*
584 	 * Send Credit for Octeon Output queues. Credits are always sent BEFORE
585 	 * the output queue is enabled.
586 	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
587 	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
588 	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
589 	 * before any credits have been issued, causing the ring to be reset
590 	 * (and the f/w appear to never have started).
591 	 */
592 	for (j = 0; j < octeon_dev->num_oqs; j++)
593 		lio_write_csr32(octeon_dev,
594 				octeon_dev->droq[j]->pkts_credit_reg,
595 				octeon_dev->droq[j]->max_count);
596 
597 	/* Enable the input and output queues for this Octeon device */
598 	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
599 	if (ret) {
600 		lio_dev_err(octeon_dev, "Failed to enable input/output queues\n");
601 		return (ret);
602 	}
603 
604 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE);
605 
606 	if (!fw_loaded) {
607 		lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n");
608 		if (!ddr_timeout) {
609 			lio_dev_info(octeon_dev,
610 				     "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
611 		}
612 
613 		lio_sleep_timeout(LIO_RESET_MSECS);
614 
615 		/*
616 		 * Wait for the octeon to initialize DDR after the
617 		 * soft-reset.
618 		 */
619 		while (!ddr_timeout) {
620 			if (pause("-", lio_ms_to_ticks(100))) {
621 				/* user probably pressed Control-C */
622 				return (1);
623 			}
624 		}
625 
626 		ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout);
627 		if (ret) {
628 			lio_dev_err(octeon_dev,
629 				    "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
630 				    ret);
631 			return (1);
632 		}
633 
634 		if (lio_wait_for_bootloader(octeon_dev, 1100)) {
635 			lio_dev_err(octeon_dev, "Board not responding\n");
636 			return (1);
637 		}
638 
639 		/* Divert uboot to take commands from host instead. */
640 		ret = lio_console_send_cmd(octeon_dev, bootcmd, 50);
641 
642 		lio_dev_dbg(octeon_dev, "Initializing consoles\n");
643 		ret = lio_init_consoles(octeon_dev);
644 		if (ret) {
645 			lio_dev_err(octeon_dev, "Could not access board consoles\n");
646 			return (1);
647 		}
648 
649 		/*
650 		 * If console debugging is enabled, pass an empty string to
651 		 * use the default enablement; otherwise pass NULL to leave
652 		 * it disabled.
653 		 */
654 		dbg_enb = lio_console_debug_enabled(0) ? "" : NULL;
655 		ret = lio_add_console(octeon_dev, 0, dbg_enb);
656 
657 		if (ret) {
658 			lio_dev_err(octeon_dev, "Could not access board console\n");
659 			return (1);
660 		} else if (lio_console_debug_enabled(0)) {
661 			/*
662 			 * If console was added AND we're logging console output
663 			 * then set our console print function.
664 			 */
665 			octeon_dev->console[0].print = lio_dbg_console_print;
666 		}
667 
668 		atomic_store_rel_int(&octeon_dev->status,
669 				     LIO_DEV_CONSOLE_INIT_DONE);
670 
671 		lio_dev_dbg(octeon_dev, "Loading firmware\n");
672 
673 		ret = lio_load_firmware(octeon_dev);
674 		if (ret) {
675 			lio_dev_err(octeon_dev, "Could not load firmware to board\n");
676 			return (1);
677 		}
678 	}
679 
680 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK);
681 
682 	return (0);
683 }
684 
685 /*
686  * \brief PCI FLR for each Octeon device.
687  * @param oct octeon device
688  */
689 static void
690 lio_pci_flr(struct octeon_device *oct)
691 {
692 	uint32_t	exppos, status;
693 
694 	pci_find_cap(oct->device, PCIY_EXPRESS, &exppos);
695 
696 	pci_save_state(oct->device);
697 
698 	/* Quiesce the device completely */
699 	pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2);
700 
701 	/* Wait for Transaction Pending bit clean */
702 	lio_mdelay(100);
703 
704 	status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
705 	if (status & PCIEM_STA_TRANSACTION_PND) {
706 		lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
707 		lio_mdelay(5000);	/* 5 seconds, to match the message above */
708 
709 		status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
710 		if (status & PCIEM_STA_TRANSACTION_PND)
711 			lio_dev_info(oct, "Function reset still incomplete after 5s, reset anyway\n");
712 	}
713 
714 	pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2);
715 	lio_mdelay(100);
716 
717 	pci_restore_state(oct->device);
718 }
719 
720 /*
721  * \brief Debug console print function
722  * @param octeon_dev  octeon device
723  * @param console_num console number
724  * @param prefix      first portion of line to display
725  * @param suffix      second portion of line to display
726  *
727  * The OCTEON debug console outputs entire lines (excluding '\n').
728  * Normally, the line will be passed in the 'prefix' parameter.
729  * However, due to buffering, it is possible for a line to be split into two
730  * parts, in which case they will be passed as the 'prefix' parameter and
731  * 'suffix' parameter.
732  */
733 static int
734 lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num,
735 		      char *prefix, char *suffix)
736 {
737 
738 	if (prefix != NULL && suffix != NULL)
739 		lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix);
740 	else if (prefix != NULL)
741 		lio_dev_info(oct, "%u: %s\n", console_num, prefix);
742 	else if (suffix != NULL)
743 		lio_dev_info(oct, "%u: %s\n", console_num, suffix);
744 
745 	return (0);
746 }
747 
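/*
 * \brief Per-NIC watchdog kernel thread.  Polls SLI_SCRATCH2 every two
 * seconds for the mask of crashed or stuck cores and logs each core once.
 * @param param octeon device
 */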
748 static void
749 lio_watchdog(void *param)
750 {
751 	int		core_num;
752 	uint16_t	mask_of_crashed_or_stuck_cores = 0;
753 	struct octeon_device	*oct = param;
754 	bool		err_msg_was_printed[12];
755 
756 	bzero(err_msg_was_printed, sizeof(err_msg_was_printed));
757 
758 	while (1) {
759 		kproc_suspend_check(oct->watchdog_task);
760 		mask_of_crashed_or_stuck_cores =
761 			(uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2);
762 
763 		if (mask_of_crashed_or_stuck_cores) {
764 			struct octeon_device *other_oct;
765 
766 			oct->cores_crashed = true;
767 			other_oct = lio_get_other_octeon_device(oct);
768 			if (other_oct != NULL)
769 				other_oct->cores_crashed = true;
770 
771 			for (core_num = 0; core_num < LIO_MAX_CORES;
772 			     core_num++) {
773 				bool core_crashed_or_got_stuck;
774 
775 				core_crashed_or_got_stuck =
776 				    (mask_of_crashed_or_stuck_cores >>
777 				     core_num) & 1;
778 				if (core_crashed_or_got_stuck &&
779 				    !err_msg_was_printed[core_num]) {
780 					lio_dev_err(oct,
781 						    "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
782 						    core_num);
783 					err_msg_was_printed[core_num] = true;
784 				}
785 			}
786 
787 		}
788 
789 		/* sleep for two seconds */
790 		pause("-", lio_ms_to_ticks(2000));
791 	}
792 }
793 
794 static int
795 lio_chip_specific_setup(struct octeon_device *oct)
796 {
797 	char		*s;
798 	uint32_t	dev_id, rev_id;
799 	int		ret = 1;
800 
801 	dev_id = lio_read_pci_cfg(oct, 0);
802 	rev_id = pci_get_revid(oct->device);
803 	oct->subdevice_id = pci_get_subdevice(oct->device);
804 
805 	switch (dev_id) {
806 	case LIO_CN23XX_PF_PCIID:
807 		oct->chip_id = LIO_CN23XX_PF_VID;
808 		if (pci_get_function(oct->device) == 0) {
809 			if (num_queues_per_pf0 < 0) {
810 				lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n",
811 					     num_queues_per_pf0);
812 				num_queues_per_pf0 = 0;
813 			}
814 
815 			oct->sriov_info.num_pf_rings = num_queues_per_pf0;
816 		} else {
817 			if (num_queues_per_pf1 < 0) {
818 				lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n",
819 					     num_queues_per_pf1);
820 				num_queues_per_pf1 = 0;
821 			}
822 
823 			oct->sriov_info.num_pf_rings = num_queues_per_pf1;
824 		}
825 
826 		ret = lio_cn23xx_pf_setup_device(oct);
827 		s = "CN23XX";
828 		break;
829 
830 	default:
831 		s = "?";
832 		lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id);
833 	}
834 
835 	if (!ret)
836 		lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s,
837 			     OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct),
838 			     lio_get_conf(oct)->card_name, LIO_VERSION);
839 
840 	return (ret);
841 }
842 
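/*
 * \brief Find the other Octeon PF on the same adapter (same PCI bus and
 * slot), if it has been probed.  Used by the watchdog to mark both PFs
 * when cores crash.
 * @param oct octeon device
 * @returns pointer to the sibling device, or NULL if there is none
 */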
843 static struct octeon_device *
844 lio_get_other_octeon_device(struct octeon_device *oct)
845 {
846 	struct octeon_device	*other_oct;
847 
848 	other_oct = lio_get_device(oct->octeon_id + 1);
849 
850 	if ((other_oct != NULL) && other_oct->device) {
851 		int	oct_busnum, other_oct_busnum;
852 
853 		oct_busnum = pci_get_bus(oct->device);
854 		other_oct_busnum = pci_get_bus(other_oct->device);
855 
856 		if (oct_busnum == other_oct_busnum) {
857 			int	oct_slot, other_oct_slot;
858 
859 			oct_slot = pci_get_slot(oct->device);
860 			other_oct_slot = pci_get_slot(other_oct->device);
861 
862 			if (oct_slot == other_oct_slot)
863 				return (other_oct);
864 		}
865 	}
866 	return (NULL);
867 }
868 
869 /*
870  * \brief Load firmware to device
871  * @param oct octeon device
872  *
873  * Maps device to firmware filename, requests firmware, and downloads it
874  */
875 static int
876 lio_load_firmware(struct octeon_device *oct)
877 {
878 	const struct firmware	*fw;
879 	char	*tmp_fw_type = NULL;
880 	int	ret = 0;
881 	char	fw_name[LIO_MAX_FW_FILENAME_LEN];
882 
883 	if (fw_type[0] == '\0')
884 		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
885 	else
886 		tmp_fw_type = fw_type;
887 
888 	sprintf(fw_name, "%s%s_%s%s", LIO_FW_BASE_NAME,
889 		lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX);
890 
891 	fw = firmware_get(fw_name);
892 	if (fw == NULL) {
893 		lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n",
894 			    fw_name);
895 		return (EINVAL);
896 	}
897 
898 	ret = lio_download_firmware(oct, fw->data, fw->datasize);
899 
900 	firmware_put(fw, FIRMWARE_UNLOAD);
901 
902 	return (ret);
903 }
904 
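/*
 * \brief Called once the firmware reports LIO_DEV_CORE_OK.  Verifies
 * that the NIC application is running and brings up the network
 * interfaces.
 * @param oct octeon device
 */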
905 static int
906 lio_nic_starter(struct octeon_device *oct)
907 {
908 	int	ret = 0;
909 
910 	atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING);
911 
912 	if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) {
913 		if (lio_init_nic_module(oct)) {
914 			lio_dev_err(oct, "NIC initialization failed\n");
915 			ret = -1;
916 #ifdef CAVIUM_ONLY_23XX_VF
917 		} else {
918 			if (octeon_enable_sriov(oct) < 0)
919 				ret = -1;
920 #endif
921 		}
922 	} else {
923 		lio_dev_err(oct,
924 			    "Unexpected application running on NIC (%d). Check firmware.\n",
925 			    oct->app_mode);
926 		ret = -1;
927 	}
928 
929 	return (ret);
930 }
931 
932 static int
933 lio_init_nic_module(struct octeon_device *oct)
934 {
935 	int	num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct));
936 	int	retval = 0;
937 
938 	lio_dev_dbg(oct, "Initializing network interfaces\n");
939 
940 	/*
941 	 * Only the default IQ and OQ have been initialized;
942 	 * initialize the rest as well.
943 	 */
944 
945 	/* run port_config command for each port */
946 	oct->ifcount = num_nic_ports;
947 
948 	bzero(&oct->props, sizeof(struct lio_if_props));
949 
950 	oct->props.gmxport = -1;
951 
952 	retval = lio_setup_nic_devices(oct);
953 	if (retval) {
954 		lio_dev_err(oct, "Setup NIC devices failed\n");
955 		goto lio_init_failure;
956 	}
957 
958 	lio_dev_dbg(oct, "Network interfaces ready\n");
959 
960 	return (retval);
961 
962 lio_init_failure:
963 
964 	oct->ifcount = 0;
965 
966 	return (retval);
967 }
968 
969 static int
970 lio_ifmedia_update(struct ifnet *ifp)
971 {
972 	struct lio	*lio = if_getsoftc(ifp);
973 	struct ifmedia	*ifm;
974 
975 	ifm = &lio->ifmedia;
976 
977 	/* We only support Ethernet media type. */
978 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
979 		return (EINVAL);
980 
981 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
982 	case IFM_AUTO:
983 		break;
984 	case IFM_10G_CX4:
985 	case IFM_10G_SR:
986 	case IFM_10G_T:
987 	case IFM_10G_TWINAX:
988 	default:
989 		/* We don't support changing the media type. */
990 		lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n",
991 			    IFM_SUBTYPE(ifm->ifm_media));
992 		return (EINVAL);
993 	}
994 
995 	return (0);
996 }
997 
998 static int
999 lio_get_media_subtype(struct octeon_device *oct)
1000 {
1001 
1002 	switch (oct->subdevice_id) {
1003 	case LIO_CN2350_10G_SUBDEVICE:
1004 	case LIO_CN2350_10G_SUBDEVICE1:
1005 	case LIO_CN2360_10G_SUBDEVICE:
1006 		return (IFM_10G_SR);
1007 
1008 	case LIO_CN2350_25G_SUBDEVICE:
1009 	case LIO_CN2360_25G_SUBDEVICE:
1010 		return (IFM_25G_SR);
1011 	}
1012 
1013 	return (IFM_10G_SR);
1014 }
1015 
1016 static unsigned long
1017 lio_get_baudrate(struct octeon_device *oct)
1018 {
1019 
1020 	switch (oct->subdevice_id) {
1021 	case LIO_CN2350_10G_SUBDEVICE:
1022 	case LIO_CN2350_10G_SUBDEVICE1:
1023 	case LIO_CN2360_10G_SUBDEVICE:
1024 		return (IF_Gbps(10));
1025 
1026 	case LIO_CN2350_25G_SUBDEVICE:
1027 	case LIO_CN2360_25G_SUBDEVICE:
1028 		return (IF_Gbps(25));
1029 	}
1030 
1031 	return (IF_Gbps(10));
1032 }
1033 
1034 static void
1035 lio_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1036 {
1037 	struct lio	*lio = if_getsoftc(ifp);
1038 
1039 	/* Report link down if the driver isn't running. */
1040 	if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
1041 		ifmr->ifm_active |= IFM_NONE;
1042 		return;
1043 	}
1044 
1045 	/* Setup the default interface info. */
1046 	ifmr->ifm_status = IFM_AVALID;
1047 	ifmr->ifm_active = IFM_ETHER;
1048 
1049 	if (lio->linfo.link.s.link_up) {
1050 		ifmr->ifm_status |= IFM_ACTIVE;
1051 	} else {
1052 		ifmr->ifm_active |= IFM_NONE;
1053 		return;
1054 	}
1055 
1056 	ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev);
1057 
1058 	if (lio->linfo.link.s.duplex)
1059 		ifmr->ifm_active |= IFM_FDX;
1060 	else
1061 		ifmr->ifm_active |= IFM_HDX;
1062 }
1063 
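/*
 * \brief if_get_counter handler.  Sums the per-queue driver statistics
 * (or reads the firmware link statistics) for the requested counter.
 * @param ifp network interface
 * @param cnt counter being queried
 */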
1064 static uint64_t
1065 lio_get_counter(if_t ifp, ift_counter cnt)
1066 {
1067 	struct lio	*lio = if_getsoftc(ifp);
1068 	struct octeon_device	*oct = lio->oct_dev;
1069 	uint64_t	counter = 0;
1070 	int		i, q_no;
1071 
1072 	switch (cnt) {
1073 	case IFCOUNTER_IPACKETS:
1074 		for (i = 0; i < oct->num_oqs; i++) {
1075 			q_no = lio->linfo.rxpciq[i].s.q_no;
1076 			counter += oct->droq[q_no]->stats.rx_pkts_received;
1077 		}
1078 		break;
1079 	case IFCOUNTER_OPACKETS:
1080 		for (i = 0; i < oct->num_iqs; i++) {
1081 			q_no = lio->linfo.txpciq[i].s.q_no;
1082 			counter += oct->instr_queue[q_no]->stats.tx_done;
1083 		}
1084 		break;
1085 	case IFCOUNTER_IBYTES:
1086 		for (i = 0; i < oct->num_oqs; i++) {
1087 			q_no = lio->linfo.rxpciq[i].s.q_no;
1088 			counter += oct->droq[q_no]->stats.rx_bytes_received;
1089 		}
1090 		break;
1091 	case IFCOUNTER_OBYTES:
1092 		for (i = 0; i < oct->num_iqs; i++) {
1093 			q_no = lio->linfo.txpciq[i].s.q_no;
1094 			counter += oct->instr_queue[q_no]->stats.tx_tot_bytes;
1095 		}
1096 		break;
1097 	case IFCOUNTER_IQDROPS:
1098 		for (i = 0; i < oct->num_oqs; i++) {
1099 			q_no = lio->linfo.rxpciq[i].s.q_no;
1100 			counter += oct->droq[q_no]->stats.rx_dropped;
1101 		}
1102 		break;
1103 	case IFCOUNTER_OQDROPS:
1104 		for (i = 0; i < oct->num_iqs; i++) {
1105 			q_no = lio->linfo.txpciq[i].s.q_no;
1106 			counter += oct->instr_queue[q_no]->stats.tx_dropped;
1107 		}
1108 		break;
1109 	case IFCOUNTER_IMCASTS:
1110 		counter = oct->link_stats.fromwire.total_mcst;
1111 		break;
1112 	case IFCOUNTER_OMCASTS:
1113 		counter = oct->link_stats.fromhost.mcast_pkts_sent;
1114 		break;
1115 	case IFCOUNTER_COLLISIONS:
1116 		counter = oct->link_stats.fromhost.total_collisions;
1117 		break;
1118 	case IFCOUNTER_IERRORS:
1119 		counter = oct->link_stats.fromwire.fcs_err +
1120 		    oct->link_stats.fromwire.l2_err +
1121 		    oct->link_stats.fromwire.frame_err;
1122 		break;
1123 	default:
1124 		return (if_get_counter_default(ifp, cnt));
1125 	}
1126 
1127 	return (counter);
1128 }
1129 
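/*
 * \brief Initialize ifmedia and the ifnet callbacks, capabilities, MTU
 * and baudrate for a newly created LiquidIO interface.
 * @param lio per-interface softc
 */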
1130 static int
1131 lio_init_ifnet(struct lio *lio)
1132 {
1133 	struct octeon_device	*oct = lio->oct_dev;
1134 	if_t ifp = lio->ifp;
1135 
1136 	/* ifconfig entrypoint for media type/status reporting */
1137 	ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update,
1138 		     lio_ifmedia_status);
1139 
1140 	/* set the default interface values */
1141 	ifmedia_add(&lio->ifmedia,
1142 		    (IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)),
1143 		    0, NULL);
1144 	ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
1145 	ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO));
1146 
1147 	lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media;
1148 	lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media);
1149 
1150 	if_initname(ifp, device_get_name(oct->device),
1151 		    device_get_unit(oct->device));
1152 	if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
1153 	if_setioctlfn(ifp, lio_ioctl);
1154 	if_setgetcounterfn(ifp, lio_get_counter);
1155 	if_settransmitfn(ifp, lio_mq_start);
1156 	if_setqflushfn(ifp, lio_qflush);
1157 	if_setinitfn(ifp, lio_open);
1158 	if_setmtu(ifp, lio->linfo.link.s.mtu);
1159 	lio->mtu = lio->linfo.link.s.mtu;
1160 	if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1161 			     CSUM_TCP_IPV6 | CSUM_UDP_IPV6));
1162 
1163 	if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
1164 				    IFCAP_TSO | IFCAP_LRO |
1165 				    IFCAP_JUMBO_MTU | IFCAP_HWSTATS |
1166 				    IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER |
1167 				    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING |
1168 				    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0);
1169 
1170 	if_setcapenable(ifp, if_getcapabilities(ifp));
1171 	if_setbaudrate(ifp, lio_get_baudrate(oct));
1172 
1173 	return (0);
1174 }
1175 
1176 static void
1177 lio_tcp_lro_free(struct octeon_device *octeon_dev, struct ifnet *ifp)
1178 {
1179 	struct lio	*lio = if_getsoftc(ifp);
1180 	struct lio_droq	*droq;
1181 	int		q_no;
1182 	int		i;
1183 
1184 	for (i = 0; i < octeon_dev->num_oqs; i++) {
1185 		q_no = lio->linfo.rxpciq[i].s.q_no;
1186 		droq = octeon_dev->droq[q_no];
1187 		if (droq->lro.ifp) {
1188 			tcp_lro_free(&droq->lro);
1189 			droq->lro.ifp = NULL;
1190 		}
1191 	}
1192 }
1193 
1194 static int
1195 lio_tcp_lro_init(struct octeon_device *octeon_dev, struct ifnet *ifp)
1196 {
1197 	struct lio	*lio = if_getsoftc(ifp);
1198 	struct lio_droq	*droq;
1199 	struct lro_ctrl	*lro;
1200 	int		i, q_no, ret = 0;
1201 
1202 	for (i = 0; i < octeon_dev->num_oqs; i++) {
1203 		q_no = lio->linfo.rxpciq[i].s.q_no;
1204 		droq = octeon_dev->droq[q_no];
1205 		lro = &droq->lro;
1206 		ret = tcp_lro_init(lro);
1207 		if (ret) {
1208 			lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n",
1209 				    ret);
1210 			goto lro_init_failed;
1211 		}
1212 
1213 		lro->ifp = ifp;
1214 	}
1215 
1216 	return (ret);
1217 
1218 lro_init_failed:
1219 	lio_tcp_lro_free(octeon_dev, ifp);
1220 
1221 	return (ret);
1222 }
1223 
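/*
 * \brief Query the firmware for the configuration of each NIC port and
 * create, configure and attach an ifnet for it.
 * @param octeon_dev octeon device
 */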
1224 static int
1225 lio_setup_nic_devices(struct octeon_device *octeon_dev)
1226 {
1227 	union		octeon_if_cfg if_cfg;
1228 	struct lio	*lio = NULL;
1229 	struct ifnet	*ifp = NULL;
1230 	struct lio_version		*vdata;
1231 	struct lio_soft_command		*sc;
1232 	struct lio_if_cfg_context	*ctx;
1233 	struct lio_if_cfg_resp		*resp;
1234 	struct lio_if_props		*props;
1235 	int		num_iqueues, num_oqueues, retval;
1236 	unsigned int	base_queue;
1237 	unsigned int	gmx_port_id;
1238 	uint32_t	ctx_size, data_size;
1239 	uint32_t	ifidx_or_pfnum, resp_size;
1240 	uint8_t		mac[ETHER_HDR_LEN], i, j;
1241 
1242 	/* This is to handle link status changes */
1243 	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
1244 				 LIO_OPCODE_NIC_INFO,
1245 				 lio_link_info, octeon_dev);
1246 
1247 	for (i = 0; i < octeon_dev->ifcount; i++) {
1248 		resp_size = sizeof(struct lio_if_cfg_resp);
1249 		ctx_size = sizeof(struct lio_if_cfg_context);
1250 		data_size = sizeof(struct lio_version);
1251 		sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size,
1252 					    ctx_size);
1253 		if (sc == NULL)
1254 			return (ENOMEM);
1255 
1256 		resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1257 		ctx = (struct lio_if_cfg_context *)sc->ctxptr;
1258 		vdata = (struct lio_version *)sc->virtdptr;
1259 
1260 		*((uint64_t *)vdata) = 0;
1261 		vdata->major = htobe16(LIO_BASE_MAJOR_VERSION);
1262 		vdata->minor = htobe16(LIO_BASE_MINOR_VERSION);
1263 		vdata->micro = htobe16(LIO_BASE_MICRO_VERSION);
1264 
1265 		num_iqueues = octeon_dev->sriov_info.num_pf_rings;
1266 		num_oqueues = octeon_dev->sriov_info.num_pf_rings;
1267 		base_queue = octeon_dev->sriov_info.pf_srn;
1268 
1269 		gmx_port_id = octeon_dev->pf_num;
1270 		ifidx_or_pfnum = octeon_dev->pf_num;
1271 
1272 		lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n",
1273 			    ifidx_or_pfnum, num_iqueues, num_oqueues);
1274 		ctx->cond = 0;
1275 		ctx->octeon_id = lio_get_device_id(octeon_dev);
1276 
1277 		if_cfg.if_cfg64 = 0;
1278 		if_cfg.s.num_iqueues = num_iqueues;
1279 		if_cfg.s.num_oqueues = num_oqueues;
1280 		if_cfg.s.base_queue = base_queue;
1281 		if_cfg.s.gmx_port_id = gmx_port_id;
1282 
1283 		sc->iq_no = 0;
1284 
1285 		lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC,
1286 					 LIO_OPCODE_NIC_IF_CFG, 0,
1287 					 if_cfg.if_cfg64, 0);
1288 
1289 		sc->callback = lio_if_cfg_callback;
1290 		sc->callback_arg = sc;
1291 		sc->wait_time = 3000;
1292 
1293 		retval = lio_send_soft_command(octeon_dev, sc);
1294 		if (retval == LIO_IQ_SEND_FAILED) {
1295 			lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n",
1296 				    retval);
1297 			/* Soft instr is freed by driver in case of failure. */
1298 			goto setup_nic_dev_fail;
1299 		}
1300 
1301 		/*
1302 		 * Sleep on a wait queue until the cond flag indicates that the
1303 		 * response arrived or timed out.
1304 		 */
1305 		lio_sleep_cond(octeon_dev, &ctx->cond);
1306 
1307 		retval = resp->status;
1308 		if (retval) {
1309 			lio_dev_err(octeon_dev, "iq/oq config failed\n");
1310 			goto setup_nic_dev_fail;
1311 		}
1312 
1313 		lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
1314 				 (sizeof(struct octeon_if_cfg_info)) >> 3);
1315 
1316 		num_iqueues = bitcount64(resp->cfg_info.iqmask);
1317 		num_oqueues = bitcount64(resp->cfg_info.oqmask);
1318 
1319 		if (!(num_iqueues) || !(num_oqueues)) {
1320 			lio_dev_err(octeon_dev,
1321 				    "Got bad iqueues (%016lX) or oqueues (%016lX) from firmware.\n",
1322 				    resp->cfg_info.iqmask,
1323 				    resp->cfg_info.oqmask);
1324 			goto setup_nic_dev_fail;
1325 		}
1326 
1327 		lio_dev_dbg(octeon_dev,
1328 			    "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
1329 			    i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
1330 			    num_iqueues, num_oqueues);
1331 
1332 		ifp = if_alloc(IFT_ETHER);
1333 
1334 		if (ifp == NULL) {
1335 			lio_dev_err(octeon_dev, "Device allocation failed\n");
1336 			goto setup_nic_dev_fail;
1337 		}
1338 
1339 		lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO);
1340 
1341 		if (lio == NULL) {
1342 			lio_dev_err(octeon_dev, "Lio allocation failed\n");
1343 			goto setup_nic_dev_fail;
1344 		}
1345 
1346 		if_setsoftc(ifp, lio);
1347 
1348 		ifp->if_hw_tsomax = LIO_MAX_FRAME_SIZE;
1349 		ifp->if_hw_tsomaxsegcount = LIO_MAX_SG;
1350 		ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
1351 
1352 		lio->ifidx = ifidx_or_pfnum;
1353 
1354 		props = &octeon_dev->props;
1355 		props->gmxport = resp->cfg_info.linfo.gmxport;
1356 		props->ifp = ifp;
1357 
1358 		lio->linfo.num_rxpciq = num_oqueues;
1359 		lio->linfo.num_txpciq = num_iqueues;
1360 		for (j = 0; j < num_oqueues; j++) {
1361 			lio->linfo.rxpciq[j].rxpciq64 =
1362 			    resp->cfg_info.linfo.rxpciq[j].rxpciq64;
1363 		}
1364 
1365 		for (j = 0; j < num_iqueues; j++) {
1366 			lio->linfo.txpciq[j].txpciq64 =
1367 			    resp->cfg_info.linfo.txpciq[j].txpciq64;
1368 		}
1369 
1370 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1371 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1372 		lio->linfo.link.link_status64 =
1373 		    resp->cfg_info.linfo.link.link_status64;
1374 
1375 		/*
1376 		 * Point to the properties for octeon device to which this
1377 		 * interface belongs.
1378 		 */
1379 		lio->oct_dev = octeon_dev;
1380 		lio->ifp = ifp;
1381 
1382 		lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i,
1383 			    lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr));
1384 		lio_init_ifnet(lio);
1385 		/* 64-bit swap required on LE machines */
1386 		lio_swap_8B_data(&lio->linfo.hw_addr, 1);
1387 		for (j = 0; j < 6; j++)
1388 			mac[j] = *((uint8_t *)(
1389 				   ((uint8_t *)&lio->linfo.hw_addr) + 2 + j));
1390 
1391 		ether_ifattach(ifp, mac);
1392 
1393 		/*
1394 		 * By default all interfaces on a single Octeon use the same
1395 		 * tx and rx queues.
1396 		 */
1397 		lio->txq = lio->linfo.txpciq[0].s.q_no;
1398 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
1399 		if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq,
1400 					lio->linfo.num_rxpciq)) {
1401 			lio_dev_err(octeon_dev, "I/O queues creation failed\n");
1402 			goto setup_nic_dev_fail;
1403 		}
1404 
1405 		lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
1406 
1407 		lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq);
1408 		lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq);
1409 
1410 		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
1411 			lio_dev_err(octeon_dev, "Gather list allocation failed\n");
1412 			goto setup_nic_dev_fail;
1413 		}
1414 
1415 		if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp))
1416 			goto setup_nic_dev_fail;
1417 
1418 		if (lio_hwlro &&
1419 		    (if_getcapenable(ifp) & IFCAP_LRO) &&
1420 		    (if_getcapenable(ifp) & IFCAP_RXCSUM) &&
1421 		    (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6))
1422 			lio_set_feature(ifp, LIO_CMD_LRO_ENABLE,
1423 					LIO_LROIPV4 | LIO_LROIPV6);
1424 
1425 		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER))
1426 			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1);
1427 		else
1428 			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0);
1429 
1430 		if (lio_setup_rx_oom_poll_fn(ifp))
1431 			goto setup_nic_dev_fail;
1432 
1433 		lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
1434 			    i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
1435 		lio->link_changes++;
1436 
1437 		lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED);
1438 
1439 		/*
1440 		 * Send a command to the firmware to enable Rx checksum offload
1441 		 * by default when the LiquidIO driver is set up for this
1442 		 * device.
1443 		 */
1444 		lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL,
1445 				       LIO_CMD_RXCSUM_ENABLE);
1446 		lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL,
1447 				LIO_CMD_TXCSUM_ENABLE);
1448 
1449 #ifdef RSS
1450 		if (lio_rss) {
1451 			if (lio_send_rss_param(lio))
1452 				goto setup_nic_dev_fail;
1453 		} else
1454 #endif	/* RSS */
1455 
1456 			lio_set_feature(ifp, LIO_CMD_SET_FNV,
1457 					LIO_CMD_FNV_ENABLE);
1458 
1459 		lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i);
1460 
1461 		lio_free_soft_command(octeon_dev, sc);
1462 		lio->vlan_attach =
1463 		    EVENTHANDLER_REGISTER(vlan_config,
1464 					  lio_vlan_rx_add_vid, lio,
1465 					  EVENTHANDLER_PRI_FIRST);
1466 		lio->vlan_detach =
1467 		    EVENTHANDLER_REGISTER(vlan_unconfig,
1468 					  lio_vlan_rx_kill_vid, lio,
1469 					  EVENTHANDLER_PRI_FIRST);
1470 
1471 		/* Update stats periodically */
1472 		callout_init(&lio->stats_timer, 0);
1473 		lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL;
1474 
1475 		lio_add_hw_stats(lio);
1476 	}
1477 
1478 	return (0);
1479 
1480 setup_nic_dev_fail:
1481 
1482 	lio_free_soft_command(octeon_dev, sc);
1483 
1484 	while (i--) {
1485 		lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i);
1486 		lio_destroy_nic_device(octeon_dev, i);
1487 	}
1488 
1489 	return (ENODEV);
1490 }
1491 
1492 static int
1493 lio_link_info(struct lio_recv_info *recv_info, void *ptr)
1494 {
1495 	struct octeon_device	*oct = (struct octeon_device *)ptr;
1496 	struct lio_recv_pkt	*recv_pkt = recv_info->recv_pkt;
1497 	union octeon_link_status *ls;
1498 	int	gmxport = 0, i;
1499 
1500 	lio_dev_dbg(oct, "%s Called\n", __func__);
1501 	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) {
1502 		lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1503 			    recv_pkt->buffer_size[0],
1504 			    recv_pkt->rh.r_nic_info.gmxport);
1505 		goto nic_info_err;
1506 	}
1507 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
1508 	ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data +
1509 					  LIO_DROQ_INFO_SIZE);
1510 	lio_swap_8B_data((uint64_t *)ls,
1511 			 (sizeof(union octeon_link_status)) >> 3);
1512 
1513 	if (oct->props.gmxport == gmxport)
1514 		lio_update_link_status(oct->props.ifp, ls);
1515 
1516 nic_info_err:
1517 	for (i = 0; i < recv_pkt->buffer_count; i++)
1518 		lio_recv_buffer_free(recv_pkt->buffer_ptr[i]);
1519 
1520 	lio_free_recv_info(recv_info);
1521 	return (0);
1522 }
1523 
1524 void
1525 lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
1526 {
1527 
1528 	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
1529 	bus_dmamap_unload(iq->txtag, finfo->map);
1530 	m_freem(finfo->mb);
1531 }
1532 
1533 void
1534 lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
1535 {
1536 	struct lio_gather	*g;
1537 	struct octeon_device	*oct;
1538 	struct lio		*lio;
1539 	int	iq_no;
1540 
1541 	g = finfo->g;
1542 	iq_no = iq->txpciq.s.q_no;
1543 	oct = iq->oct_dev;
1544 	lio = if_getsoftc(oct->props.ifp);
1545 
1546 	mtx_lock(&lio->glist_lock[iq_no]);
1547 	STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries);
1548 	mtx_unlock(&lio->glist_lock[iq_no]);
1549 
1550 	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
1551 	bus_dmamap_unload(iq->txtag, finfo->map);
1552 	m_freem(finfo->mb);
1553 }
1554 
1555 static void
1556 lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf)
1557 {
1558 	struct lio_soft_command	*sc = (struct lio_soft_command *)buf;
1559 	struct lio_if_cfg_resp	*resp;
1560 	struct lio_if_cfg_context *ctx;
1561 
1562 	resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1563 	ctx = (struct lio_if_cfg_context *)sc->ctxptr;
1564 
1565 	oct = lio_get_device(ctx->octeon_id);
1566 	if (resp->status)
1567 		lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n",
1568 			    LIO_CAST64(resp->status), status);
1569 	ctx->cond = 1;
1570 
1571 	snprintf(oct->fw_info.lio_firmware_version, 32, "%s",
1572 		 resp->cfg_info.lio_firmware_version);
1573 
1574 	/*
1575 	 * This barrier is required to be sure that the response has been
1576 	 * written fully before waking up the handler
1577 	 */
1578 	wmb();
1579 }
1580 
1581 static int
1582 lio_is_mac_changed(uint8_t *new, uint8_t *old)
1583 {
1584 
1585 	return ((new[0] != old[0]) || (new[1] != old[1]) ||
1586 		(new[2] != old[2]) || (new[3] != old[3]) ||
1587 		(new[4] != old[4]) || (new[5] != old[5]));
1588 }
1589 
1590 void
1591 lio_open(void *arg)
1592 {
1593 	struct lio	*lio = arg;
1594 	struct ifnet	*ifp = lio->ifp;
1595 	struct octeon_device	*oct = lio->oct_dev;
1596 	uint8_t	*mac_new, mac_old[ETHER_HDR_LEN];
1597 	int	ret = 0;
1598 
1599 	lio_ifstate_set(lio, LIO_IFSTATE_RUNNING);
1600 
1601 	/* Ready for link status updates */
1602 	lio->intf_open = 1;
1603 
1604 	lio_dev_info(oct, "Interface Open, ready for traffic\n");
1605 
1606 	/* tell Octeon to start forwarding packets to host */
1607 	lio_send_rx_ctrl_cmd(lio, 1);
1608 
1609 	mac_new = IF_LLADDR(ifp);
1610 	memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_HDR_LEN);
1611 
1612 	if (lio_is_mac_changed(mac_new, mac_old)) {
1613 		ret = lio_set_mac(ifp, mac_new);
1614 		if (ret)
1615 			lio_dev_err(oct, "MAC change failed, error: %d\n", ret);
1616 	}
1617 
1618 	/* Now inform the stack we're ready */
1619 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1620 
1621 	lio_dev_info(oct, "Interface is opened\n");
1622 }
1623 
1624 static int
1625 lio_set_rxcsum_command(struct ifnet *ifp, int command, uint8_t rx_cmd)
1626 {
1627 	struct lio_ctrl_pkt	nctrl;
1628 	struct lio		*lio = if_getsoftc(ifp);
1629 	struct octeon_device	*oct = lio->oct_dev;
1630 	int	ret = 0;
1631 
1632 	nctrl.ncmd.cmd64 = 0;
1633 	nctrl.ncmd.s.cmd = command;
1634 	nctrl.ncmd.s.param1 = rx_cmd;
1635 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1636 	nctrl.wait_time = 100;
1637 	nctrl.lio = lio;
1638 	nctrl.cb_fn = lio_ctrl_cmd_completion;
1639 
1640 	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
1641 	if (ret < 0) {
1642 		lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
1643 			    ret);
1644 	}
1645 
1646 	return (ret);
1647 }
1648 
1649 static int
1650 lio_stop_nic_module(struct octeon_device *oct)
1651 {
1652 	int		i, j;
1653 	struct lio	*lio;
1654 
1655 	lio_dev_dbg(oct, "Stopping network interfaces\n");
1656 	if (!oct->ifcount) {
1657 		lio_dev_err(oct, "Init for Octeon was not completed\n");
1658 		return (1);
1659 	}
1660 
1661 	mtx_lock(&oct->cmd_resp_wqlock);
1662 	oct->cmd_resp_state = LIO_DRV_OFFLINE;
1663 	mtx_unlock(&oct->cmd_resp_wqlock);
1664 
1665 	for (i = 0; i < oct->ifcount; i++) {
1666 		lio = if_getsoftc(oct->props.ifp);
1667 		for (j = 0; j < oct->num_oqs; j++)
1668 			lio_unregister_droq_ops(oct,
1669 						lio->linfo.rxpciq[j].s.q_no);
1670 	}
1671 
1672 	callout_drain(&lio->stats_timer);
1673 
1674 	for (i = 0; i < oct->ifcount; i++)
1675 		lio_destroy_nic_device(oct, i);
1676 
1677 	lio_dev_dbg(oct, "Network interface stopped\n");
1678 
1679 	return (0);
1680 }
1681 
1682 static void
1683 lio_delete_glists(struct octeon_device *oct, struct lio *lio)
1684 {
1685 	struct lio_gather	*g;
1686 	int	i;
1687 
1688 	if (lio->glist_lock != NULL) {
1689 		free((void *)lio->glist_lock, M_DEVBUF);
1690 		lio->glist_lock = NULL;
1691 	}
1692 
1693 	if (lio->ghead == NULL)
1694 		return;
1695 
1696 	for (i = 0; i < lio->linfo.num_txpciq; i++) {
1697 		do {
1698 			g = (struct lio_gather *)
1699 			    lio_delete_first_node(&lio->ghead[i]);
1700 			free(g, M_DEVBUF);
1701 		} while (g);
1702 
1703 		if ((lio->glists_virt_base != NULL) &&
1704 		    (lio->glists_virt_base[i] != NULL)) {
1705 			lio_dma_free(lio->glist_entry_size * lio->tx_qsize,
1706 				     lio->glists_virt_base[i]);
1707 		}
1708 	}
1709 
1710 	free(lio->glists_virt_base, M_DEVBUF);
1711 	lio->glists_virt_base = NULL;
1712 
1713 	free(lio->glists_dma_base, M_DEVBUF);
1714 	lio->glists_dma_base = NULL;
1715 
1716 	free(lio->ghead, M_DEVBUF);
1717 	lio->ghead = NULL;
1718 }
1719 
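/*
 * \brief Allocate the per-IQ gather list pools: the locks, free-list
 * heads and DMA-coherent scatter/gather entry memory used for transmit.
 * @param oct octeon device
 * @param lio per-interface softc
 * @param num_iqs number of instruction (TX) queues
 */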
1720 static int
1721 lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
1722 {
1723 	struct lio_gather	*g;
1724 	int	i, j;
1725 
1726 	lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF,
1727 				 M_NOWAIT | M_ZERO);
1728 	if (lio->glist_lock == NULL)
1729 		return (1);
1730 
1731 	lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF,
1732 			    M_NOWAIT | M_ZERO);
1733 	if (lio->ghead == NULL) {
1734 		free((void *)lio->glist_lock, M_DEVBUF);
1735 		lio->glist_lock = NULL;
1736 		return (1);
1737 	}
1738 
1739 	lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) *
1740 					 LIO_SG_ENTRY_SIZE);
1741 	/*
1742 	 * Allocate memory to store the virtual and DMA base addresses of
1743 	 * the per-glist consistent memory.
1744 	 */
1745 	lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF,
1746 				       M_NOWAIT | M_ZERO);
1747 	lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF,
1748 				      M_NOWAIT | M_ZERO);
1749 	if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) {
1750 		lio_delete_glists(oct, lio);
1751 		return (1);
1752 	}
1753 
1754 	for (i = 0; i < num_iqs; i++) {
1755 		mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF);
1756 
1757 		STAILQ_INIT(&lio->ghead[i]);
1758 
1759 		lio->glists_virt_base[i] =
1760 		    lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize,
1761 				  (vm_paddr_t *)&lio->glists_dma_base[i]);
1762 		if (lio->glists_virt_base[i] == NULL) {
1763 			lio_delete_glists(oct, lio);
1764 			return (1);
1765 		}
1766 
1767 		for (j = 0; j < lio->tx_qsize; j++) {
1768 			g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO);
1769 			if (g == NULL)
1770 				break;
1771 
1772 			g->sg = (struct lio_sg_entry *)
1773 			    ((uint64_t)lio->glists_virt_base[i] +
1774 			     (j * lio->glist_entry_size));
1775 			g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] +
1776 				(j * lio->glist_entry_size);
1777 			STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries);
1778 		}
1779 
1780 		if (j != lio->tx_qsize) {
1781 			lio_delete_glists(oct, lio);
1782 			return (1);
1783 		}
1784 	}
1785 
1786 	return (0);
1787 }
1788 
1789 void
1790 lio_stop(struct ifnet *ifp)
1791 {
1792 	struct lio	*lio = if_getsoftc(ifp);
1793 	struct octeon_device	*oct = lio->oct_dev;
1794 
1795 	lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1796 	if_link_state_change(ifp, LINK_STATE_DOWN);
1797 
1798 	lio->intf_open = 0;
1799 	lio->linfo.link.s.link_up = 0;
1800 	lio->link_changes++;
1801 
1802 	lio_send_rx_ctrl_cmd(lio, 0);
1803 
1804 	/* Tell the stack that the interface is no longer active */
1805 	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1806 
1807 	lio_dev_info(oct, "Interface is stopped\n");
1808 }
1809 
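/*
 * \brief Refill any RX DROQ whose credit count has dropped to 0x40 or
 * fewer descriptors and post the refilled count back to hardware.
 * @param lio per-interface softc
 */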
1810 static void
1811 lio_check_rx_oom_status(struct lio *lio)
1812 {
1813 	struct lio_droq	*droq;
1814 	struct octeon_device *oct = lio->oct_dev;
1815 	int	desc_refilled;
1816 	int	q, q_no = 0;
1817 
1818 	for (q = 0; q < oct->num_oqs; q++) {
1819 		q_no = lio->linfo.rxpciq[q].s.q_no;
1820 		droq = oct->droq[q_no];
1821 		if (droq == NULL)
1822 			continue;
1823 		if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) {
1824 			mtx_lock(&droq->lock);
1825 			desc_refilled = lio_droq_refill(oct, droq);
1826 			/*
1827 			 * Flush the droq descriptor data to memory to be sure
1828 			 * that when we update the credits the data in memory
1829 			 * is accurate.
1830 			 */
1831 			wmb();
1832 			lio_write_csr32(oct, droq->pkts_credit_reg,
1833 					desc_refilled);
1834 			/* make sure mmio write completes */
1835 			__compiler_membar();
1836 			mtx_unlock(&droq->lock);
1837 		}
1838 	}
1839 }
1840 
1841 static void
1842 lio_poll_check_rx_oom_status(void *arg, int pending __unused)
1843 {
1844 	struct lio_tq	*rx_status_tq = arg;
1845 	struct lio	*lio = rx_status_tq->ctxptr;
1846 
1847 	if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING))
1848 		lio_check_rx_oom_status(lio);
1849 
1850 	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
1851 				  lio_ms_to_ticks(50));
1852 }
1853 
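/*
 * \brief Create the taskqueue that periodically (every 50 ms) re-checks
 * the RX queues for out-of-memory while the interface is running.
 * @param ifp network interface
 */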
1854 static int
1855 lio_setup_rx_oom_poll_fn(struct ifnet *ifp)
1856 {
1857 	struct lio	*lio = if_getsoftc(ifp);
1858 	struct octeon_device	*oct = lio->oct_dev;
1859 	struct lio_tq	*rx_status_tq;
1860 
1861 	rx_status_tq = &lio->rx_status_tq;
1862 
1863 	rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK,
1864 					    taskqueue_thread_enqueue,
1865 					    &rx_status_tq->tq);
1866 	if (rx_status_tq->tq == NULL) {
1867 		lio_dev_err(oct, "unable to create lio rx oom status tq\n");
1868 		return (-1);
1869 	}
1870 
1871 	TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
1872 			  lio_poll_check_rx_oom_status, (void *)rx_status_tq);
1873 
1874 	rx_status_tq->ctxptr = lio;
1875 
1876 	taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET,
1877 				"lio%d_rx_oom_status",
1878 				oct->octeon_id);
1879 
1880 	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
1881 				  lio_ms_to_ticks(50));
1882 
1883 	return (0);
1884 }
1885 
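/*
 * \brief Cancel, drain and free the RX OOM polling taskqueue
 * @param ifp pointer to network device
 */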
1886 static void
1887 lio_cleanup_rx_oom_poll_fn(struct ifnet *ifp)
1888 {
1889 	struct lio	*lio = if_getsoftc(ifp);
1890 
1891 	if (lio->rx_status_tq.tq != NULL) {
1892 		while (taskqueue_cancel_timeout(lio->rx_status_tq.tq,
1893 						&lio->rx_status_tq.work, NULL))
1894 			taskqueue_drain_timeout(lio->rx_status_tq.tq,
1895 						&lio->rx_status_tq.work);
1896 
1897 		taskqueue_free(lio->rx_status_tq.tq);
1898 
1899 		lio->rx_status_tq.tq = NULL;
1900 	}
1901 }
1902 
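/*
 * \brief Destroy the NIC device
 * @param oct octeon device
 * @param ifidx index of the interface to destroy
 *
 * Stops the interface if it is running, waits for outstanding requests,
 * instructions and packets, detaches from the network stack and releases
 * the per-interface resources (LRO, OOM poller, gather lists, VLAN event
 * handlers, softc and ifnet).
 */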
1903 static void
1904 lio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1905 {
1906 	struct ifnet	*ifp = oct->props.ifp;
1907 	struct lio	*lio;
1908 
1909 	if (ifp == NULL) {
1910 		lio_dev_err(oct, "%s No ifp ptr for index %d\n",
1911 			    __func__, ifidx);
1912 		return;
1913 	}
1914 
1915 	lio = if_getsoftc(ifp);
1916 
1917 	lio_ifstate_set(lio, LIO_IFSTATE_DETACH);
1918 
1919 	lio_dev_dbg(oct, "NIC device cleanup\n");
1920 
1921 	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1922 		lio_stop(ifp);
1923 
1924 	if (lio_wait_for_pending_requests(oct))
1925 		lio_dev_err(oct, "There were pending requests\n");
1926 
1927 	if (lio_wait_for_instr_fetch(oct))
1928 		lio_dev_err(oct, "IQ had pending instructions\n");
1929 
1930 	if (lio_wait_for_oq_pkts(oct))
1931 		lio_dev_err(oct, "OQ had pending packets\n");
1932 
1933 	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1934 		ether_ifdetach(ifp);
1935 
1936 	lio_tcp_lro_free(oct, ifp);
1937 
1938 	lio_cleanup_rx_oom_poll_fn(ifp);
1939 
1940 	lio_delete_glists(oct, lio);
1941 
1942 	EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach);
1943 	EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach);
1944 
1945 	free(lio, M_DEVBUF);
1946 
1947 	if_free(ifp);
1948 
1949 	oct->props.gmxport = -1;
1950 
1951 	oct->props.ifp = NULL;
1952 }
1953 
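/*
 * \brief Print link information
 * @param ifp pointer to network device
 */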
1954 static void
1955 print_link_info(struct ifnet *ifp)
1956 {
1957 	struct lio	*lio = if_getsoftc(ifp);
1958 
1959 	if (!lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
1960 	    lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
1961 		struct octeon_link_info *linfo = &lio->linfo;
1962 
1963 		if (linfo->link.s.link_up) {
1964 			lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n",
1965 				     linfo->link.s.speed,
1966 				     (linfo->link.s.duplex) ? "Full" : "Half");
1967 		} else {
1968 			lio_dev_info(lio->oct_dev, "Link Down\n");
1969 		}
1970 	}
1971 }
1972 
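/*
 * \brief Update link status
 * @param ifp pointer to network device
 * @param ls new link status
 *
 * Caches the new link status and, if it changed while the interface is
 * open, logs the change and notifies the network stack.
 */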
1973 static inline void
1974 lio_update_link_status(struct ifnet *ifp, union octeon_link_status *ls)
1975 {
1976 	struct lio	*lio = if_getsoftc(ifp);
1977 	int	changed = (lio->linfo.link.link_status64 != ls->link_status64);
1978 
1979 	lio->linfo.link.link_status64 = ls->link_status64;
1980 
1981 	if ((lio->intf_open) && (changed)) {
1982 		print_link_info(ifp);
1983 		lio->link_changes++;
1984 		if (lio->linfo.link.s.link_up)
1985 			if_link_state_change(ifp, LINK_STATE_UP);
1986 		else
1987 			if_link_state_change(ifp, LINK_STATE_DOWN);
1988 	}
1989 }
1990 
1991 /*
1992  * \brief Callback for rx ctrl
1993  * @param status status of request
1994  * @param buf pointer to the soft command
1995  */
1996 static void
1997 lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
1998 {
1999 	struct lio_soft_command	*sc = (struct lio_soft_command *)buf;
2000 	struct lio_rx_ctl_context *ctx;
2001 
2002 	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
2003 
2004 	oct = lio_get_device(ctx->octeon_id);
2005 	if (status)
2006 		lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n",
2007 			    LIO_CAST64(status));
2008 	ctx->cond = 1;
2009 
2010 	/*
2011 	 * This barrier is required to ensure that the response has been
2012 	 * written completely before the waiter is woken up.
2013 	 */
2014 	wmb();
2015 }
2016 
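/*
 * \brief Send a command to start or stop RX on the NIC
 * @param lio per-interface private structure
 * @param start_stop whether to start (1) or stop (0) RX
 *
 * Builds a LIO_CMD_RX_CTL soft command, sends it to the firmware and
 * sleeps until the completion callback fires or the command times out.
 */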
2017 static void
2018 lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop)
2019 {
2020 	struct lio_soft_command	*sc;
2021 	struct lio_rx_ctl_context *ctx;
2022 	union octeon_cmd	*ncmd;
2023 	struct octeon_device	*oct = (struct octeon_device *)lio->oct_dev;
2024 	int	ctx_size = sizeof(struct lio_rx_ctl_context);
2025 	int	retval;
2026 
2027 	if (oct->props.rx_on == start_stop)
2028 		return;
2029 
2030 	sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size);
2031 	if (sc == NULL)
2032 		return;
2033 
2034 	ncmd = (union octeon_cmd *)sc->virtdptr;
2035 	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
2036 
2037 	ctx->cond = 0;
2038 	ctx->octeon_id = lio_get_device_id(oct);
2039 	ncmd->cmd64 = 0;
2040 	ncmd->s.cmd = LIO_CMD_RX_CTL;
2041 	ncmd->s.param1 = start_stop;
2042 
2043 	lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));
2044 
2045 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2046 
2047 	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0,
2048 				 0, 0);
2049 
2050 	sc->callback = lio_rx_ctl_callback;
2051 	sc->callback_arg = sc;
2052 	sc->wait_time = 5000;
2053 
2054 	retval = lio_send_soft_command(oct, sc);
2055 	if (retval == LIO_IQ_SEND_FAILED) {
2056 		lio_dev_err(oct, "Failed to send RX Control message\n");
2057 	} else {
2058 		/*
2059 		 * Sleep until the cond flag indicates that the response has
2060 		 * arrived or the request has timed out.
2061 		 */
2062 		lio_sleep_cond(oct, &ctx->cond);
2063 		oct->props.rx_on = start_stop;
2064 	}
2065 
2066 	lio_free_soft_command(oct, sc);
2067 }
2068 
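/*
 * \brief vlan_config event handler; adds a VLAN filter in the firmware
 * @param arg softc pointer registered with the event handler
 * @param ifp pointer to network device
 * @param vid VLAN id to add
 */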
2069 static void
2070 lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid)
2071 {
2072 	struct lio_ctrl_pkt	nctrl;
2073 	struct lio		*lio = if_getsoftc(ifp);
2074 	struct octeon_device	*oct = lio->oct_dev;
2075 	int	ret = 0;
2076 
2077 	if (if_getsoftc(ifp) != arg)	/* Not our event */
2078 		return;
2079 
2080 	if ((vid == 0) || (vid > 4095))	/* Invalid */
2081 		return;
2082 
2083 	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
2084 
2085 	nctrl.ncmd.cmd64 = 0;
2086 	nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER;
2087 	nctrl.ncmd.s.param1 = vid;
2088 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2089 	nctrl.wait_time = 100;
2090 	nctrl.lio = lio;
2091 	nctrl.cb_fn = lio_ctrl_cmd_completion;
2092 
2093 	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
2094 	if (ret < 0) {
2095 		lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n",
2096 			    ret);
2097 	}
2098 }
2099 
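/*
 * \brief vlan_unconfig event handler; removes a VLAN filter in the firmware
 * @param arg softc pointer registered with the event handler
 * @param ifp pointer to network device
 * @param vid VLAN id to remove
 */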
2100 static void
2101 lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, uint16_t vid)
2102 {
2103 	struct lio_ctrl_pkt	nctrl;
2104 	struct lio		*lio = if_getsoftc(ifp);
2105 	struct octeon_device	*oct = lio->oct_dev;
2106 	int	ret = 0;
2107 
2108 	if (if_getsoftc(ifp) != arg)	/* Not our event */
2109 		return;
2110 
2111 	if ((vid == 0) || (vid > 4095))	/* Invalid */
2112 		return;
2113 
2114 	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
2115 
2116 	nctrl.ncmd.cmd64 = 0;
2117 	nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER;
2118 	nctrl.ncmd.s.param1 = vid;
2119 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2120 	nctrl.wait_time = 100;
2121 	nctrl.lio = lio;
2122 	nctrl.cb_fn = lio_ctrl_cmd_completion;
2123 
2124 	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
2125 	if (ret < 0) {
2126 		lio_dev_err(oct,
2127 			    "Kill VLAN filter failed in core (ret: 0x%x)\n",
2128 			    ret);
2129 	}
2130 }
2131 
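/*
 * \brief Wait for pending output-queue packets to be processed
 * @param oct octeon device
 *
 * Polls the active DROQs, scheduling their tasks to drain any packets
 * found, and retries with 1 ms sleeps until no packets remain pending or
 * the retry count is exhausted.
 */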
2132 static int
2133 lio_wait_for_oq_pkts(struct octeon_device *oct)
2134 {
2135 	int	i, pending_pkts, pkt_cnt = 0, retry = 100;
2136 
2137 	do {
2138 		pending_pkts = 0;
2139 
2140 		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
2141 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
2142 				continue;
2143 
2144 			pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]);
2145 			if (pkt_cnt > 0) {
2146 				pending_pkts += pkt_cnt;
2147 				taskqueue_enqueue(oct->droq[i]->droq_taskqueue,
2148 						  &oct->droq[i]->droq_task);
2149 			}
2150 		}
2151 
2152 		pkt_cnt = 0;
2153 		lio_sleep_timeout(1);
2154 	} while (retry-- && pending_pkts);
2155 
2156 	return (pkt_cnt);
2157 }
2158 
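/*
 * \brief Tear down device resources based on the current device state
 * @param oct octeon device
 *
 * Walks the device state machine backwards from its current state,
 * stopping queues, releasing interrupts and MSI-X resources, deleting
 * DROQs and instruction queues, and finally unmapping and disabling the
 * PCI device.
 */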
2159 static void
2160 lio_destroy_resources(struct octeon_device *oct)
2161 {
2162 	int i, refcount;
2163 
2164 	switch (atomic_load_acq_int(&oct->status)) {
2165 	case LIO_DEV_RUNNING:
2166 	case LIO_DEV_CORE_OK:
2167 		/* No more instructions will be forwarded. */
2168 		atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET);
2169 
2170 		oct->app_mode = LIO_DRV_INVALID_APP;
2171 		lio_dev_dbg(oct, "Device state is now %s\n",
2172 			    lio_get_state_string(&oct->status));
2173 
2174 		lio_sleep_timeout(100);
2175 
2176 		/* fallthrough */
2177 	case LIO_DEV_HOST_OK:
2178 
2179 		/* fallthrough */
2180 	case LIO_DEV_CONSOLE_INIT_DONE:
2181 		/* Remove any consoles */
2182 		lio_remove_consoles(oct);
2183 
2184 		/* fallthrough */
2185 	case LIO_DEV_IO_QUEUES_DONE:
2186 		if (lio_wait_for_pending_requests(oct))
2187 			lio_dev_err(oct, "There were pending requests\n");
2188 
2189 		if (lio_wait_for_instr_fetch(oct))
2190 			lio_dev_err(oct, "IQ had pending instructions\n");
2191 
2192 		/*
2193 		 * Disable the input and output queues now. No more packets will
2194 		 * arrive from Octeon, but we should wait for all packet
2195 		 * processing to finish.
2196 		 */
2197 		oct->fn_list.disable_io_queues(oct);
2198 
2199 		if (lio_wait_for_oq_pkts(oct))
2200 			lio_dev_err(oct, "OQ had pending packets\n");
2201 
2202 		/* fallthrough */
2203 	case LIO_DEV_INTR_SET_DONE:
2204 		/* Disable interrupts  */
2205 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
2206 
2207 		if (oct->msix_on) {
2208 			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
2209 				if (oct->ioq_vector[i].tag != NULL) {
2210 					bus_teardown_intr(oct->device,
2211 						  oct->ioq_vector[i].msix_res,
2212 						      oct->ioq_vector[i].tag);
2213 					oct->ioq_vector[i].tag = NULL;
2214 				}
2215 				if (oct->ioq_vector[i].msix_res != NULL) {
2216 					bus_release_resource(oct->device,
2217 						SYS_RES_IRQ,
2218 						oct->ioq_vector[i].vector,
2219 						oct->ioq_vector[i].msix_res);
2220 					oct->ioq_vector[i].msix_res = NULL;
2221 				}
2222 			}
2223 			/* The non-IOQ (aux) vector's argument is the oct struct. */
2224 			if (oct->tag != NULL) {
2225 				bus_teardown_intr(oct->device, oct->msix_res,
2226 						  oct->tag);
2227 				oct->tag = NULL;
2228 			}
2229 
2230 			if (oct->msix_res != NULL) {
2231 				bus_release_resource(oct->device, SYS_RES_IRQ,
2232 						     oct->aux_vector,
2233 						     oct->msix_res);
2234 				oct->msix_res = NULL;
2235 			}
2236 
2237 			pci_release_msi(oct->device);
2238 		}
2239 		/* fallthrough */
2240 	case LIO_DEV_IN_RESET:
2241 	case LIO_DEV_DROQ_INIT_DONE:
2242 		/* Wait for any pending operations */
2243 		lio_mdelay(100);
2244 		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
2245 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
2246 				continue;
2247 			lio_delete_droq(oct, i);
2248 		}
2249 
2250 		/* fallthrough */
2251 	case LIO_DEV_RESP_LIST_INIT_DONE:
2252 		for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
2253 			if (oct->droq[i] != NULL) {
2254 				free(oct->droq[i], M_DEVBUF);
2255 				oct->droq[i] = NULL;
2256 			}
2257 		}
2258 		lio_delete_response_list(oct);
2259 
2260 		/* fallthrough */
2261 	case LIO_DEV_INSTR_QUEUE_INIT_DONE:
2262 		for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
2263 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
2264 				continue;
2265 
2266 			lio_delete_instr_queue(oct, i);
2267 		}
2268 
2269 		/* fallthrough */
2270 	case LIO_DEV_MSIX_ALLOC_VECTOR_DONE:
2271 		for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
2272 			if (oct->instr_queue[i] != NULL) {
2273 				free(oct->instr_queue[i], M_DEVBUF);
2274 				oct->instr_queue[i] = NULL;
2275 			}
2276 		}
2277 		lio_free_ioq_vector(oct);
2278 
2279 		/* fallthrough */
2280 	case LIO_DEV_SC_BUFF_POOL_INIT_DONE:
2281 		lio_free_sc_buffer_pool(oct);
2282 
2283 		/* fallthrough */
2284 	case LIO_DEV_DISPATCH_INIT_DONE:
2285 		lio_delete_dispatch_list(oct);
2286 
2287 		/* fallthrough */
2288 	case LIO_DEV_PCI_MAP_DONE:
2289 		refcount = lio_deregister_device(oct);
2290 
2291 		if (fw_type_is_none())
2292 			lio_pci_flr(oct);
2293 
2294 		if (!refcount)
2295 			oct->fn_list.soft_reset(oct);
2296 
2297 		lio_unmap_pci_barx(oct, 0);
2298 		lio_unmap_pci_barx(oct, 1);
2299 
2300 		/* fallthrough */
2301 	case LIO_DEV_PCI_ENABLE_DONE:
2302 		/* Disable the device, releasing the PCI INT */
2303 		pci_disable_busmaster(oct->device);
2304 
2305 		/* fallthrough */
2306 	case LIO_DEV_BEGIN_STATE:
2307 		break;
2308 	}	/* end switch (oct->status) */
2309 }
2310