xref: /freebsd/sys/dev/liquidio/lio_main.c (revision 5a94c2e89f6a4fbdea49d6c3a51b5fe0154d2495)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Cavium, Inc. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "lio_bsd.h"
35 #include "lio_common.h"
36 
37 #include "lio_droq.h"
38 #include "lio_iq.h"
39 #include "lio_response_manager.h"
40 #include "lio_device.h"
41 #include "lio_ctrl.h"
42 #include "lio_main.h"
43 #include "lio_network.h"
44 #include "cn23xx_pf_device.h"
45 #include "lio_image.h"
46 #include "lio_ioctl.h"
47 #include "lio_rxtx.h"
48 #include "lio_rss.h"
49 
50 /* Number of milliseconds to wait for DDR initialization */
51 #define LIO_DDR_TIMEOUT	10000
52 #define LIO_MAX_FW_TYPE_LEN	8
53 
54 static char fw_type[LIO_MAX_FW_TYPE_LEN];
55 TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type));
56 
57 /*
58  * Integers that specify number of queues per PF.
59  * Valid range is 0 to 64.
60  * Use 0 to derive from CPU count.
61  */
62 static int	num_queues_per_pf0;
63 static int	num_queues_per_pf1;
64 TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0);
65 TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1);
66 
67 static int	lio_rss = 1;
68 TUNABLE_INT("hw.lio.rss", &lio_rss);
69 
70 /* Hardware LRO */
71 unsigned int	lio_hwlro = 0;
72 TUNABLE_INT("hw.lio.hwlro", &lio_hwlro);
73 
74 /*
75  * Bitmask indicating which consoles have debug
76  * output redirected to syslog.
77  */
78 static unsigned long	console_bitmask;
79 TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask);
80 
81 /*
82  * \brief determines if a given console has debug enabled.
83  * @param console console to check
84  * @returns  1 = enabled. 0 otherwise
85  */
86 int
87 lio_console_debug_enabled(uint32_t console)
88 {
89 
90 	return (console_bitmask >> (console)) & 0x1;
91 }
92 
93 static int	lio_detach(device_t dev);
94 
95 static int	lio_device_init(struct octeon_device *octeon_dev);
96 static int	lio_chip_specific_setup(struct octeon_device *oct);
97 static void	lio_watchdog(void *param);
98 static int	lio_load_firmware(struct octeon_device *oct);
99 static int	lio_nic_starter(struct octeon_device *oct);
100 static int	lio_init_nic_module(struct octeon_device *oct);
101 static int	lio_setup_nic_devices(struct octeon_device *octeon_dev);
102 static int	lio_link_info(struct lio_recv_info *recv_info, void *ptr);
103 static void	lio_if_cfg_callback(struct octeon_device *oct, uint32_t status,
104 				    void *buf);
105 static int	lio_set_rxcsum_command(if_t ifp, int command,
106 				       uint8_t rx_cmd);
107 static int	lio_setup_glists(struct octeon_device *oct, struct lio *lio,
108 				 int num_iqs);
109 static void	lio_destroy_nic_device(struct octeon_device *oct, int ifidx);
110 static inline void	lio_update_link_status(if_t ifp,
111 					       union octeon_link_status *ls);
112 static void	lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop);
113 static int	lio_stop_nic_module(struct octeon_device *oct);
114 static void	lio_destroy_resources(struct octeon_device *oct);
115 static int	lio_setup_rx_oom_poll_fn(if_t ifp);
116 
117 static void	lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid);
118 static void	lio_vlan_rx_kill_vid(void *arg, if_t ifp,
119 				     uint16_t vid);
120 static struct octeon_device *
121 	lio_get_other_octeon_device(struct octeon_device *oct);
122 
123 static int	lio_wait_for_oq_pkts(struct octeon_device *oct);
124 
125 int	lio_send_rss_param(struct lio *lio);
126 static int	lio_dbg_console_print(struct octeon_device *oct,
127 				      uint32_t console_num, char *prefix,
128 				      char *suffix);
129 
130 /* Polling interval for determining when NIC application is alive */
131 #define LIO_STARTER_POLL_INTERVAL_MS	100
132 
133 /*
134  * vendor_info_array.
135  * This array contains the list of PCI IDs for which this driver should attach.
136  */
137 struct lio_vendor_info {
138 	uint16_t	vendor_id;
139 	uint16_t	device_id;
140 	uint16_t	subdevice_id;
141 	uint8_t		revision_id;
142 	uint8_t		index;
143 };
144 
145 static struct lio_vendor_info lio_pci_tbl[] = {
146 	/* CN2350 10G */
147 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE,
148 		0x02, 0},
149 
150 	/* CN2350 10G */
151 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1,
152 		0x02, 0},
153 
154 	/* CN2360 10G */
155 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE,
156 		0x02, 1},
157 
158 	/* CN2350 25G */
159 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE,
160 		0x02, 2},
161 
162 	/* CN2360 25G */
163 	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE,
164 		0x02, 3},
165 
166 	{0, 0, 0, 0, 0}
167 };
168 
169 static char *lio_strings[] = {
170 	"LiquidIO 2350 10GbE Server Adapter",
171 	"LiquidIO 2360 10GbE Server Adapter",
172 	"LiquidIO 2350 25GbE Server Adapter",
173 	"LiquidIO 2360 25GbE Server Adapter",
174 };
175 
176 struct lio_if_cfg_resp {
177 	uint64_t	rh;
178 	struct octeon_if_cfg_info cfg_info;
179 	uint64_t	status;
180 };
181 
182 struct lio_if_cfg_context {
183 	int		octeon_id;
184 	volatile int	cond;
185 };
186 
187 struct lio_rx_ctl_context {
188 	int		octeon_id;
189 	volatile int	cond;
190 };
191 
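/*
 * \brief PCI device probe handler
 * @param dev device being probed
 *
 * Matches the vendor, device, subdevice and revision IDs against
 * lio_pci_tbl and sets the device description on a match.
 * @returns BUS_PROBE_DEFAULT on a match, ENXIO otherwise
 */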
192 static int
193 lio_probe(device_t dev)
194 {
195 	struct lio_vendor_info	*tbl;
196 
197 	uint16_t	vendor_id;
198 	uint16_t	device_id;
199 	uint16_t	subdevice_id;
200 	uint8_t		revision_id;
201 
202 	vendor_id = pci_get_vendor(dev);
203 	if (vendor_id != PCI_VENDOR_ID_CAVIUM)
204 		return (ENXIO);
205 
206 	device_id = pci_get_device(dev);
207 	subdevice_id = pci_get_subdevice(dev);
208 	revision_id = pci_get_revid(dev);
209 
210 	tbl = lio_pci_tbl;
211 	while (tbl->vendor_id) {
212 		if ((vendor_id == tbl->vendor_id) &&
213 		    (device_id == tbl->device_id) &&
214 		    (subdevice_id == tbl->subdevice_id) &&
215 		    (revision_id == tbl->revision_id)) {
216 			device_set_descf(dev, "%s, Version - %s",
217 			    lio_strings[tbl->index], LIO_VERSION);
218 			return (BUS_PROBE_DEFAULT);
219 		}
220 
221 		tbl++;
222 	}
223 
224 	return (ENXIO);
225 }
226 
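/*
 * \brief PCI device attach handler
 * @param device device being attached
 *
 * Allocates the octeon device, initializes it via lio_device_init(),
 * starts the per-NIC watchdog kthread if it is not already running,
 * then polls until the firmware reports LIO_DEV_CORE_OK and the NIC
 * module can be started by lio_nic_starter().
 */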
227 static int
228 lio_attach(device_t device)
229 {
230 	struct octeon_device	*oct_dev = NULL;
231 	uint64_t	scratch1;
232 	uint32_t	error;
233 	int		timeout, ret = 1;
234 	uint8_t		bus, dev, function;
235 
236 	oct_dev = lio_allocate_device(device);
237 	if (oct_dev == NULL) {
238 		device_printf(device, "Error: Unable to allocate device\n");
239 		return (-ENOMEM);
240 	}
241 
242 	oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET;
243 	oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET;
244 	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
245 
246 	oct_dev->device = device;
247 	bus = pci_get_bus(device);
248 	dev = pci_get_slot(device);
249 	function = pci_get_function(device);
250 
251 	lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n",
252 		     pci_get_vendor(device), pci_get_device(device), bus, dev,
253 		     function);
254 
255 	if (lio_device_init(oct_dev)) {
256 		lio_dev_err(oct_dev, "Failed to init device\n");
257 		lio_detach(device);
258 		return (-ENOMEM);
259 	}
260 
261 	scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
262 	if (!(scratch1 & 4ULL)) {
263 		/*
264 		 * Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
265 		 * the lio watchdog kernel thread is running for this
266 		 * NIC.  Each NIC gets one watchdog kernel thread.
267 		 */
268 		scratch1 |= 4ULL;
269 		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
270 
271 		error = kproc_create(lio_watchdog, oct_dev,
272 				     &oct_dev->watchdog_task, 0, 0,
273 				     "liowd/%02hhx:%02hhx.%hhx", bus,
274 				     dev, function);
275 		if (!error) {
276 			kproc_resume(oct_dev->watchdog_task);
277 		} else {
278 			oct_dev->watchdog_task = NULL;
279 			lio_dev_err(oct_dev,
280 				    "failed to create kernel_thread\n");
281 			lio_detach(device);
282 			return (-1);
283 		}
284 	}
285 	oct_dev->rx_pause = 1;
286 	oct_dev->tx_pause = 1;
287 
288 	timeout = 0;
289 	while (timeout < LIO_NIC_STARTER_TIMEOUT) {
290 		lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS);
291 		timeout += LIO_STARTER_POLL_INTERVAL_MS;
292 
293 		/*
294 		 * During the boot process interrupts are not available,
295 		 * so poll for the first control message from the firmware.
296 		 */
297 		if (cold)
298 			lio_droq_bh(oct_dev->droq[0], 0);
299 
300 		if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) {
301 			ret = lio_nic_starter(oct_dev);
302 			break;
303 		}
304 	}
305 
306 	if (ret) {
307 		lio_dev_err(oct_dev, "Firmware failed to start\n");
308 		lio_detach(device);
309 		return (-EIO);
310 	}
311 
312 	lio_dev_dbg(oct_dev, "Device is ready\n");
313 
314 	return (0);
315 }
316 
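/*
 * \brief PCI device detach handler
 * @param dev device being detached
 *
 * Suspends the watchdog thread and clears its flag in SLI_SCRATCH1,
 * stops the NIC module if it was started, then releases all resources
 * held by the octeon device and frees the device structure.
 */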
317 static int
318 lio_detach(device_t dev)
319 {
320 	struct octeon_device	*oct_dev = device_get_softc(dev);
321 
322 	lio_dev_dbg(oct_dev, "Stopping device\n");
323 	if (oct_dev->watchdog_task) {
324 		uint64_t	scratch1;
325 
326 		kproc_suspend(oct_dev->watchdog_task, 0);
327 
328 		scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
329 		scratch1 &= ~4ULL;
330 		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
331 	}
332 
333 	if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP))
334 		lio_stop_nic_module(oct_dev);
335 
336 	/*
337 	 * Reset the octeon device and clean up all memory allocated for
338 	 * the octeon device by the driver.
339 	 */
340 	lio_destroy_resources(oct_dev);
341 
342 	lio_dev_info(oct_dev, "Device removed\n");
343 
344 	/*
345 	 * This octeon device has been removed. Update the global
346 	 * data structure to reflect this. Free the device structure.
347 	 */
348 	lio_free_device_mem(oct_dev);
349 	return (0);
350 }
351 
352 static int
353 lio_shutdown(device_t dev)
354 {
355 	struct octeon_device	*oct_dev = device_get_softc(dev);
356 	struct lio	*lio = if_getsoftc(oct_dev->props.ifp);
357 
358 	lio_send_rx_ctrl_cmd(lio, 0);
359 
360 	return (0);
361 }
362 
363 static int
364 lio_suspend(device_t dev)
365 {
366 
367 	return (ENXIO);
368 }
369 
370 static int
371 lio_resume(device_t dev)
372 {
373 
374 	return (ENXIO);
375 }
376 
377 static int
378 lio_event(struct module *mod, int event, void *junk)
379 {
380 
381 	switch (event) {
382 	case MOD_LOAD:
383 		lio_init_device_list(LIO_CFG_TYPE_DEFAULT);
384 		break;
385 	default:
386 		break;
387 	}
388 
389 	return (0);
390 }
391 
392 /*********************************************************************
393  *  FreeBSD Device Interface Entry Points
394  * *******************************************************************/
395 static device_method_t lio_methods[] = {
396 	/* Device interface */
397 	DEVMETHOD(device_probe, lio_probe),
398 	DEVMETHOD(device_attach, lio_attach),
399 	DEVMETHOD(device_detach, lio_detach),
400 	DEVMETHOD(device_shutdown, lio_shutdown),
401 	DEVMETHOD(device_suspend, lio_suspend),
402 	DEVMETHOD(device_resume, lio_resume),
403 	DEVMETHOD_END
404 };
405 
406 static driver_t lio_driver = {
407 	LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device),
408 };
409 
410 DRIVER_MODULE(lio, pci, lio_driver, lio_event, NULL);
411 
412 MODULE_DEPEND(lio, pci, 1, 1, 1);
413 MODULE_DEPEND(lio, ether, 1, 1, 1);
414 MODULE_DEPEND(lio, firmware, 1, 1, 1);
415 
416 static bool
417 fw_type_is_none(void)
418 {
419 	return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
420 		       sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
421 }
422 
423 /*
424  * \brief Device initialization for each Octeon device that is probed
425  * @param octeon_dev  octeon device
426  */
427 static int
428 lio_device_init(struct octeon_device *octeon_dev)
429 {
430 	unsigned long	ddr_timeout = LIO_DDR_TIMEOUT;
431 	char	*dbg_enb = NULL;
432 	int	fw_loaded = 0;
433 	int	i, j, ret;
434 	uint8_t	bus, dev, function;
435 	char	bootcmd[] = "\n";
436 
437 	bus = pci_get_bus(octeon_dev->device);
438 	dev = pci_get_slot(octeon_dev->device);
439 	function = pci_get_function(octeon_dev->device);
440 
441 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE);
442 
443 	/* Enable access to the octeon device */
444 	if (pci_enable_busmaster(octeon_dev->device)) {
445 		lio_dev_err(octeon_dev, "pci_enable_busmaster failed\n");
446 		return (1);
447 	}
448 
449 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE);
450 
451 	/* Identify the Octeon type and map the BAR address space. */
452 	if (lio_chip_specific_setup(octeon_dev)) {
453 		lio_dev_err(octeon_dev, "Chip specific setup failed\n");
454 		return (1);
455 	}
456 
457 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE);
458 
459 	/*
460 	 * Only add a reference after setting status 'LIO_DEV_PCI_MAP_DONE',
461 	 * since that is what is required for the reference to be removed
462 	 * during de-initialization (see 'lio_destroy_resources').
463 	 */
464 	lio_register_device(octeon_dev, bus, dev, function, true);
465 
466 
467 	octeon_dev->app_mode = LIO_DRV_INVALID_APP;
468 
469 	if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) {
470 		fw_loaded = 0;
471 		/* Do a soft reset of the Octeon device. */
472 		if (octeon_dev->fn_list.soft_reset(octeon_dev))
473 			return (1);
474 
475 		/* things might have changed */
476 		if (!lio_cn23xx_pf_fw_loaded(octeon_dev))
477 			fw_loaded = 0;
478 		else
479 			fw_loaded = 1;
480 	} else {
481 		fw_loaded = 1;
482 	}
483 
484 	/*
485 	 * Initialize the dispatch mechanism used to push packets arriving on
486 	 * Octeon Output queues.
487 	 */
488 	if (lio_init_dispatch_list(octeon_dev))
489 		return (1);
490 
491 	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
492 				 LIO_OPCODE_NIC_CORE_DRV_ACTIVE,
493 				 lio_core_drv_init, octeon_dev);
494 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE);
495 
496 	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
497 	if (ret) {
498 		lio_dev_err(octeon_dev,
499 			    "Failed to configure device registers\n");
500 		return (ret);
501 	}
502 
503 	/* Initialize soft command buffer pool */
504 	if (lio_setup_sc_buffer_pool(octeon_dev)) {
505 		lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n");
506 		return (1);
507 	}
508 
509 	atomic_store_rel_int(&octeon_dev->status,
510 			     LIO_DEV_SC_BUFF_POOL_INIT_DONE);
511 
512 	if (lio_allocate_ioq_vector(octeon_dev)) {
513 		lio_dev_err(octeon_dev,
514 			    "IOQ vector allocation failed\n");
515 		return (1);
516 	}
517 
518 	atomic_store_rel_int(&octeon_dev->status,
519 			     LIO_DEV_MSIX_ALLOC_VECTOR_DONE);
520 
521 	for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
522 		octeon_dev->instr_queue[i] =
523 			malloc(sizeof(struct lio_instr_queue),
524 			       M_DEVBUF, M_NOWAIT | M_ZERO);
525 		if (octeon_dev->instr_queue[i] == NULL)
526 			return (1);
527 	}
528 
529 	/* Setup the data structures that manage this Octeon's Input queues. */
530 	if (lio_setup_instr_queue0(octeon_dev)) {
531 		lio_dev_err(octeon_dev,
532 			    "Instruction queue initialization failed\n");
533 		return (1);
534 	}
535 
536 	atomic_store_rel_int(&octeon_dev->status,
537 			     LIO_DEV_INSTR_QUEUE_INIT_DONE);
538 
539 	/*
540 	 * Initialize lists to manage the requests of different types that
541 	 * arrive from user & kernel applications for this octeon device.
542 	 */
543 
544 	if (lio_setup_response_list(octeon_dev)) {
545 		lio_dev_err(octeon_dev, "Response list allocation failed\n");
546 		return (1);
547 	}
548 
549 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE);
550 
551 	for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
552 		octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]),
553 					     M_DEVBUF, M_NOWAIT | M_ZERO);
554 		if (octeon_dev->droq[i] == NULL)
555 			return (1);
556 	}
557 
558 	if (lio_setup_output_queue0(octeon_dev)) {
559 		lio_dev_err(octeon_dev, "Output queue initialization failed\n");
560 		return (1);
561 	}
562 
563 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE);
564 
565 	/*
566 	 * Setup the interrupt handler and record the INT SUM register address
567 	 */
568 	if (lio_setup_interrupt(octeon_dev,
569 				octeon_dev->sriov_info.num_pf_rings))
570 		return (1);
571 
572 	/* Enable Octeon device interrupts */
573 	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
574 
575 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE);
576 
577 	/*
578 	 * Send Credit for Octeon Output queues. Credits are always sent BEFORE
579 	 * the output queue is enabled.
580 	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
581 	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
582 	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
583 	 * before any credits have been issued, causing the ring to be reset
584 	 * (and the f/w appear to never have started).
585 	 */
586 	for (j = 0; j < octeon_dev->num_oqs; j++)
587 		lio_write_csr32(octeon_dev,
588 				octeon_dev->droq[j]->pkts_credit_reg,
589 				octeon_dev->droq[j]->max_count);
590 
591 	/* Enable the input and output queues for this Octeon device */
592 	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
593 	if (ret) {
594 		lio_dev_err(octeon_dev, "Failed to enable input/output queues");
595 		return (ret);
596 	}
597 
598 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE);
599 
600 	if (!fw_loaded) {
601 		lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n");
602 		if (!ddr_timeout) {
603 			lio_dev_info(octeon_dev,
604 				     "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
605 		}
606 
607 		lio_sleep_timeout(LIO_RESET_MSECS);
608 
609 		/*
610 		 * Wait for the octeon to initialize DDR after the
611 		 * soft-reset.
612 		 */
613 		while (!ddr_timeout) {
614 			if (pause("-", lio_ms_to_ticks(100))) {
615 				/* user probably pressed Control-C */
616 				return (1);
617 			}
618 		}
619 
620 		ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout);
621 		if (ret) {
622 			lio_dev_err(octeon_dev,
623 				    "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
624 				    ret);
625 			return (1);
626 		}
627 
628 		if (lio_wait_for_bootloader(octeon_dev, 1100)) {
629 			lio_dev_err(octeon_dev, "Board not responding\n");
630 			return (1);
631 		}
632 
633 		/* Divert uboot to take commands from host instead. */
634 		ret = lio_console_send_cmd(octeon_dev, bootcmd, 50);
635 
636 		lio_dev_dbg(octeon_dev, "Initializing consoles\n");
637 		ret = lio_init_consoles(octeon_dev);
638 		if (ret) {
639 			lio_dev_err(octeon_dev, "Could not access board consoles\n");
640 			return (1);
641 		}
642 
643 		/*
644 		 * If console debug is enabled, pass an empty string to
645 		 * use the default enablement; otherwise pass NULL to
646 		 * leave console debug output disabled.
647 		 */
648 		dbg_enb = lio_console_debug_enabled(0) ? "" : NULL;
649 		ret = lio_add_console(octeon_dev, 0, dbg_enb);
650 
651 		if (ret) {
652 			lio_dev_err(octeon_dev, "Could not access board console\n");
653 			return (1);
654 		} else if (lio_console_debug_enabled(0)) {
655 			/*
656 			 * If the console was added and we are logging console
657 			 * output, then set our console print function.
658 			 */
659 			octeon_dev->console[0].print = lio_dbg_console_print;
660 		}
661 
662 		atomic_store_rel_int(&octeon_dev->status,
663 				     LIO_DEV_CONSOLE_INIT_DONE);
664 
665 		lio_dev_dbg(octeon_dev, "Loading firmware\n");
666 
667 		ret = lio_load_firmware(octeon_dev);
668 		if (ret) {
669 			lio_dev_err(octeon_dev, "Could not load firmware to board\n");
670 			return (1);
671 		}
672 	}
673 
674 	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK);
675 
676 	return (0);
677 }
678 
679 /*
680  * \brief PCI FLR for each Octeon device.
681  * @param oct octeon device
682  */
683 static void
684 lio_pci_flr(struct octeon_device *oct)
685 {
686 	uint32_t	exppos, status;
687 
688 	pci_find_cap(oct->device, PCIY_EXPRESS, &exppos);
689 
690 	pci_save_state(oct->device);
691 
692 	/* Quiesce the device completely */
693 	pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2);
694 
695 	/* Wait for the Transaction Pending bit to clear */
696 	lio_mdelay(100);
697 
698 	status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
699 	if (status & PCIEM_STA_TRANSACTION_PND) {
700 		lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
701 		lio_mdelay(5000);
702 
703 		status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
704 		if (status & PCIEM_STA_TRANSACTION_PND)
705 			lio_dev_info(oct, "Function reset still incomplete after 5s, reset anyway\n");
706 	}
707 
708 	pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2);
709 	lio_mdelay(100);
710 
711 	pci_restore_state(oct->device);
712 }
713 
714 /*
715  * \brief Debug console print function
716  * @param octeon_dev  octeon device
717  * @param console_num console number
718  * @param prefix      first portion of line to display
719  * @param suffix      second portion of line to display
720  *
721  * The OCTEON debug console outputs entire lines (excluding '\n').
722  * Normally, the line will be passed in the 'prefix' parameter.
723  * However, due to buffering, it is possible for a line to be split into two
724  * parts, in which case they will be passed as the 'prefix' parameter and
725  * 'suffix' parameter.
726  */
727 static int
728 lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num,
729 		      char *prefix, char *suffix)
730 {
731 
732 	if (prefix != NULL && suffix != NULL)
733 		lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix);
734 	else if (prefix != NULL)
735 		lio_dev_info(oct, "%u: %s\n", console_num, prefix);
736 	else if (suffix != NULL)
737 		lio_dev_info(oct, "%u: %s\n", console_num, suffix);
738 
739 	return (0);
740 }
741 
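/*
 * \brief Watchdog kthread body
 * @param param octeon device
 *
 * Every two seconds reads SLI_SCRATCH2 for the mask of crashed or stuck
 * firmware cores, marks this PF (and its peer PF, if present) as having
 * crashed cores, and logs an error once per affected core.
 */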
742 static void
743 lio_watchdog(void *param)
744 {
745 	int		core_num;
746 	uint16_t	mask_of_crashed_or_stuck_cores = 0;
747 	struct octeon_device	*oct = param;
748 	bool		err_msg_was_printed[LIO_MAX_CORES];
749 
750 	bzero(err_msg_was_printed, sizeof(err_msg_was_printed));
751 
752 	while (1) {
753 		kproc_suspend_check(oct->watchdog_task);
754 		mask_of_crashed_or_stuck_cores =
755 			(uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2);
756 
757 		if (mask_of_crashed_or_stuck_cores) {
758 			struct octeon_device *other_oct;
759 
760 			oct->cores_crashed = true;
761 			other_oct = lio_get_other_octeon_device(oct);
762 			if (other_oct != NULL)
763 				other_oct->cores_crashed = true;
764 
765 			for (core_num = 0; core_num < LIO_MAX_CORES;
766 			     core_num++) {
767 				bool core_crashed_or_got_stuck;
768 
769 				core_crashed_or_got_stuck =
770 				    (mask_of_crashed_or_stuck_cores >>
771 				     core_num) & 1;
772 				if (core_crashed_or_got_stuck &&
773 				    !err_msg_was_printed[core_num]) {
774 					lio_dev_err(oct,
775 						    "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
776 						    core_num);
777 					err_msg_was_printed[core_num] = true;
778 				}
779 			}
780 
781 		}
782 
783 		/* sleep for two seconds */
784 		pause("-", lio_ms_to_ticks(2000));
785 	}
786 }
787 
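/*
 * \brief Identify the Octeon chip and perform chip-specific setup
 * @param oct octeon device
 *
 * Reads the PCI device ID, validates the num_queues_per_pf tunables and
 * runs the CN23XX PF setup routine.
 * @returns 0 on success, nonzero otherwise
 */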
788 static int
789 lio_chip_specific_setup(struct octeon_device *oct)
790 {
791 	char		*s;
792 	uint32_t	dev_id;
793 	int		ret = 1;
794 
795 	dev_id = lio_read_pci_cfg(oct, 0);
796 	oct->subdevice_id = pci_get_subdevice(oct->device);
797 
798 	switch (dev_id) {
799 	case LIO_CN23XX_PF_PCIID:
800 		oct->chip_id = LIO_CN23XX_PF_VID;
801 		if (pci_get_function(oct->device) == 0) {
802 			if (num_queues_per_pf0 < 0) {
803 				lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n",
804 					     num_queues_per_pf0);
805 				num_queues_per_pf0 = 0;
806 			}
807 
808 			oct->sriov_info.num_pf_rings = num_queues_per_pf0;
809 		} else {
810 			if (num_queues_per_pf1 < 0) {
811 				lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n",
812 					     num_queues_per_pf1);
813 				num_queues_per_pf1 = 0;
814 			}
815 
816 			oct->sriov_info.num_pf_rings = num_queues_per_pf1;
817 		}
818 
819 		ret = lio_cn23xx_pf_setup_device(oct);
820 		s = "CN23XX";
821 		break;
822 
823 	default:
824 		s = "?";
825 		lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id);
826 	}
827 
828 	if (!ret)
829 		lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s,
830 			     OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct),
831 			     lio_get_conf(oct)->card_name, LIO_VERSION);
832 
833 	return (ret);
834 }
835 
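/*
 * \brief Find the other PF sharing this Octeon adapter
 * @param oct octeon device
 *
 * Looks up the device with the next octeon ID and returns it if it sits
 * on the same PCI bus and slot, otherwise returns NULL.
 */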
836 static struct octeon_device *
837 lio_get_other_octeon_device(struct octeon_device *oct)
838 {
839 	struct octeon_device	*other_oct;
840 
841 	other_oct = lio_get_device(oct->octeon_id + 1);
842 
843 	if ((other_oct != NULL) && other_oct->device) {
844 		int	oct_busnum, other_oct_busnum;
845 
846 		oct_busnum = pci_get_bus(oct->device);
847 		other_oct_busnum = pci_get_bus(other_oct->device);
848 
849 		if (oct_busnum == other_oct_busnum) {
850 			int	oct_slot, other_oct_slot;
851 
852 			oct_slot = pci_get_slot(oct->device);
853 			other_oct_slot = pci_get_slot(other_oct->device);
854 
855 			if (oct_slot == other_oct_slot)
856 				return (other_oct);
857 		}
858 	}
859 	return (NULL);
860 }
861 
862 /*
863  * \brief Load firmware to device
864  * @param oct octeon device
865  *
866  * Maps device to firmware filename, requests firmware, and downloads it
867  */
868 static int
869 lio_load_firmware(struct octeon_device *oct)
870 {
871 	const struct firmware	*fw;
872 	char	*tmp_fw_type = NULL;
873 	int	ret = 0;
874 	char	fw_name[LIO_MAX_FW_FILENAME_LEN];
875 
876 	if (fw_type[0] == '\0')
877 		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
878 	else
879 		tmp_fw_type = fw_type;
880 
881 	sprintf(fw_name, "%s%s_%s%s", LIO_FW_BASE_NAME,
882 		lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX);
883 
884 	fw = firmware_get(fw_name);
885 	if (fw == NULL) {
886 		lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n",
887 			    fw_name);
888 		return (EINVAL);
889 	}
890 
891 	ret = lio_download_firmware(oct, fw->data, fw->datasize);
892 
893 	firmware_put(fw, FIRMWARE_UNLOAD);
894 
895 	return (ret);
896 }
897 
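/*
 * \brief Start the NIC once the firmware core is up
 * @param oct octeon device
 *
 * Marks the device as running and, if the firmware reports the NIC
 * application, initializes the network interfaces via
 * lio_init_nic_module().
 */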
898 static int
899 lio_nic_starter(struct octeon_device *oct)
900 {
901 	int	ret = 0;
902 
903 	atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING);
904 
905 	if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) {
906 		if (lio_init_nic_module(oct)) {
907 			lio_dev_err(oct, "NIC initialization failed\n");
908 			ret = -1;
909 #ifdef CAVIUM_ONLY_23XX_VF
910 		} else {
911 			if (octeon_enable_sriov(oct) < 0)
912 				ret = -1;
913 #endif
914 		}
915 	} else {
916 		lio_dev_err(oct,
917 			    "Unexpected application running on NIC (%d). Check firmware.\n",
918 			    oct->app_mode);
919 		ret = -1;
920 	}
921 
922 	return (ret);
923 }
924 
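/*
 * \brief Initialize the NIC module
 * @param oct octeon device
 *
 * Records the port count from the device configuration and creates the
 * network interfaces with lio_setup_nic_devices().
 */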
925 static int
926 lio_init_nic_module(struct octeon_device *oct)
927 {
928 	int	num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct));
929 	int	retval = 0;
930 
931 	lio_dev_dbg(oct, "Initializing network interfaces\n");
932 
933 	/*
934 	 * Only the default IQ and OQ were initialized;
935 	 * initialize the rest as well.
936 	 */
937 
938 	/* run port_config command for each port */
939 	oct->ifcount = num_nic_ports;
940 
941 	bzero(&oct->props, sizeof(struct lio_if_props));
942 
943 	oct->props.gmxport = -1;
944 
945 	retval = lio_setup_nic_devices(oct);
946 	if (retval) {
947 		lio_dev_err(oct, "Setup NIC devices failed\n");
948 		goto lio_init_failure;
949 	}
950 
951 	lio_dev_dbg(oct, "Network interfaces ready\n");
952 
953 	return (retval);
954 
955 lio_init_failure:
956 
957 	oct->ifcount = 0;
958 
959 	return (retval);
960 }
961 
962 static int
963 lio_ifmedia_update(if_t ifp)
964 {
965 	struct lio	*lio = if_getsoftc(ifp);
966 	struct ifmedia	*ifm;
967 
968 	ifm = &lio->ifmedia;
969 
970 	/* We only support Ethernet media type. */
971 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
972 		return (EINVAL);
973 
974 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
975 	case IFM_AUTO:
976 		break;
977 	case IFM_10G_CX4:
978 	case IFM_10G_SR:
979 	case IFM_10G_T:
980 	case IFM_10G_TWINAX:
981 	default:
982 		/* We don't support changing the media type. */
983 		lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n",
984 			    IFM_SUBTYPE(ifm->ifm_media));
985 		return (EINVAL);
986 	}
987 
988 	return (0);
989 }
990 
991 static int
992 lio_get_media_subtype(struct octeon_device *oct)
993 {
994 
995 	switch(oct->subdevice_id) {
996 	case LIO_CN2350_10G_SUBDEVICE:
997 	case LIO_CN2350_10G_SUBDEVICE1:
998 	case LIO_CN2360_10G_SUBDEVICE:
999 		return (IFM_10G_SR);
1000 
1001 	case LIO_CN2350_25G_SUBDEVICE:
1002 	case LIO_CN2360_25G_SUBDEVICE:
1003 		return (IFM_25G_SR);
1004 	}
1005 
1006 	return (IFM_10G_SR);
1007 }
1008 
1009 static uint64_t
1010 lio_get_baudrate(struct octeon_device *oct)
1011 {
1012 
1013 	switch(oct->subdevice_id) {
1014 	case LIO_CN2350_10G_SUBDEVICE:
1015 	case LIO_CN2350_10G_SUBDEVICE1:
1016 	case LIO_CN2360_10G_SUBDEVICE:
1017 		return (IF_Gbps(10));
1018 
1019 	case LIO_CN2350_25G_SUBDEVICE:
1020 	case LIO_CN2360_25G_SUBDEVICE:
1021 		return (IF_Gbps(25));
1022 	}
1023 
1024 	return (IF_Gbps(10));
1025 }
1026 
1027 static void
1028 lio_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
1029 {
1030 	struct lio	*lio = if_getsoftc(ifp);
1031 
1032 	/* Report link down if the driver isn't running. */
1033 	if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
1034 		ifmr->ifm_active |= IFM_NONE;
1035 		return;
1036 	}
1037 
1038 	/* Setup the default interface info. */
1039 	ifmr->ifm_status = IFM_AVALID;
1040 	ifmr->ifm_active = IFM_ETHER;
1041 
1042 	if (lio->linfo.link.s.link_up) {
1043 		ifmr->ifm_status |= IFM_ACTIVE;
1044 	} else {
1045 		ifmr->ifm_active |= IFM_NONE;
1046 		return;
1047 	}
1048 
1049 	ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev);
1050 
1051 	if (lio->linfo.link.s.duplex)
1052 		ifmr->ifm_active |= IFM_FDX;
1053 	else
1054 		ifmr->ifm_active |= IFM_HDX;
1055 }
1056 
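/*
 * \brief if_get_counter handler
 * @param ifp interface
 * @param cnt counter being queried
 *
 * Sums the per-queue and link statistics that back the requested ifnet
 * counter; unhandled counters fall back to if_get_counter_default().
 */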
1057 static uint64_t
1058 lio_get_counter(if_t ifp, ift_counter cnt)
1059 {
1060 	struct lio	*lio = if_getsoftc(ifp);
1061 	struct octeon_device	*oct = lio->oct_dev;
1062 	uint64_t	counter = 0;
1063 	int		i, q_no;
1064 
1065 	switch (cnt) {
1066 	case IFCOUNTER_IPACKETS:
1067 		for (i = 0; i < oct->num_oqs; i++) {
1068 			q_no = lio->linfo.rxpciq[i].s.q_no;
1069 			counter += oct->droq[q_no]->stats.rx_pkts_received;
1070 		}
1071 		break;
1072 	case IFCOUNTER_OPACKETS:
1073 		for (i = 0; i < oct->num_iqs; i++) {
1074 			q_no = lio->linfo.txpciq[i].s.q_no;
1075 			counter += oct->instr_queue[q_no]->stats.tx_done;
1076 		}
1077 		break;
1078 	case IFCOUNTER_IBYTES:
1079 		for (i = 0; i < oct->num_oqs; i++) {
1080 			q_no = lio->linfo.rxpciq[i].s.q_no;
1081 			counter += oct->droq[q_no]->stats.rx_bytes_received;
1082 		}
1083 		break;
1084 	case IFCOUNTER_OBYTES:
1085 		for (i = 0; i < oct->num_iqs; i++) {
1086 			q_no = lio->linfo.txpciq[i].s.q_no;
1087 			counter += oct->instr_queue[q_no]->stats.tx_tot_bytes;
1088 		}
1089 		break;
1090 	case IFCOUNTER_IQDROPS:
1091 		for (i = 0; i < oct->num_oqs; i++) {
1092 			q_no = lio->linfo.rxpciq[i].s.q_no;
1093 			counter += oct->droq[q_no]->stats.rx_dropped;
1094 		}
1095 		break;
1096 	case IFCOUNTER_OQDROPS:
1097 		for (i = 0; i < oct->num_iqs; i++) {
1098 			q_no = lio->linfo.txpciq[i].s.q_no;
1099 			counter += oct->instr_queue[q_no]->stats.tx_dropped;
1100 		}
1101 		break;
1102 	case IFCOUNTER_IMCASTS:
1103 		counter = oct->link_stats.fromwire.total_mcst;
1104 		break;
1105 	case IFCOUNTER_OMCASTS:
1106 		counter = oct->link_stats.fromhost.mcast_pkts_sent;
1107 		break;
1108 	case IFCOUNTER_COLLISIONS:
1109 		counter = oct->link_stats.fromhost.total_collisions;
1110 		break;
1111 	case IFCOUNTER_IERRORS:
1112 		counter = oct->link_stats.fromwire.fcs_err +
1113 		    oct->link_stats.fromwire.l2_err +
1114 		    oct->link_stats.fromwire.frame_err;
1115 		break;
1116 	default:
1117 		return (if_get_counter_default(ifp, cnt));
1118 	}
1119 
1120 	return (counter);
1121 }
1122 
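/*
 * \brief Initialize the ifnet for a LiquidIO interface
 * @param lio per-interface private data
 *
 * Sets up ifmedia, the ifnet callbacks, MTU, checksum offload assistance,
 * capability flags and the interface baudrate.
 */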
1123 static int
1124 lio_init_ifnet(struct lio *lio)
1125 {
1126 	struct octeon_device	*oct = lio->oct_dev;
1127 	if_t			ifp = lio->ifp;
1128 
1129 	/* ifconfig entrypoint for media type/status reporting */
1130 	ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update,
1131 		     lio_ifmedia_status);
1132 
1133 	/* set the default interface values */
1134 	ifmedia_add(&lio->ifmedia,
1135 		    (IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)),
1136 		    0, NULL);
1137 	ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
1138 	ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO));
1139 
1140 	lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media;
1141 	lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media);
1142 
1143 	if_initname(ifp, device_get_name(oct->device),
1144 		    device_get_unit(oct->device));
1145 	if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
1146 	if_setioctlfn(ifp, lio_ioctl);
1147 	if_setgetcounterfn(ifp, lio_get_counter);
1148 	if_settransmitfn(ifp, lio_mq_start);
1149 	if_setqflushfn(ifp, lio_qflush);
1150 	if_setinitfn(ifp, lio_open);
1151 	if_setmtu(ifp, lio->linfo.link.s.mtu);
1152 	lio->mtu = lio->linfo.link.s.mtu;
1153 	if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1154 			     CSUM_TCP_IPV6 | CSUM_UDP_IPV6));
1155 
1156 	if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
1157 				    IFCAP_TSO | IFCAP_LRO |
1158 				    IFCAP_JUMBO_MTU | IFCAP_HWSTATS |
1159 				    IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER |
1160 				    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING |
1161 				    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0);
1162 
1163 	if_setcapenable(ifp, if_getcapabilities(ifp));
1164 	if_setbaudrate(ifp, lio_get_baudrate(oct));
1165 
1166 	return (0);
1167 }
1168 
1169 static void
1170 lio_tcp_lro_free(struct octeon_device *octeon_dev, if_t ifp)
1171 {
1172 	struct lio	*lio = if_getsoftc(ifp);
1173 	struct lio_droq	*droq;
1174 	int		q_no;
1175 	int		i;
1176 
1177 	for (i = 0; i < octeon_dev->num_oqs; i++) {
1178 		q_no = lio->linfo.rxpciq[i].s.q_no;
1179 		droq = octeon_dev->droq[q_no];
1180 		if (droq->lro.ifp) {
1181 			tcp_lro_free(&droq->lro);
1182 			droq->lro.ifp = NULL;
1183 		}
1184 	}
1185 }
1186 
1187 static int
1188 lio_tcp_lro_init(struct octeon_device *octeon_dev, if_t ifp)
1189 {
1190 	struct lio	*lio = if_getsoftc(ifp);
1191 	struct lio_droq	*droq;
1192 	struct lro_ctrl	*lro;
1193 	int		i, q_no, ret = 0;
1194 
1195 	for (i = 0; i < octeon_dev->num_oqs; i++) {
1196 		q_no = lio->linfo.rxpciq[i].s.q_no;
1197 		droq = octeon_dev->droq[q_no];
1198 		lro = &droq->lro;
1199 		ret = tcp_lro_init(lro);
1200 		if (ret) {
1201 			lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n",
1202 				    ret);
1203 			goto lro_init_failed;
1204 		}
1205 
1206 		lro->ifp = ifp;
1207 	}
1208 
1209 	return (ret);
1210 
1211 lro_init_failed:
1212 	lio_tcp_lro_free(octeon_dev, ifp);
1213 
1214 	return (ret);
1215 }
1216 
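/*
 * \brief Create and configure the network interfaces
 * @param octeon_dev octeon device
 *
 * For each interface, sends an IF_CFG soft command to the firmware,
 * parses the returned queue masks, allocates the ifnet and lio softc,
 * attaches the Ethernet interface, and sets up I/O queues, gather lists,
 * LRO and the default offload features.
 */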
1217 static int
1218 lio_setup_nic_devices(struct octeon_device *octeon_dev)
1219 {
1220 	union		octeon_if_cfg if_cfg;
1221 	struct lio	*lio = NULL;
1222 	if_t		ifp = NULL;
1223 	struct lio_version		*vdata;
1224 	struct lio_soft_command		*sc;
1225 	struct lio_if_cfg_context	*ctx;
1226 	struct lio_if_cfg_resp		*resp;
1227 	struct lio_if_props		*props;
1228 	int		num_iqueues, num_oqueues, retval;
1229 	unsigned int	base_queue;
1230 	unsigned int	gmx_port_id;
1231 	uint32_t	ctx_size, data_size;
1232 	uint32_t	ifidx_or_pfnum, resp_size;
1233 	uint8_t		mac[ETHER_HDR_LEN], i, j;
1234 
1235 	/* This is to handle link status changes */
1236 	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
1237 				 LIO_OPCODE_NIC_INFO,
1238 				 lio_link_info, octeon_dev);
1239 
1240 	for (i = 0; i < octeon_dev->ifcount; i++) {
1241 		resp_size = sizeof(struct lio_if_cfg_resp);
1242 		ctx_size = sizeof(struct lio_if_cfg_context);
1243 		data_size = sizeof(struct lio_version);
1244 		sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size,
1245 					    ctx_size);
1246 		if (sc == NULL)
1247 			return (ENOMEM);
1248 
1249 		resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1250 		ctx = (struct lio_if_cfg_context *)sc->ctxptr;
1251 		vdata = (struct lio_version *)sc->virtdptr;
1252 
1253 		*((uint64_t *)vdata) = 0;
1254 		vdata->major = htobe16(LIO_BASE_MAJOR_VERSION);
1255 		vdata->minor = htobe16(LIO_BASE_MINOR_VERSION);
1256 		vdata->micro = htobe16(LIO_BASE_MICRO_VERSION);
1257 
1258 		num_iqueues = octeon_dev->sriov_info.num_pf_rings;
1259 		num_oqueues = octeon_dev->sriov_info.num_pf_rings;
1260 		base_queue = octeon_dev->sriov_info.pf_srn;
1261 
1262 		gmx_port_id = octeon_dev->pf_num;
1263 		ifidx_or_pfnum = octeon_dev->pf_num;
1264 
1265 		lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n",
1266 			    ifidx_or_pfnum, num_iqueues, num_oqueues);
1267 		ctx->cond = 0;
1268 		ctx->octeon_id = lio_get_device_id(octeon_dev);
1269 
1270 		if_cfg.if_cfg64 = 0;
1271 		if_cfg.s.num_iqueues = num_iqueues;
1272 		if_cfg.s.num_oqueues = num_oqueues;
1273 		if_cfg.s.base_queue = base_queue;
1274 		if_cfg.s.gmx_port_id = gmx_port_id;
1275 
1276 		sc->iq_no = 0;
1277 
1278 		lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC,
1279 					 LIO_OPCODE_NIC_IF_CFG, 0,
1280 					 if_cfg.if_cfg64, 0);
1281 
1282 		sc->callback = lio_if_cfg_callback;
1283 		sc->callback_arg = sc;
1284 		sc->wait_time = 3000;
1285 
1286 		retval = lio_send_soft_command(octeon_dev, sc);
1287 		if (retval == LIO_IQ_SEND_FAILED) {
1288 			lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n",
1289 				    retval);
1290 			/* Soft instr is freed by driver in case of failure. */
1291 			goto setup_nic_dev_fail;
1292 		}
1293 
1294 		/*
1295 		 * Sleep on a wait queue until the cond flag indicates that the
1296 		 * response has arrived or the request timed out.
1297 		 */
1298 		lio_sleep_cond(octeon_dev, &ctx->cond);
1299 
1300 		retval = resp->status;
1301 		if (retval) {
1302 			lio_dev_err(octeon_dev, "iq/oq config failed\n");
1303 			goto setup_nic_dev_fail;
1304 		}
1305 
1306 		lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
1307 				 (sizeof(struct octeon_if_cfg_info)) >> 3);
1308 
1309 		num_iqueues = bitcount64(resp->cfg_info.iqmask);
1310 		num_oqueues = bitcount64(resp->cfg_info.oqmask);
1311 
1312 		if (!(num_iqueues) || !(num_oqueues)) {
1313 			lio_dev_err(octeon_dev,
1314 				    "Got bad iqueues (%016llX) or oqueues (%016llX) from firmware.\n",
1315 				    LIO_CAST64(resp->cfg_info.iqmask),
1316 				    LIO_CAST64(resp->cfg_info.oqmask));
1317 			goto setup_nic_dev_fail;
1318 		}
1319 
1320 		lio_dev_dbg(octeon_dev,
1321 			    "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
1322 			    i, LIO_CAST64(resp->cfg_info.iqmask),
1323 			    LIO_CAST64(resp->cfg_info.oqmask),
1324 			    num_iqueues, num_oqueues);
1325 
1326 		ifp = if_alloc(IFT_ETHER);
1327 
1328 		lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO);
1329 
1330 		if (lio == NULL) {
1331 			lio_dev_err(octeon_dev, "Lio allocation failed\n");
1332 			goto setup_nic_dev_fail;
1333 		}
1334 
1335 		if_setsoftc(ifp, lio);
1336 
1337 		if_sethwtsomax(ifp, LIO_MAX_FRAME_SIZE);
1338 		if_sethwtsomaxsegcount(ifp, LIO_MAX_SG);
1339 		if_sethwtsomaxsegsize(ifp, PAGE_SIZE);
1340 
1341 		lio->ifidx = ifidx_or_pfnum;
1342 
1343 		props = &octeon_dev->props;
1344 		props->gmxport = resp->cfg_info.linfo.gmxport;
1345 		props->ifp = ifp;
1346 
1347 		lio->linfo.num_rxpciq = num_oqueues;
1348 		lio->linfo.num_txpciq = num_iqueues;
1349 		for (j = 0; j < num_oqueues; j++) {
1350 			lio->linfo.rxpciq[j].rxpciq64 =
1351 			    resp->cfg_info.linfo.rxpciq[j].rxpciq64;
1352 		}
1353 
1354 		for (j = 0; j < num_iqueues; j++) {
1355 			lio->linfo.txpciq[j].txpciq64 =
1356 			    resp->cfg_info.linfo.txpciq[j].txpciq64;
1357 		}
1358 
1359 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1360 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1361 		lio->linfo.link.link_status64 =
1362 		    resp->cfg_info.linfo.link.link_status64;
1363 
1364 		/*
1365 		 * Point to the properties for octeon device to which this
1366 		 * interface belongs.
1367 		 */
1368 		lio->oct_dev = octeon_dev;
1369 		lio->ifp = ifp;
1370 
1371 		lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i,
1372 			    lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr));
1373 		lio_init_ifnet(lio);
1374 		/* 64-bit swap required on LE machines */
1375 		lio_swap_8B_data(&lio->linfo.hw_addr, 1);
1376 		for (j = 0; j < 6; j++)
1377 			mac[j] = *((uint8_t *)(
1378 				   ((uint8_t *)&lio->linfo.hw_addr) + 2 + j));
1379 
1380 		ether_ifattach(ifp, mac);
1381 
1382 		/*
1383 		 * By default all interfaces on a single Octeon use the same
1384 		 * tx and rx queues.
1385 		 */
1386 		lio->txq = lio->linfo.txpciq[0].s.q_no;
1387 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
1388 		if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq,
1389 					lio->linfo.num_rxpciq)) {
1390 			lio_dev_err(octeon_dev, "I/O queues creation failed\n");
1391 			goto setup_nic_dev_fail;
1392 		}
1393 
1394 		lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
1395 
1396 		lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq);
1397 		lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq);
1398 
1399 		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
1400 			lio_dev_err(octeon_dev, "Gather list allocation failed\n");
1401 			goto setup_nic_dev_fail;
1402 		}
1403 
1404 		if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp))
1405 			goto setup_nic_dev_fail;
1406 
1407 		if (lio_hwlro &&
1408 		    (if_getcapenable(ifp) & IFCAP_LRO) &&
1409 		    (if_getcapenable(ifp) & IFCAP_RXCSUM) &&
1410 		    (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6))
1411 			lio_set_feature(ifp, LIO_CMD_LRO_ENABLE,
1412 					LIO_LROIPV4 | LIO_LROIPV6);
1413 
1414 		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER))
1415 			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1);
1416 		else
1417 			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0);
1418 
1419 		if (lio_setup_rx_oom_poll_fn(ifp))
1420 			goto setup_nic_dev_fail;
1421 
1422 		lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
1423 			    i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
1424 		lio->link_changes++;
1425 
1426 		lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED);
1427 
1428 		/*
1429 		 * Send a command to the firmware to enable Rx checksum
1430 		 * offload by default when the LiquidIO driver sets up this
1431 		 * device.
1432 		 */
1433 		lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL,
1434 				       LIO_CMD_RXCSUM_ENABLE);
1435 		lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL,
1436 				LIO_CMD_TXCSUM_ENABLE);
1437 
1438 		if (lio_rss) {
1439 			if (lio_send_rss_param(lio))
1440 				goto setup_nic_dev_fail;
1441 		} else
1442 			lio_set_feature(ifp, LIO_CMD_SET_FNV,
1443 					LIO_CMD_FNV_ENABLE);
1444 
1445 		lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i);
1446 
1447 		lio_free_soft_command(octeon_dev, sc);
1448 		lio->vlan_attach =
1449 		    EVENTHANDLER_REGISTER(vlan_config,
1450 					  lio_vlan_rx_add_vid, lio,
1451 					  EVENTHANDLER_PRI_FIRST);
1452 		lio->vlan_detach =
1453 		    EVENTHANDLER_REGISTER(vlan_unconfig,
1454 					  lio_vlan_rx_kill_vid, lio,
1455 					  EVENTHANDLER_PRI_FIRST);
1456 
1457 		/* Update stats periodically */
1458 		callout_init(&lio->stats_timer, 0);
1459 		lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL;
1460 
1461 		lio_add_hw_stats(lio);
1462 	}
1463 
1464 	return (0);
1465 
1466 setup_nic_dev_fail:
1467 
1468 	lio_free_soft_command(octeon_dev, sc);
1469 
1470 	while (i--) {
1471 		lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i);
1472 		lio_destroy_nic_device(octeon_dev, i);
1473 	}
1474 
1475 	return (ENODEV);
1476 }
1477 
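/*
 * \brief Dispatch handler for NIC_INFO (link status) packets from firmware
 * @param recv_info received packet info
 * @param ptr octeon device
 *
 * Validates the packet length, byte-swaps the link status and updates the
 * interface link state when the gmx port matches, then frees the buffers.
 */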
1478 static int
1479 lio_link_info(struct lio_recv_info *recv_info, void *ptr)
1480 {
1481 	struct octeon_device	*oct = (struct octeon_device *)ptr;
1482 	struct lio_recv_pkt	*recv_pkt = recv_info->recv_pkt;
1483 	union octeon_link_status *ls;
1484 	int	gmxport = 0, i;
1485 
1486 	lio_dev_dbg(oct, "%s Called\n", __func__);
1487 	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) {
1488 		lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1489 			    recv_pkt->buffer_size[0],
1490 			    recv_pkt->rh.r_nic_info.gmxport);
1491 		goto nic_info_err;
1492 	}
1493 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
1494 	ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data +
1495 					  LIO_DROQ_INFO_SIZE);
1496 	lio_swap_8B_data((uint64_t *)ls,
1497 			 (sizeof(union octeon_link_status)) >> 3);
1498 
1499 	if (oct->props.gmxport == gmxport)
1500 		lio_update_link_status(oct->props.ifp, ls);
1501 
1502 nic_info_err:
1503 	for (i = 0; i < recv_pkt->buffer_count; i++)
1504 		lio_recv_buffer_free(recv_pkt->buffer_ptr[i]);
1505 
1506 	lio_free_recv_info(recv_info);
1507 	return (0);
1508 }
1509 
1510 void
1511 lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
1512 {
1513 
1514 	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
1515 	bus_dmamap_unload(iq->txtag, finfo->map);
1516 	m_freem(finfo->mb);
1517 }
1518 
1519 void
1520 lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
1521 {
1522 	struct lio_gather	*g;
1523 	struct octeon_device	*oct;
1524 	struct lio		*lio;
1525 	int	iq_no;
1526 
1527 	g = finfo->g;
1528 	iq_no = iq->txpciq.s.q_no;
1529 	oct = iq->oct_dev;
1530 	lio = if_getsoftc(oct->props.ifp);
1531 
1532 	mtx_lock(&lio->glist_lock[iq_no]);
1533 	STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries);
1534 	mtx_unlock(&lio->glist_lock[iq_no]);
1535 
1536 	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
1537 	bus_dmamap_unload(iq->txtag, finfo->map);
1538 	m_freem(finfo->mb);
1539 }
1540 
1541 static void
1542 lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf)
1543 {
1544 	struct lio_soft_command	*sc = (struct lio_soft_command *)buf;
1545 	struct lio_if_cfg_resp	*resp;
1546 	struct lio_if_cfg_context *ctx;
1547 
1548 	resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1549 	ctx = (struct lio_if_cfg_context *)sc->ctxptr;
1550 
1551 	oct = lio_get_device(ctx->octeon_id);
1552 	if (resp->status)
1553 		lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n",
1554 			    LIO_CAST64(resp->status), status);
1555 	ctx->cond = 1;
1556 
1557 	snprintf(oct->fw_info.lio_firmware_version, 32, "%s",
1558 		 resp->cfg_info.lio_firmware_version);
1559 
1560 	/*
1561 	 * This barrier is required to be sure that the response has been
1562 	 * written fully before waking up the handler
1563 	 */
1564 	wmb();
1565 }
1566 
1567 static int
1568 lio_is_mac_changed(uint8_t *new, uint8_t *old)
1569 {
1570 
1571 	return ((new[0] != old[0]) || (new[1] != old[1]) ||
1572 		(new[2] != old[2]) || (new[3] != old[3]) ||
1573 		(new[4] != old[4]) || (new[5] != old[5]));
1574 }
1575 
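/*
 * \brief ifnet init (if_init) handler; brings the interface up
 * @param arg lio per-interface private data
 *
 * Marks the interface running, tells the firmware to start forwarding
 * packets to the host, programs a changed MAC address if needed and sets
 * IFF_DRV_RUNNING.
 */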
1576 void
1577 lio_open(void *arg)
1578 {
1579 	struct lio	*lio = arg;
1580 	if_t		ifp = lio->ifp;
1581 	struct octeon_device	*oct = lio->oct_dev;
1582 	uint8_t	*mac_new, mac_old[ETHER_HDR_LEN];
1583 	int	ret = 0;
1584 
1585 	lio_ifstate_set(lio, LIO_IFSTATE_RUNNING);
1586 
1587 	/* Ready for link status updates */
1588 	lio->intf_open = 1;
1589 
1590 	lio_dev_info(oct, "Interface Open, ready for traffic\n");
1591 
1592 	/* tell Octeon to start forwarding packets to host */
1593 	lio_send_rx_ctrl_cmd(lio, 1);
1594 
1595 	mac_new = if_getlladdr(ifp);
1596 	memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_ADDR_LEN);
1597 
1598 	if (lio_is_mac_changed(mac_new, mac_old)) {
1599 		ret = lio_set_mac(ifp, mac_new);
1600 		if (ret)
1601 			lio_dev_err(oct, "MAC change failed, error: %d\n", ret);
1602 	}
1603 
1604 	/* Now inform the stack we're ready */
1605 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1606 
1607 	lio_dev_info(oct, "Interface is opened\n");
1608 }
1609 
1610 static int
1611 lio_set_rxcsum_command(if_t ifp, int command, uint8_t rx_cmd)
1612 {
1613 	struct lio_ctrl_pkt	nctrl;
1614 	struct lio		*lio = if_getsoftc(ifp);
1615 	struct octeon_device	*oct = lio->oct_dev;
1616 	int	ret = 0;
1617 
1618 	nctrl.ncmd.cmd64 = 0;
1619 	nctrl.ncmd.s.cmd = command;
1620 	nctrl.ncmd.s.param1 = rx_cmd;
1621 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1622 	nctrl.wait_time = 100;
1623 	nctrl.lio = lio;
1624 	nctrl.cb_fn = lio_ctrl_cmd_completion;
1625 
1626 	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
1627 	if (ret < 0) {
1628 		lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
1629 			    ret);
1630 	}
1631 
1632 	return (ret);
1633 }
1634 
1635 static int
1636 lio_stop_nic_module(struct octeon_device *oct)
1637 {
1638 	int		i, j;
1639 	struct lio	*lio;
1640 
1641 	lio_dev_dbg(oct, "Stopping network interfaces\n");
1642 	if (!oct->ifcount) {
1643 		lio_dev_err(oct, "Init for Octeon was not completed\n");
1644 		return (1);
1645 	}
1646 
1647 	mtx_lock(&oct->cmd_resp_wqlock);
1648 	oct->cmd_resp_state = LIO_DRV_OFFLINE;
1649 	mtx_unlock(&oct->cmd_resp_wqlock);
1650 
1651 	for (i = 0; i < oct->ifcount; i++) {
1652 		lio = if_getsoftc(oct->props.ifp);
1653 		for (j = 0; j < oct->num_oqs; j++)
1654 			lio_unregister_droq_ops(oct,
1655 						lio->linfo.rxpciq[j].s.q_no);
1656 	}
1657 
1658 	callout_drain(&lio->stats_timer);
1659 
1660 	for (i = 0; i < oct->ifcount; i++)
1661 		lio_destroy_nic_device(oct, i);
1662 
1663 	lio_dev_dbg(oct, "Network interface stopped\n");
1664 
1665 	return (0);
1666 }
1667 
1668 static void
1669 lio_delete_glists(struct octeon_device *oct, struct lio *lio)
1670 {
1671 	struct lio_gather	*g;
1672 	int	i;
1673 
1674 	if (lio->glist_lock != NULL) {
1675 		free((void *)lio->glist_lock, M_DEVBUF);
1676 		lio->glist_lock = NULL;
1677 	}
1678 
1679 	if (lio->ghead == NULL)
1680 		return;
1681 
1682 	for (i = 0; i < lio->linfo.num_txpciq; i++) {
1683 		do {
1684 			g = (struct lio_gather *)
1685 			    lio_delete_first_node(&lio->ghead[i]);
1686 			free(g, M_DEVBUF);
1687 		} while (g);
1688 
1689 		if ((lio->glists_virt_base != NULL) &&
1690 		    (lio->glists_virt_base[i] != NULL)) {
1691 			lio_dma_free(lio->glist_entry_size * lio->tx_qsize,
1692 				     lio->glists_virt_base[i]);
1693 		}
1694 	}
1695 
1696 	free(lio->glists_virt_base, M_DEVBUF);
1697 	lio->glists_virt_base = NULL;
1698 
1699 	free(lio->glists_dma_base, M_DEVBUF);
1700 	lio->glists_dma_base = NULL;
1701 
1702 	free(lio->ghead, M_DEVBUF);
1703 	lio->ghead = NULL;
1704 }
1705 
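/*
 * \brief Allocate the per-queue scatter/gather lists
 * @param oct octeon device
 * @param lio per-interface private data
 * @param num_iqs number of transmit queues
 *
 * Allocates one DMA region and one free list of gather entries per
 * transmit queue.
 * @returns 0 on success, 1 on allocation failure
 */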
1706 static int
1707 lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
1708 {
1709 	struct lio_gather	*g;
1710 	int	i, j;
1711 
1712 	lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF,
1713 				 M_NOWAIT | M_ZERO);
1714 	if (lio->glist_lock == NULL)
1715 		return (1);
1716 
1717 	lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF,
1718 			    M_NOWAIT | M_ZERO);
1719 	if (lio->ghead == NULL) {
1720 		free((void *)lio->glist_lock, M_DEVBUF);
1721 		lio->glist_lock = NULL;
1722 		return (1);
1723 	}
1724 
1725 	lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) *
1726 					 LIO_SG_ENTRY_SIZE);
1727 	/*
1728 	 * Allocate memory to store the virtual and DMA base addresses of
1729 	 * the per-queue gather-list memory.
1730 	 */
1731 	lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF,
1732 				       M_NOWAIT | M_ZERO);
1733 	lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF,
1734 				      M_NOWAIT | M_ZERO);
1735 	if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) {
1736 		lio_delete_glists(oct, lio);
1737 		return (1);
1738 	}
1739 
1740 	for (i = 0; i < num_iqs; i++) {
1741 		mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF);
1742 
1743 		STAILQ_INIT(&lio->ghead[i]);
1744 
1745 		lio->glists_virt_base[i] =
1746 		    lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize,
1747 				  (vm_paddr_t *)&lio->glists_dma_base[i]);
1748 		if (lio->glists_virt_base[i] == NULL) {
1749 			lio_delete_glists(oct, lio);
1750 			return (1);
1751 		}
1752 
1753 		for (j = 0; j < lio->tx_qsize; j++) {
1754 			g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO);
1755 			if (g == NULL)
1756 				break;
1757 
1758 			g->sg = (struct lio_sg_entry *)(uintptr_t)
1759 			    ((uint64_t)(uintptr_t)lio->glists_virt_base[i] +
1760 			     (j * lio->glist_entry_size));
1761 			g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] +
1762 				(j * lio->glist_entry_size);
1763 			STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries);
1764 		}
1765 
1766 		if (j != lio->tx_qsize) {
1767 			lio_delete_glists(oct, lio);
1768 			return (1);
1769 		}
1770 	}
1771 
1772 	return (0);
1773 }
1774 
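/*
 * \brief Bring the interface down
 * @param ifp interface
 *
 * Clears the running state, reports link down, tells the firmware to stop
 * forwarding packets to the host and clears IFF_DRV_RUNNING.
 */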
1775 void
1776 lio_stop(if_t ifp)
1777 {
1778 	struct lio	*lio = if_getsoftc(ifp);
1779 	struct octeon_device	*oct = lio->oct_dev;
1780 
1781 	lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1782 	if_link_state_change(ifp, LINK_STATE_DOWN);
1783 
1784 	lio->intf_open = 0;
1785 	lio->linfo.link.s.link_up = 0;
1786 	lio->link_changes++;
1787 
1788 	lio_send_rx_ctrl_cmd(lio, 0);
1789 
1790 	/* Tell the stack that the interface is no longer active */
1791 	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1792 
1793 	lio_dev_info(oct, "Interface is stopped\n");
1794 }
1795 
1796 static void
1797 lio_check_rx_oom_status(struct lio *lio)
1798 {
1799 	struct lio_droq	*droq;
1800 	struct octeon_device *oct = lio->oct_dev;
1801 	int	desc_refilled;
1802 	int	q, q_no = 0;
1803 
1804 	for (q = 0; q < oct->num_oqs; q++) {
1805 		q_no = lio->linfo.rxpciq[q].s.q_no;
1806 		droq = oct->droq[q_no];
1807 		if (droq == NULL)
1808 			continue;
1809 		if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) {
1810 			mtx_lock(&droq->lock);
1811 			desc_refilled = lio_droq_refill(oct, droq);
1812 			/*
1813 			 * Flush the droq descriptor data to memory to be sure
1814 			 * that when we update the credits the data in memory
1815 			 * is accurate.
1816 			 */
1817 			wmb();
1818 			lio_write_csr32(oct, droq->pkts_credit_reg,
1819 					desc_refilled);
1820 			/* make sure mmio write completes */
1821 			__compiler_membar();
1822 			mtx_unlock(&droq->lock);
1823 		}
1824 	}
1825 }
1826 
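/*
 * \brief Periodic task that checks the receive rings for buffer exhaustion
 * @param arg the lio_tq that scheduled this task
 * @param pending unused
 *
 * Reschedules itself to run again in 50 ms.
 */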
1827 static void
1828 lio_poll_check_rx_oom_status(void *arg, int pending __unused)
1829 {
1830 	struct lio_tq	*rx_status_tq = arg;
1831 	struct lio	*lio = rx_status_tq->ctxptr;
1832 
1833 	if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING))
1834 		lio_check_rx_oom_status(lio);
1835 
1836 	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
1837 				  lio_ms_to_ticks(50));
1838 }
1839 
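/*
 * \brief Create and start the receive out-of-memory polling task
 * @param ifp network device
 * @returns 0
 */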
1840 static int
1841 lio_setup_rx_oom_poll_fn(if_t ifp)
1842 {
1843 	struct lio	*lio = if_getsoftc(ifp);
1844 	struct octeon_device	*oct = lio->oct_dev;
1845 	struct lio_tq	*rx_status_tq;
1846 
1847 	rx_status_tq = &lio->rx_status_tq;
1848 
1849 	rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK,
1850 					    taskqueue_thread_enqueue,
1851 					    &rx_status_tq->tq);
1852 
1853 	TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
1854 			  lio_poll_check_rx_oom_status, (void *)rx_status_tq);
1855 
1856 	rx_status_tq->ctxptr = lio;
1857 
1858 	taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET,
1859 				"lio%d_rx_oom_status",
1860 				oct->octeon_id);
1861 
1862 	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
1863 				  lio_ms_to_ticks(50));
1864 
1865 	return (0);
1866 }
1867 
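/*
 * \brief Cancel and free the receive out-of-memory polling task
 * @param ifp network device
 */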
1868 static void
1869 lio_cleanup_rx_oom_poll_fn(if_t ifp)
1870 {
1871 	struct lio	*lio = if_getsoftc(ifp);
1872 
1873 	if (lio->rx_status_tq.tq != NULL) {
1874 		while (taskqueue_cancel_timeout(lio->rx_status_tq.tq,
1875 						&lio->rx_status_tq.work, NULL))
1876 			taskqueue_drain_timeout(lio->rx_status_tq.tq,
1877 						&lio->rx_status_tq.work);
1878 
1879 		taskqueue_free(lio->rx_status_tq.tq);
1880 
1881 		lio->rx_status_tq.tq = NULL;
1882 	}
1883 }
1884 
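/*
 * \brief Destroy the NIC device interface
 * @param oct octeon device
 * @param ifidx index of the interface being destroyed
 *
 * Stops the interface, waits for outstanding requests and packets, then
 * detaches it from the stack and releases its resources.
 */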
1885 static void
1886 lio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1887 {
1888 	if_t		ifp = oct->props.ifp;
1889 	struct lio	*lio;
1890 
1891 	if (ifp == NULL) {
1892 		lio_dev_err(oct, "%s No ifp ptr for index %d\n",
1893 			    __func__, ifidx);
1894 		return;
1895 	}
1896 
1897 	lio = if_getsoftc(ifp);
1898 
1899 	lio_ifstate_set(lio, LIO_IFSTATE_DETACH);
1900 
1901 	lio_dev_dbg(oct, "NIC device cleanup\n");
1902 
1903 	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1904 		lio_stop(ifp);
1905 
1906 	if (lio_wait_for_pending_requests(oct))
1907 		lio_dev_err(oct, "There were pending requests\n");
1908 
1909 	if (lio_wait_for_instr_fetch(oct))
1910 		lio_dev_err(oct, "IQ had pending instructions\n");
1911 
1912 	if (lio_wait_for_oq_pkts(oct))
1913 		lio_dev_err(oct, "OQ had pending packets\n");
1914 
1915 	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1916 		ether_ifdetach(ifp);
1917 
1918 	lio_tcp_lro_free(oct, ifp);
1919 
1920 	lio_cleanup_rx_oom_poll_fn(ifp);
1921 
1922 	lio_delete_glists(oct, lio);
1923 
1924 	EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach);
1925 	EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach);
1926 
1927 	free(lio, M_DEVBUF);
1928 
1929 	if_free(ifp);
1930 
1931 	oct->props.gmxport = -1;
1932 
1933 	oct->props.ifp = NULL;
1934 }
1935 
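/*
 * \brief Print link information
 * @param ifp network device
 */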
1936 static void
1937 print_link_info(if_t ifp)
1938 {
1939 	struct lio	*lio = if_getsoftc(ifp);
1940 
1941 	if (!lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
1942 	    lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
1943 		struct octeon_link_info *linfo = &lio->linfo;
1944 
1945 		if (linfo->link.s.link_up) {
1946 			lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n",
1947 				     linfo->link.s.speed,
1948 				     (linfo->link.s.duplex) ? "Full" : "Half");
1949 		} else {
1950 			lio_dev_info(lio->oct_dev, "Link Down\n");
1951 		}
1952 	}
1953 }
1954 
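/*
 * \brief Update link status
 * @param ifp network device
 * @param ls new link status
 *
 * If the status changed while the interface is open, logs the change and
 * notifies the network stack.
 */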
1955 static inline void
1956 lio_update_link_status(if_t ifp, union octeon_link_status *ls)
1957 {
1958 	struct lio	*lio = if_getsoftc(ifp);
1959 	int	changed = (lio->linfo.link.link_status64 != ls->link_status64);
1960 
1961 	lio->linfo.link.link_status64 = ls->link_status64;
1962 
1963 	if ((lio->intf_open) && (changed)) {
1964 		print_link_info(ifp);
1965 		lio->link_changes++;
1966 		if (lio->linfo.link.s.link_up)
1967 			if_link_state_change(ifp, LINK_STATE_UP);
1968 		else
1969 			if_link_state_change(ifp, LINK_STATE_DOWN);
1970 	}
1971 }
1972 
1973 /*
1974  * \brief Callback for rx ctrl
1975  * @param status status of request
1976  * @param buf pointer to resp structure
1977  */
1978 static void
1979 lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
1980 {
1981 	struct lio_soft_command	*sc = (struct lio_soft_command *)buf;
1982 	struct lio_rx_ctl_context *ctx;
1983 
1984 	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
1985 
1986 	oct = lio_get_device(ctx->octeon_id);
1987 	if (status)
1988 		lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n",
1989 			    LIO_CAST64(status));
1990 	ctx->cond = 1;
1991 
1992 	/*
1993 	 * This barrier is required to be sure that the response has been
1994 	 * written fully before waking up the handler
1995 	 */
1996 	wmb();
1997 }
1998 
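/*
 * \brief Send a soft command to start or stop packet reception
 * @param lio per-interface private data
 * @param start_stop 1 to start reception, 0 to stop it
 */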
1999 static void
2000 lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop)
2001 {
2002 	struct lio_soft_command	*sc;
2003 	struct lio_rx_ctl_context *ctx;
2004 	union octeon_cmd	*ncmd;
2005 	struct octeon_device	*oct = (struct octeon_device *)lio->oct_dev;
2006 	int	ctx_size = sizeof(struct lio_rx_ctl_context);
2007 	int	retval;
2008 
2009 	if (oct->props.rx_on == start_stop)
2010 		return;
2011 
2012 	sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size);
2013 	if (sc == NULL)
2014 		return;
2015 
2016 	ncmd = (union octeon_cmd *)sc->virtdptr;
2017 	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
2018 
2019 	ctx->cond = 0;
2020 	ctx->octeon_id = lio_get_device_id(oct);
2021 	ncmd->cmd64 = 0;
2022 	ncmd->s.cmd = LIO_CMD_RX_CTL;
2023 	ncmd->s.param1 = start_stop;
2024 
2025 	lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));
2026 
2027 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2028 
2029 	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0,
2030 				 0, 0);
2031 
2032 	sc->callback = lio_rx_ctl_callback;
2033 	sc->callback_arg = sc;
2034 	sc->wait_time = 5000;
2035 
2036 	retval = lio_send_soft_command(oct, sc);
2037 	if (retval == LIO_IQ_SEND_FAILED) {
2038 		lio_dev_err(oct, "Failed to send RX Control message\n");
2039 	} else {
2040 		/*
2041 		 * Sleep on a wait queue until the cond flag indicates that the
2042 		 * response has arrived or the request timed out.
2043 		 */
2044 		lio_sleep_cond(oct, &ctx->cond);
2045 		oct->props.rx_on = start_stop;
2046 	}
2047 
2048 	lio_free_soft_command(oct, sc);
2049 }
2050 
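/*
 * \brief VLAN registration event handler; adds a VLAN filter on the NIC
 * @param arg softc passed when the event handler was registered
 * @param ifp network device the VLAN was configured on
 * @param vid VLAN id being added
 */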
2051 static void
2052 lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid)
2053 {
2054 	struct lio_ctrl_pkt	nctrl;
2055 	struct lio		*lio = if_getsoftc(ifp);
2056 	struct octeon_device	*oct = lio->oct_dev;
2057 	int	ret = 0;
2058 
2059 	if (if_getsoftc(ifp) != arg)	/* Not our event */
2060 		return;
2061 
2062 	if ((vid == 0) || (vid > 4095))	/* Invalid */
2063 		return;
2064 
2065 	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
2066 
2067 	nctrl.ncmd.cmd64 = 0;
2068 	nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER;
2069 	nctrl.ncmd.s.param1 = vid;
2070 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2071 	nctrl.wait_time = 100;
2072 	nctrl.lio = lio;
2073 	nctrl.cb_fn = lio_ctrl_cmd_completion;
2074 
2075 	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
2076 	if (ret < 0) {
2077 		lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n",
2078 			    ret);
2079 	}
2080 }
2081 
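/*
 * \brief VLAN unregistration event handler; removes a VLAN filter on the NIC
 * @param arg softc passed when the event handler was registered
 * @param ifp network device the VLAN was removed from
 * @param vid VLAN id being removed
 */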
2082 static void
2083 lio_vlan_rx_kill_vid(void *arg, if_t ifp, uint16_t vid)
2084 {
2085 	struct lio_ctrl_pkt	nctrl;
2086 	struct lio		*lio = if_getsoftc(ifp);
2087 	struct octeon_device	*oct = lio->oct_dev;
2088 	int	ret = 0;
2089 
2090 	if (if_getsoftc(ifp) != arg)	/* Not our event */
2091 		return;
2092 
2093 	if ((vid == 0) || (vid > 4095))	/* Invalid */
2094 		return;
2095 
2096 	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
2097 
2098 	nctrl.ncmd.cmd64 = 0;
2099 	nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER;
2100 	nctrl.ncmd.s.param1 = vid;
2101 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2102 	nctrl.wait_time = 100;
2103 	nctrl.lio = lio;
2104 	nctrl.cb_fn = lio_ctrl_cmd_completion;
2105 
2106 	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
2107 	if (ret < 0) {
2108 		lio_dev_err(oct,
2109 			    "Kill VLAN filter failed in core (ret: 0x%x)\n",
2110 			    ret);
2111 	}
2112 }
2113 
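/*
 * \brief Wait for pending output queue packets to be processed
 * @param oct octeon device
 */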
2114 static int
2115 lio_wait_for_oq_pkts(struct octeon_device *oct)
2116 {
2117 	int	i, pending_pkts, pkt_cnt = 0, retry = 100;
2118 
2119 	do {
2120 		pending_pkts = 0;
2121 
2122 		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
2123 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
2124 				continue;
2125 
2126 			pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]);
2127 			if (pkt_cnt > 0) {
2128 				pending_pkts += pkt_cnt;
2129 				taskqueue_enqueue(oct->droq[i]->droq_taskqueue,
2130 						  &oct->droq[i]->droq_task);
2131 			}
2132 		}
2133 
2134 		pkt_cnt = 0;
2135 		lio_sleep_timeout(1);
2136 	} while (retry-- && pending_pkts);
2137 
2138 	return (pkt_cnt);
2139 }
2140 
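/*
 * \brief Release device resources in reverse order of initialization
 * @param oct octeon device
 *
 * Each case falls through so that teardown starts from the current
 * device state and undoes every earlier initialization stage.
 */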
2141 static void
2142 lio_destroy_resources(struct octeon_device *oct)
2143 {
2144 	int i, refcount;
2145 
2146 	switch (atomic_load_acq_int(&oct->status)) {
2147 	case LIO_DEV_RUNNING:
2148 	case LIO_DEV_CORE_OK:
2149 		/* No more instructions will be forwarded. */
2150 		atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET);
2151 
2152 		oct->app_mode = LIO_DRV_INVALID_APP;
2153 		lio_dev_dbg(oct, "Device state is now %s\n",
2154 			    lio_get_state_string(&oct->status));
2155 
2156 		lio_sleep_timeout(100);
2157 
2158 		/* fallthrough */
2159 	case LIO_DEV_HOST_OK:
2160 
2161 		/* fallthrough */
2162 	case LIO_DEV_CONSOLE_INIT_DONE:
2163 		/* Remove any consoles */
2164 		lio_remove_consoles(oct);
2165 
2166 		/* fallthrough */
2167 	case LIO_DEV_IO_QUEUES_DONE:
2168 		if (lio_wait_for_pending_requests(oct))
2169 			lio_dev_err(oct, "There were pending requests\n");
2170 
2171 		if (lio_wait_for_instr_fetch(oct))
2172 			lio_dev_err(oct, "IQ had pending instructions\n");
2173 
2174 		/*
2175 		 * Disable the input and output queues now. No more packets will
2176 		 * arrive from Octeon, but we should wait for all packet
2177 		 * processing to finish.
2178 		 */
2179 		oct->fn_list.disable_io_queues(oct);
2180 
2181 		if (lio_wait_for_oq_pkts(oct))
2182 			lio_dev_err(oct, "OQ had pending packets\n");
2183 
2184 		/* fallthrough */
2185 	case LIO_DEV_INTR_SET_DONE:
2186 		/* Disable interrupts  */
2187 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
2188 
2189 		if (oct->msix_on) {
2190 			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
2191 				if (oct->ioq_vector[i].tag != NULL) {
2192 					bus_teardown_intr(oct->device,
2193 						  oct->ioq_vector[i].msix_res,
2194 						      oct->ioq_vector[i].tag);
2195 					oct->ioq_vector[i].tag = NULL;
2196 				}
2197 				if (oct->ioq_vector[i].msix_res != NULL) {
2198 					bus_release_resource(oct->device,
2199 						SYS_RES_IRQ,
2200 						oct->ioq_vector[i].vector,
2201 						oct->ioq_vector[i].msix_res);
2202 					oct->ioq_vector[i].msix_res = NULL;
2203 				}
2204 			}
2205 			/* non-iov vector's argument is oct struct */
2206 			if (oct->tag != NULL) {
2207 				bus_teardown_intr(oct->device, oct->msix_res,
2208 						  oct->tag);
2209 				oct->tag = NULL;
2210 			}
2211 
2212 			if (oct->msix_res != NULL) {
2213 				bus_release_resource(oct->device, SYS_RES_IRQ,
2214 						     oct->aux_vector,
2215 						     oct->msix_res);
2216 				oct->msix_res = NULL;
2217 			}
2218 
2219 			pci_release_msi(oct->device);
2220 		}
2221 		/* fallthrough */
2222 	case LIO_DEV_IN_RESET:
2223 	case LIO_DEV_DROQ_INIT_DONE:
2224 		/* Wait for any pending operations */
2225 		lio_mdelay(100);
2226 		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
2227 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
2228 				continue;
2229 			lio_delete_droq(oct, i);
2230 		}
2231 
2232 		/* fallthrough */
2233 	case LIO_DEV_RESP_LIST_INIT_DONE:
2234 		for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
2235 			if (oct->droq[i] != NULL) {
2236 				free(oct->droq[i], M_DEVBUF);
2237 				oct->droq[i] = NULL;
2238 			}
2239 		}
2240 		lio_delete_response_list(oct);
2241 
2242 		/* fallthrough */
2243 	case LIO_DEV_INSTR_QUEUE_INIT_DONE:
2244 		for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
2245 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
2246 				continue;
2247 
2248 			lio_delete_instr_queue(oct, i);
2249 		}
2250 
2251 		/* fallthrough */
2252 	case LIO_DEV_MSIX_ALLOC_VECTOR_DONE:
2253 		for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
2254 			if (oct->instr_queue[i] != NULL) {
2255 				free(oct->instr_queue[i], M_DEVBUF);
2256 				oct->instr_queue[i] = NULL;
2257 			}
2258 		}
2259 		lio_free_ioq_vector(oct);
2260 
2261 		/* fallthrough */
2262 	case LIO_DEV_SC_BUFF_POOL_INIT_DONE:
2263 		lio_free_sc_buffer_pool(oct);
2264 
2265 		/* fallthrough */
2266 	case LIO_DEV_DISPATCH_INIT_DONE:
2267 		lio_delete_dispatch_list(oct);
2268 
2269 		/* fallthrough */
2270 	case LIO_DEV_PCI_MAP_DONE:
2271 		refcount = lio_deregister_device(oct);
2272 
2273 		if (fw_type_is_none())
2274 			lio_pci_flr(oct);
2275 
2276 		if (!refcount)
2277 			oct->fn_list.soft_reset(oct);
2278 
2279 		lio_unmap_pci_barx(oct, 0);
2280 		lio_unmap_pci_barx(oct, 1);
2281 
2282 		/* fallthrough */
2283 	case LIO_DEV_PCI_ENABLE_DONE:
2284 		/* Disable the device, releasing the PCI INT */
2285 		pci_disable_busmaster(oct->device);
2286 
2287 		/* fallthrough */
2288 	case LIO_DEV_BEGIN_STATE:
2289 		break;
2290 	}	/* end switch (oct->status) */
2291 }
2292