/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "lio_bsd.h"
#include "lio_common.h"

#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
#include "lio_network.h"
#include "cn23xx_pf_device.h"
#include "lio_image.h"
#include "lio_ioctl.h"
#include "lio_rxtx.h"
#include "lio_rss.h"

/* Number of milliseconds to wait for DDR initialization */
#define LIO_DDR_TIMEOUT		10000
#define LIO_MAX_FW_TYPE_LEN	8

static char fw_type[LIO_MAX_FW_TYPE_LEN];
TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type));

/*
 * Integers that specify number of queues per PF.
 * Valid range is 0 to 64.
 * Use 0 to derive from CPU count.
 */
static int num_queues_per_pf0;
static int num_queues_per_pf1;
TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0);
TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1);

static int lio_rss = 1;
TUNABLE_INT("hw.lio.rss", &lio_rss);

/* Hardware LRO */
unsigned int lio_hwlro = 0;
TUNABLE_INT("hw.lio.hwlro", &lio_hwlro);

/*
 * Bitmask indicating which consoles have debug
 * output redirected to syslog.
 */
static unsigned long console_bitmask;
TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask);

/*
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns 1 = enabled. 0 otherwise
 */
int
lio_console_debug_enabled(uint32_t console)
{

	return (console_bitmask >> (console)) & 0x1;
}

static int	lio_detach(device_t dev);

static int	lio_device_init(struct octeon_device *octeon_dev);
static int	lio_chip_specific_setup(struct octeon_device *oct);
static void	lio_watchdog(void *param);
static int	lio_load_firmware(struct octeon_device *oct);
static int	lio_nic_starter(struct octeon_device *oct);
static int	lio_init_nic_module(struct octeon_device *oct);
static int	lio_setup_nic_devices(struct octeon_device *octeon_dev);
static int	lio_link_info(struct lio_recv_info *recv_info, void *ptr);
static void	lio_if_cfg_callback(struct octeon_device *oct, uint32_t status,
    void *buf);
static int	lio_set_rxcsum_command(if_t ifp, int command,
    uint8_t rx_cmd);
static int	lio_setup_glists(struct octeon_device *oct, struct lio *lio,
    int num_iqs);
static void	lio_destroy_nic_device(struct octeon_device *oct, int ifidx);
static inline void	lio_update_link_status(if_t ifp,
    union octeon_link_status *ls);
static void	lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop);
static int	lio_stop_nic_module(struct octeon_device *oct);
static void	lio_destroy_resources(struct octeon_device *oct);
static int	lio_setup_rx_oom_poll_fn(if_t ifp);

static void	lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid);
static void	lio_vlan_rx_kill_vid(void *arg, if_t ifp,
    uint16_t vid);
static struct octeon_device *
    lio_get_other_octeon_device(struct octeon_device *oct);

static int	lio_wait_for_oq_pkts(struct octeon_device *oct);

int	lio_send_rss_param(struct lio *lio);
static int	lio_dbg_console_print(struct octeon_device *oct,
    uint32_t console_num, char *prefix,
    char *suffix);

/* Polling interval for determining when NIC application is alive */
#define LIO_STARTER_POLL_INTERVAL_MS	100

/*
 * vendor_info_array.
 * This array contains the list of IDs on which the driver should load.
 */
struct lio_vendor_info {
	uint16_t	vendor_id;
	uint16_t	device_id;
	uint16_t	subdevice_id;
	uint8_t		revision_id;
	uint8_t		index;
};

static struct lio_vendor_info lio_pci_tbl[] = {
	/* CN2350 10G */
	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE,
	    0x02, 0},

	/* CN2350 10G */
	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1,
	    0x02, 0},

	/* CN2360 10G */
	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE,
	    0x02, 1},

	/* CN2350 25G */
	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE,
	    0x02, 2},

	/* CN2360 25G */
	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE,
	    0x02, 3},

	{0, 0, 0, 0, 0}
};

static char *lio_strings[] = {
	"LiquidIO 2350 10GbE Server Adapter",
	"LiquidIO 2360 10GbE Server Adapter",
	"LiquidIO 2350 25GbE Server Adapter",
	"LiquidIO 2360 25GbE Server Adapter",
};

struct lio_if_cfg_resp {
	uint64_t	rh;
	struct octeon_if_cfg_info cfg_info;
	uint64_t	status;
};

struct lio_if_cfg_context {
	int		octeon_id;
	volatile int	cond;
};

struct lio_rx_ctl_context {
	int		octeon_id;
	volatile int	cond;
};

static int
lio_probe(device_t dev)
{
	struct lio_vendor_info *tbl;

	uint16_t vendor_id;
	uint16_t device_id;
	uint16_t subdevice_id;
	uint8_t revision_id;

	vendor_id = pci_get_vendor(dev);
	if (vendor_id != PCI_VENDOR_ID_CAVIUM)
		return (ENXIO);

	device_id = pci_get_device(dev);
	subdevice_id = pci_get_subdevice(dev);
	revision_id = pci_get_revid(dev);

	tbl = lio_pci_tbl;
	while (tbl->vendor_id) {
		if ((vendor_id == tbl->vendor_id) &&
		    (device_id == tbl->device_id) &&
		    (subdevice_id == tbl->subdevice_id) &&
		    (revision_id == tbl->revision_id)) {
			device_set_descf(dev, "%s, Version - %s",
			    lio_strings[tbl->index], LIO_VERSION);
			return (BUS_PROBE_DEFAULT);
		}

		tbl++;
	}

	return (ENXIO);
}

static int
lio_attach(device_t device)
{
	struct octeon_device *oct_dev = NULL;
	uint64_t scratch1;
	uint32_t error;
	int timeout, ret = 1;
	uint8_t bus, dev, function;

	oct_dev = lio_allocate_device(device);
	if (oct_dev == NULL) {
		device_printf(device, "Error: Unable to allocate device\n");
		return (-ENOMEM);
	}

	oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET;
	oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET;
	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	oct_dev->device = device;
	bus = pci_get_bus(device);
	dev = pci_get_slot(device);
	function = pci_get_function(device);

	lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n",
	    pci_get_vendor(device), pci_get_device(device), bus, dev,
	    function);

	if (lio_device_init(oct_dev)) {
		lio_dev_err(oct_dev, "Failed to init device\n");
		lio_detach(device);
		return (-ENOMEM);
	}

	scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
	if (!(scratch1 & 4ULL)) {
		/*
		 * Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
		 * the lio watchdog kernel thread is running for this
		 * NIC. Each NIC gets one watchdog kernel thread.
		 */
		scratch1 |= 4ULL;
		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);

		error = kproc_create(lio_watchdog, oct_dev,
		    &oct_dev->watchdog_task, 0, 0,
		    "liowd/%02hhx:%02hhx.%hhx", bus,
		    dev, function);
		if (!error) {
			kproc_resume(oct_dev->watchdog_task);
		} else {
			oct_dev->watchdog_task = NULL;
			lio_dev_err(oct_dev,
			    "failed to create kernel_thread\n");
			lio_detach(device);
			return (-1);
		}
	}
	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	timeout = 0;
	while (timeout < LIO_NIC_STARTER_TIMEOUT) {
		lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS);
		timeout += LIO_STARTER_POLL_INTERVAL_MS;

		/*
		 * During the boot process interrupts are not available.
		 * So poll for the first control message from the firmware.
		 */
		if (cold)
			lio_droq_bh(oct_dev->droq[0], 0);

		if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) {
			ret = lio_nic_starter(oct_dev);
			break;
		}
	}

	if (ret) {
		lio_dev_err(oct_dev, "Firmware failed to start\n");
		lio_detach(device);
		return (-EIO);
	}

	lio_dev_dbg(oct_dev, "Device is ready\n");

	return (0);
}

static int
lio_detach(device_t dev)
{
	struct octeon_device *oct_dev = device_get_softc(dev);

	lio_dev_dbg(oct_dev, "Stopping device\n");
	if (oct_dev->watchdog_task) {
		uint64_t scratch1;

		kproc_suspend(oct_dev->watchdog_task, 0);

		scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
		scratch1 &= ~4ULL;
		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
	}

	if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP))
		lio_stop_nic_module(oct_dev);

	/*
	 * Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	lio_destroy_resources(oct_dev);

	lio_dev_info(oct_dev, "Device removed\n");

	/*
	 * This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	lio_free_device_mem(oct_dev);
	return (0);
}

static int
lio_shutdown(device_t dev)
{
	struct octeon_device *oct_dev = device_get_softc(dev);
	struct lio *lio = if_getsoftc(oct_dev->props.ifp);

	lio_send_rx_ctrl_cmd(lio, 0);

	return (0);
}

static int
lio_suspend(device_t dev)
{

	return (ENXIO);
}

static int
lio_resume(device_t dev)
{

	return (ENXIO);
}

static int
lio_event(struct module *mod, int event, void *junk)
{

	switch (event) {
	case MOD_LOAD:
		lio_init_device_list(LIO_CFG_TYPE_DEFAULT);
		break;
	default:
		break;
	}

	return (0);
}

/*********************************************************************
 * FreeBSD Device Interface Entry Points
 * *******************************************************************/
static device_method_t lio_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, lio_probe),
	DEVMETHOD(device_attach, lio_attach),
	DEVMETHOD(device_detach, lio_detach),
	DEVMETHOD(device_shutdown, lio_shutdown),
	DEVMETHOD(device_suspend, lio_suspend),
	DEVMETHOD(device_resume, lio_resume),
	DEVMETHOD_END
};

static driver_t lio_driver = {
	LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device),
};

DRIVER_MODULE(lio, pci, lio_driver, lio_event, NULL);

MODULE_DEPEND(lio, pci, 1, 1, 1);
MODULE_DEPEND(lio, ether, 1, 1, 1);
MODULE_DEPEND(lio, firmware, 1, 1, 1);

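/*
 * \brief Check whether the hw.lio.fw_type tunable requests the "none"
 * firmware type, in which case no firmware image is downloaded to the NIC.
 */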
static bool
fw_type_is_none(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
	    sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
}

/*
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 */
static int
lio_device_init(struct octeon_device *octeon_dev)
{
	unsigned long ddr_timeout = LIO_DDR_TIMEOUT;
	char *dbg_enb = NULL;
	int fw_loaded = 0;
	int i, j, ret;
	uint8_t bus, dev, function;
	char bootcmd[] = "\n";

	bus = pci_get_bus(octeon_dev->device);
	dev = pci_get_slot(octeon_dev->device);
	function = pci_get_function(octeon_dev->device);

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE);

	/* Enable access to the octeon device */
	if (pci_enable_busmaster(octeon_dev->device)) {
		lio_dev_err(octeon_dev, "pci_enable_device failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE);

	/* Identify the Octeon type and map the BAR address space. */
	if (lio_chip_specific_setup(octeon_dev)) {
		lio_dev_err(octeon_dev, "Chip specific setup failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE);

	/*
	 * Only add a reference after setting status 'LIO_DEV_PCI_MAP_DONE',
	 * since that is what is required for the reference to be removed
	 * during de-initialization (see 'lio_destroy_resources').
	 */
	lio_register_device(octeon_dev, bus, dev, function, true);


	octeon_dev->app_mode = LIO_DRV_INVALID_APP;

	if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) {
		fw_loaded = 0;
		/* Do a soft reset of the Octeon device. */
		if (octeon_dev->fn_list.soft_reset(octeon_dev))
			return (1);

		/* things might have changed */
		if (!lio_cn23xx_pf_fw_loaded(octeon_dev))
			fw_loaded = 0;
		else
			fw_loaded = 1;
	} else {
		fw_loaded = 1;
	}

	/*
	 * Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (lio_init_dispatch_list(octeon_dev))
		return (1);

	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
	    LIO_OPCODE_NIC_CORE_DRV_ACTIVE,
	    lio_core_drv_init, octeon_dev);
	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE);

	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
	if (ret) {
		lio_dev_err(octeon_dev,
		    "Failed to configure device registers\n");
		return (ret);
	}

	/* Initialize soft command buffer pool */
	if (lio_setup_sc_buffer_pool(octeon_dev)) {
		lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status,
	    LIO_DEV_SC_BUFF_POOL_INIT_DONE);

	if (lio_allocate_ioq_vector(octeon_dev)) {
		lio_dev_err(octeon_dev,
		    "IOQ vector allocation failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status,
	    LIO_DEV_MSIX_ALLOC_VECTOR_DONE);

	for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
		octeon_dev->instr_queue[i] =
		    malloc(sizeof(struct lio_instr_queue),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (octeon_dev->instr_queue[i] == NULL)
			return (1);
	}

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (lio_setup_instr_queue0(octeon_dev)) {
		lio_dev_err(octeon_dev,
		    "Instruction queue initialization failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status,
	    LIO_DEV_INSTR_QUEUE_INIT_DONE);

	/*
	 * Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */

	if (lio_setup_response_list(octeon_dev)) {
		lio_dev_err(octeon_dev, "Response list allocation failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE);

	for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
		octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (octeon_dev->droq[i] == NULL)
			return (1);
	}

	if (lio_setup_output_queue0(octeon_dev)) {
		lio_dev_err(octeon_dev, "Output queue initialization failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE);

	/*
	 * Setup the interrupt handler and record the INT SUM register address
	 */
	if (lio_setup_interrupt(octeon_dev,
	    octeon_dev->sriov_info.num_pf_rings))
		return (1);

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE);

	/*
	 * Send Credit for Octeon Output queues. Credits are always sent BEFORE
	 * the output queue is enabled.
	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
	 * before any credits have been issued, causing the ring to be reset
	 * (and the f/w appear to never have started).
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		lio_write_csr32(octeon_dev,
		    octeon_dev->droq[j]->pkts_credit_reg,
		    octeon_dev->droq[j]->max_count);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		lio_dev_err(octeon_dev, "Failed to enable input/output queues");
		return (ret);
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE);

	if (!fw_loaded) {
		lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			lio_dev_info(octeon_dev,
			    "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}

		lio_sleep_timeout(LIO_RESET_MSECS);

		/*
		 * Wait for the octeon to initialize DDR after the
		 * soft-reset.
		 */
		while (!ddr_timeout) {
			if (pause("-", lio_ms_to_ticks(100))) {
				/* user probably pressed Control-C */
				return (1);
			}
		}

		ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			lio_dev_err(octeon_dev,
			    "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
			    ret);
			return (1);
		}

		if (lio_wait_for_bootloader(octeon_dev, 1100)) {
			lio_dev_err(octeon_dev, "Board not responding\n");
			return (1);
		}

		/* Divert uboot to take commands from host instead. */
		ret = lio_console_send_cmd(octeon_dev, bootcmd, 50);

		lio_dev_dbg(octeon_dev, "Initializing consoles\n");
		ret = lio_init_consoles(octeon_dev);
		if (ret) {
			lio_dev_err(octeon_dev, "Could not access board consoles\n");
			return (1);
		}

		/*
		 * If console debug is enabled, pass an empty string to use
		 * the default enablement; otherwise pass NULL to leave it
		 * disabled.
		 */
		dbg_enb = lio_console_debug_enabled(0) ? "" : NULL;
		ret = lio_add_console(octeon_dev, 0, dbg_enb);

		if (ret) {
			lio_dev_err(octeon_dev, "Could not access board console\n");
			return (1);
		} else if (lio_console_debug_enabled(0)) {
			/*
			 * If console was added AND we're logging console output
			 * then set our console print function.
			 */
			octeon_dev->console[0].print = lio_dbg_console_print;
		}

		atomic_store_rel_int(&octeon_dev->status,
		    LIO_DEV_CONSOLE_INIT_DONE);

		lio_dev_dbg(octeon_dev, "Loading firmware\n");

		ret = lio_load_firmware(octeon_dev);
		if (ret) {
			lio_dev_err(octeon_dev, "Could not load firmware to board\n");
			return (1);
		}
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK);

	return (0);
}

/*
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void
lio_pci_flr(struct octeon_device *oct)
{
	uint32_t exppos, status;

	pci_find_cap(oct->device, PCIY_EXPRESS, &exppos);

	pci_save_state(oct->device);

	/* Quiesce the device completely */
	pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2);

	/* Wait for Transaction Pending bit clean */
	lio_mdelay(100);

	status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
	if (status & PCIEM_STA_TRANSACTION_PND) {
		lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
		lio_mdelay(5);

		status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
		if (status & PCIEM_STA_TRANSACTION_PND)
			lio_dev_info(oct, "Function reset still incomplete after 5s, reset anyway\n");
	}

	pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2);
	lio_mdelay(100);

	pci_restore_state(oct->device);
}

/*
 * \brief Debug console print function
 * @param octeon_dev octeon device
 * @param console_num console number
 * @param prefix first portion of line to display
 * @param suffix second portion of line to display
 *
 * The OCTEON debug console outputs entire lines (excluding '\n').
 * Normally, the line will be passed in the 'prefix' parameter.
 * However, due to buffering, it is possible for a line to be split into two
 * parts, in which case they will be passed as the 'prefix' parameter and
 * 'suffix' parameter.
 */
static int
lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num,
    char *prefix, char *suffix)
{

	if (prefix != NULL && suffix != NULL)
		lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix);
	else if (prefix != NULL)
		lio_dev_info(oct, "%u: %s\n", console_num, prefix);
	else if (suffix != NULL)
		lio_dev_info(oct, "%u: %s\n", console_num, suffix);

	return (0);
}

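/*
 * \brief Watchdog kernel thread; polls SLI_SCRATCH2 for crashed or stuck
 * firmware cores and logs each affected core once.
 */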
static void
lio_watchdog(void *param)
{
	int core_num;
	uint16_t mask_of_crashed_or_stuck_cores = 0;
	struct octeon_device *oct = param;
	bool err_msg_was_printed[12];

	bzero(err_msg_was_printed, sizeof(err_msg_was_printed));

	while (1) {
		kproc_suspend_check(oct->watchdog_task);
		mask_of_crashed_or_stuck_cores =
		    (uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2);

		if (mask_of_crashed_or_stuck_cores) {
			struct octeon_device *other_oct;

			oct->cores_crashed = true;
			other_oct = lio_get_other_octeon_device(oct);
			if (other_oct != NULL)
				other_oct->cores_crashed = true;

			for (core_num = 0; core_num < LIO_MAX_CORES;
			    core_num++) {
				bool core_crashed_or_got_stuck;

				core_crashed_or_got_stuck =
				    (mask_of_crashed_or_stuck_cores >>
				    core_num) & 1;
				if (core_crashed_or_got_stuck &&
				    !err_msg_was_printed[core_num]) {
					lio_dev_err(oct,
					    "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					    core_num);
					err_msg_was_printed[core_num] = true;
				}
			}

		}

		/* sleep for two seconds */
		pause("-", lio_ms_to_ticks(2000));
	}
}

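/*
 * \brief Identify the Octeon chip from its PCI device ID, pick the number
 * of PF rings from the per-function tunables, and run the chip-specific
 * device setup.
 */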
static int
lio_chip_specific_setup(struct octeon_device *oct)
{
	char *s;
	uint32_t dev_id;
	int ret = 1;

	dev_id = lio_read_pci_cfg(oct, 0);
	oct->subdevice_id = pci_get_subdevice(oct->device);

	switch (dev_id) {
	case LIO_CN23XX_PF_PCIID:
		oct->chip_id = LIO_CN23XX_PF_VID;
		if (pci_get_function(oct->device) == 0) {
			if (num_queues_per_pf0 < 0) {
				lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n",
				    num_queues_per_pf0);
				num_queues_per_pf0 = 0;
			}

			oct->sriov_info.num_pf_rings = num_queues_per_pf0;
		} else {
			if (num_queues_per_pf1 < 0) {
				lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n",
				    num_queues_per_pf1);
				num_queues_per_pf1 = 0;
			}

			oct->sriov_info.num_pf_rings = num_queues_per_pf1;
		}

		ret = lio_cn23xx_pf_setup_device(oct);
		s = "CN23XX";
		break;

	default:
		s = "?";
		lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id);
	}

	if (!ret)
		lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s,
		    OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct),
		    lio_get_conf(oct)->card_name, LIO_VERSION);

	return (ret);
}

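/*
 * \brief Return the other Octeon device that shares this device's PCI bus
 * and slot (typically the other PF of the same adapter), or NULL if none.
 */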
static struct octeon_device *
lio_get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if ((other_oct != NULL) && other_oct->device) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = pci_get_bus(oct->device);
		other_oct_busnum = pci_get_bus(other_oct->device);

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = pci_get_slot(oct->device);
			other_oct_slot = pci_get_slot(other_oct->device);

			if (oct_slot == other_oct_slot)
				return (other_oct);
		}
	}
	return (NULL);
}

/*
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int
lio_load_firmware(struct octeon_device *oct)
{
	const struct firmware *fw;
	char *tmp_fw_type = NULL;
	int ret = 0;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];

	if (fw_type[0] == '\0')
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
	else
		tmp_fw_type = fw_type;

	sprintf(fw_name, "%s%s_%s%s", LIO_FW_BASE_NAME,
	    lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX);

	fw = firmware_get(fw_name);
	if (fw == NULL) {
		lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n",
		    fw_name);
		return (EINVAL);
	}

	ret = lio_download_firmware(oct, fw->data, fw->datasize);

	firmware_put(fw, FIRMWARE_UNLOAD);

	return (ret);
}

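/*
 * \brief Start the NIC personality once the firmware core reports that it
 * is up; fails if the firmware is running an unexpected application.
 */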
static int
lio_nic_starter(struct octeon_device *oct)
{
	int ret = 0;

	atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) {
		if (lio_init_nic_module(oct)) {
			lio_dev_err(oct, "NIC initialization failed\n");
			ret = -1;
#ifdef CAVIUM_ONLY_23XX_VF
		} else {
			if (octeon_enable_sriov(oct) < 0)
				ret = -1;
#endif
		}
	} else {
		lio_dev_err(oct,
		    "Unexpected application running on NIC (%d). Check firmware.\n",
		    oct->app_mode);
		ret = -1;
	}

	return (ret);
}

static int
lio_init_nic_module(struct octeon_device *oct)
{
	int num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct));
	int retval = 0;

	lio_dev_dbg(oct, "Initializing network interfaces\n");

	/*
	 * Only the default iq and oq were initialized earlier;
	 * initialize the rest as well.
	 */

	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	bzero(&oct->props, sizeof(struct lio_if_props));

	oct->props.gmxport = -1;

	retval = lio_setup_nic_devices(oct);
	if (retval) {
		lio_dev_err(oct, "Setup NIC devices failed\n");
		goto lio_init_failure;
	}

	lio_dev_dbg(oct, "Network interfaces ready\n");

	return (retval);

lio_init_failure:

	oct->ifcount = 0;

	return (retval);
}

static int
lio_ifmedia_update(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct ifmedia *ifm;

	ifm = &lio->ifmedia;

	/* We only support Ethernet media type. */
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	case IFM_10G_CX4:
	case IFM_10G_SR:
	case IFM_10G_T:
	case IFM_10G_TWINAX:
	default:
		/* We don't support changing the media type. */
		lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n",
		    IFM_SUBTYPE(ifm->ifm_media));
		return (EINVAL);
	}

	return (0);
}

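/*
 * \brief Map the adapter's PCI subdevice ID to an ifmedia subtype
 * (10G vs. 25G SR); defaults to 10G SR for unknown IDs.
 */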
static int
lio_get_media_subtype(struct octeon_device *oct)
{

	switch(oct->subdevice_id) {
	case LIO_CN2350_10G_SUBDEVICE:
	case LIO_CN2350_10G_SUBDEVICE1:
	case LIO_CN2360_10G_SUBDEVICE:
		return (IFM_10G_SR);

	case LIO_CN2350_25G_SUBDEVICE:
	case LIO_CN2360_25G_SUBDEVICE:
		return (IFM_25G_SR);
	}

	return (IFM_10G_SR);
}

static uint64_t
lio_get_baudrate(struct octeon_device *oct)
{

	switch(oct->subdevice_id) {
	case LIO_CN2350_10G_SUBDEVICE:
	case LIO_CN2350_10G_SUBDEVICE1:
	case LIO_CN2360_10G_SUBDEVICE:
		return (IF_Gbps(10));

	case LIO_CN2350_25G_SUBDEVICE:
	case LIO_CN2360_25G_SUBDEVICE:
		return (IF_Gbps(25));
	}

	return (IF_Gbps(10));
}

static void
lio_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct lio *lio = if_getsoftc(ifp);

	/* Report link down if the driver isn't running. */
	if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	/* Setup the default interface info. */
	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (lio->linfo.link.s.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
	} else {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev);

	if (lio->linfo.link.s.duplex)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

static uint64_t
lio_get_counter(if_t ifp, ift_counter cnt)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	uint64_t counter = 0;
	int i, q_no;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		for (i = 0; i < oct->num_oqs; i++) {
			q_no = lio->linfo.rxpciq[i].s.q_no;
			counter += oct->droq[q_no]->stats.rx_pkts_received;
		}
		break;
	case IFCOUNTER_OPACKETS:
		for (i = 0; i < oct->num_iqs; i++) {
			q_no = lio->linfo.txpciq[i].s.q_no;
			counter += oct->instr_queue[q_no]->stats.tx_done;
		}
		break;
	case IFCOUNTER_IBYTES:
		for (i = 0; i < oct->num_oqs; i++) {
			q_no = lio->linfo.rxpciq[i].s.q_no;
			counter += oct->droq[q_no]->stats.rx_bytes_received;
		}
		break;
	case IFCOUNTER_OBYTES:
		for (i = 0; i < oct->num_iqs; i++) {
			q_no = lio->linfo.txpciq[i].s.q_no;
			counter += oct->instr_queue[q_no]->stats.tx_tot_bytes;
		}
		break;
	case IFCOUNTER_IQDROPS:
		for (i = 0; i < oct->num_oqs; i++) {
			q_no = lio->linfo.rxpciq[i].s.q_no;
			counter += oct->droq[q_no]->stats.rx_dropped;
		}
		break;
	case IFCOUNTER_OQDROPS:
		for (i = 0; i < oct->num_iqs; i++) {
			q_no = lio->linfo.txpciq[i].s.q_no;
			counter += oct->instr_queue[q_no]->stats.tx_dropped;
		}
		break;
	case IFCOUNTER_IMCASTS:
		counter = oct->link_stats.fromwire.total_mcst;
		break;
	case IFCOUNTER_OMCASTS:
		counter = oct->link_stats.fromhost.mcast_pkts_sent;
		break;
	case IFCOUNTER_COLLISIONS:
		counter = oct->link_stats.fromhost.total_collisions;
		break;
	case IFCOUNTER_IERRORS:
		counter = oct->link_stats.fromwire.fcs_err +
		    oct->link_stats.fromwire.l2_err +
		    oct->link_stats.fromwire.frame_err;
		break;
	default:
		return (if_get_counter_default(ifp, cnt));
	}

	return (counter);
}

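/*
 * \brief Initialize the ifnet for a LiquidIO interface: ifmedia entries,
 * driver entry points, MTU, checksum/TSO/LRO/VLAN capabilities and baudrate.
 */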
static int
lio_init_ifnet(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	if_t ifp = lio->ifp;

	/* ifconfig entrypoint for media type/status reporting */
	ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update,
	    lio_ifmedia_status);

	/* set the default interface values */
	ifmedia_add(&lio->ifmedia,
	    (IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)),
	    0, NULL);
	ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
	ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO));

	lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media;
	lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media);

	if_initname(ifp, device_get_name(oct->device),
	    device_get_unit(oct->device));
	if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
	if_setioctlfn(ifp, lio_ioctl);
	if_setgetcounterfn(ifp, lio_get_counter);
	if_settransmitfn(ifp, lio_mq_start);
	if_setqflushfn(ifp, lio_qflush);
	if_setinitfn(ifp, lio_open);
	if_setmtu(ifp, lio->linfo.link.s.mtu);
	lio->mtu = lio->linfo.link.s.mtu;
	if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6));

	if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
	    IFCAP_TSO | IFCAP_LRO |
	    IFCAP_JUMBO_MTU | IFCAP_HWSTATS |
	    IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0);

	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_setbaudrate(ifp, lio_get_baudrate(oct));

	return (0);
}

static void
lio_tcp_lro_free(struct octeon_device *octeon_dev, if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct lio_droq *droq;
	int q_no;
	int i;

	for (i = 0; i < octeon_dev->num_oqs; i++) {
		q_no = lio->linfo.rxpciq[i].s.q_no;
		droq = octeon_dev->droq[q_no];
		if (droq->lro.ifp) {
			tcp_lro_free(&droq->lro);
			droq->lro.ifp = NULL;
		}
	}
}

static int
lio_tcp_lro_init(struct octeon_device *octeon_dev, if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct lio_droq *droq;
	struct lro_ctrl *lro;
	int i, q_no, ret = 0;

	for (i = 0; i < octeon_dev->num_oqs; i++) {
		q_no = lio->linfo.rxpciq[i].s.q_no;
		droq = octeon_dev->droq[q_no];
		lro = &droq->lro;
		ret = tcp_lro_init(lro);
		if (ret) {
			lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n",
			    ret);
			goto lro_init_failed;
		}

		lro->ifp = ifp;
	}

	return (ret);

lro_init_failed:
	lio_tcp_lro_free(octeon_dev, ifp);

	return (ret);
}

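/*
 * \brief Query the firmware for each NIC interface (IF_CFG soft command),
 * then create and configure the corresponding ifnet, queues, gather lists
 * and offload features.
 */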
static int
lio_setup_nic_devices(struct octeon_device *octeon_dev)
{
	union octeon_if_cfg if_cfg;
	struct lio *lio = NULL;
	if_t ifp = NULL;
	struct lio_version *vdata;
	struct lio_soft_command *sc;
	struct lio_if_cfg_context *ctx;
	struct lio_if_cfg_resp *resp;
	struct lio_if_props *props;
	int num_iqueues, num_oqueues, retval;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	uint32_t ctx_size, data_size;
	uint32_t ifidx_or_pfnum, resp_size;
	uint8_t mac[ETHER_ADDR_LEN], i, j;

	/* This is to handle link status changes */
	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
	    LIO_OPCODE_NIC_INFO,
	    lio_link_info, octeon_dev);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct lio_if_cfg_resp);
		ctx_size = sizeof(struct lio_if_cfg_context);
		data_size = sizeof(struct lio_version);
		sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size,
		    ctx_size);
		if (sc == NULL)
			return (ENOMEM);

		resp = (struct lio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct lio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((uint64_t *)vdata) = 0;
		vdata->major = htobe16(LIO_BASE_MAJOR_VERSION);
		vdata->minor = htobe16(LIO_BASE_MINOR_VERSION);
		vdata->micro = htobe16(LIO_BASE_MICRO_VERSION);

		num_iqueues = octeon_dev->sriov_info.num_pf_rings;
		num_oqueues = octeon_dev->sriov_info.num_pf_rings;
		base_queue = octeon_dev->sriov_info.pf_srn;

		gmx_port_id = octeon_dev->pf_num;
		ifidx_or_pfnum = octeon_dev->pf_num;

		lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n",
		    ifidx_or_pfnum, num_iqueues, num_oqueues);
		ctx->cond = 0;
		ctx->octeon_id = lio_get_device_id(octeon_dev);

		if_cfg.if_cfg64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC,
		    LIO_OPCODE_NIC_IF_CFG, 0,
		    if_cfg.if_cfg64, 0);

		sc->callback = lio_if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 3000;

		retval = lio_send_soft_command(octeon_dev, sc);
		if (retval == LIO_IQ_SEND_FAILED) {
			lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n",
			    retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/*
		 * Sleep on a wait queue until the cond flag indicates that
		 * the response arrived or the request timed out.
		 */
		lio_sleep_cond(octeon_dev, &ctx->cond);

		retval = resp->status;
		if (retval) {
			lio_dev_err(octeon_dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
		    (sizeof(struct octeon_if_cfg_info)) >> 3);

		num_iqueues = bitcount64(resp->cfg_info.iqmask);
		num_oqueues = bitcount64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			lio_dev_err(octeon_dev,
			    "Got bad iqueues (%016llX) or oqueues (%016llX) from firmware.\n",
			    LIO_CAST64(resp->cfg_info.iqmask),
			    LIO_CAST64(resp->cfg_info.oqmask));
			goto setup_nic_dev_fail;
		}

		lio_dev_dbg(octeon_dev,
		    "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
		    i, LIO_CAST64(resp->cfg_info.iqmask),
		    LIO_CAST64(resp->cfg_info.oqmask),
		    num_iqueues, num_oqueues);

		ifp = if_alloc(IFT_ETHER);

		lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO);

		if (lio == NULL) {
			lio_dev_err(octeon_dev, "Lio allocation failed\n");
			goto setup_nic_dev_fail;
		}

		if_setsoftc(ifp, lio);

		if_sethwtsomax(ifp, LIO_MAX_FRAME_SIZE);
		if_sethwtsomaxsegcount(ifp, LIO_MAX_SG);
		if_sethwtsomaxsegsize(ifp, PAGE_SIZE);

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props;
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->ifp = ifp;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;
		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].rxpciq64 =
			    resp->cfg_info.linfo.rxpciq[j].rxpciq64;
		}

		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].txpciq64 =
			    resp->cfg_info.linfo.txpciq[j].txpciq64;
		}

		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.link_status64 =
		    resp->cfg_info.linfo.link.link_status64;

		/*
		 * Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->ifp = ifp;

		lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i,
		    lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr));
		lio_init_ifnet(lio);
		/* 64-bit swap required on LE machines */
		lio_swap_8B_data(&lio->linfo.hw_addr, 1);
		memcpy(mac, (uint8_t *)&lio->linfo.hw_addr + 2, ETHER_ADDR_LEN);

		ether_ifattach(ifp, mac);

		/*
		 * By default all interfaces on a single Octeon use the same
		 * tx and rx queues.
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq,
		    lio->linfo.num_rxpciq)) {
			lio_dev_err(octeon_dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq);

		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
			lio_dev_err(octeon_dev, "Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp))
			goto setup_nic_dev_fail;

		if (lio_hwlro &&
		    (if_getcapenable(ifp) & IFCAP_LRO) &&
		    (if_getcapenable(ifp) & IFCAP_RXCSUM) &&
		    (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6))
			lio_set_feature(ifp, LIO_CMD_LRO_ENABLE,
			    LIO_LROIPV4 | LIO_LROIPV6);

		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER))
			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1);
		else
			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0);

		if (lio_setup_rx_oom_poll_fn(ifp))
			goto setup_nic_dev_fail;

		lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
		    i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		lio->link_changes++;

		lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/*
		 * Sending command to firmware to enable Rx checksum offload
		 * by default at the time of setup of Liquidio driver for
		 * this device
		 */
		lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL,
		    LIO_CMD_RXCSUM_ENABLE);
		lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL,
		    LIO_CMD_TXCSUM_ENABLE);

		if (lio_rss) {
			if (lio_send_rss_param(lio))
				goto setup_nic_dev_fail;
		} else
			lio_set_feature(ifp, LIO_CMD_SET_FNV,
			    LIO_CMD_FNV_ENABLE);

		lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i);

		lio_free_soft_command(octeon_dev, sc);
		lio->vlan_attach =
		    EVENTHANDLER_REGISTER(vlan_config,
		    lio_vlan_rx_add_vid, lio,
		    EVENTHANDLER_PRI_FIRST);
		lio->vlan_detach =
		    EVENTHANDLER_REGISTER(vlan_unconfig,
		    lio_vlan_rx_kill_vid, lio,
		    EVENTHANDLER_PRI_FIRST);

		/* Update stats periodically */
		callout_init(&lio->stats_timer, 0);
		lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL;

		lio_add_hw_stats(lio);
	}

	return (0);

setup_nic_dev_fail:

	lio_free_soft_command(octeon_dev, sc);

	while (i--) {
		lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i);
		lio_destroy_nic_device(octeon_dev, i);
	}

	return (ENODEV);
}

static int
lio_link_info(struct lio_recv_info *recv_info, void *ptr)
{
	struct octeon_device *oct = (struct octeon_device *)ptr;
	struct lio_recv_pkt *recv_pkt = recv_info->recv_pkt;
	union octeon_link_status *ls;
	int gmxport = 0, i;

	lio_dev_dbg(oct, "%s Called\n", __func__);
	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) {
		lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
		    recv_pkt->buffer_size[0],
		    recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}
	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data +
	    LIO_DROQ_INFO_SIZE);
	lio_swap_8B_data((uint64_t *)ls,
	    (sizeof(union octeon_link_status)) >> 3);

	if (oct->props.gmxport == gmxport)
		lio_update_link_status(oct->props.ifp, ls);

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		lio_recv_buffer_free(recv_pkt->buffer_ptr[i]);

	lio_free_recv_info(recv_info);
	return (0);
}

void
lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
{

	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(iq->txtag, finfo->map);
	m_freem(finfo->mb);
}

void
lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
{
	struct lio_gather *g;
	struct octeon_device *oct;
	struct lio *lio;
	int iq_no;

	g = finfo->g;
	iq_no = iq->txpciq.s.q_no;
	oct = iq->oct_dev;
	lio = if_getsoftc(oct->props.ifp);

	mtx_lock(&lio->glist_lock[iq_no]);
	STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries);
	mtx_unlock(&lio->glist_lock[iq_no]);

	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(iq->txtag, finfo->map);
	m_freem(finfo->mb);
}

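/*
 * \brief Completion callback for the IF_CFG soft command; records the
 * firmware version and sets the context cond flag to wake the waiter.
 */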
static void
lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf)
{
	struct lio_soft_command *sc = (struct lio_soft_command *)buf;
	struct lio_if_cfg_resp *resp;
	struct lio_if_cfg_context *ctx;

	resp = (struct lio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct lio_if_cfg_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (resp->status)
		lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n",
		    LIO_CAST64(resp->status), status);
	ctx->cond = 1;

	snprintf(oct->fw_info.lio_firmware_version, 32, "%s",
	    resp->cfg_info.lio_firmware_version);

	/*
	 * This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();
}

static int
lio_is_mac_changed(uint8_t *new, uint8_t *old)
{

	return ((new[0] != old[0]) || (new[1] != old[1]) ||
	    (new[2] != old[2]) || (new[3] != old[3]) ||
	    (new[4] != old[4]) || (new[5] != old[5]));
}

void
lio_open(void *arg)
{
	struct lio *lio = arg;
	if_t ifp = lio->ifp;
	struct octeon_device *oct = lio->oct_dev;
	uint8_t *mac_new, mac_old[ETHER_ADDR_LEN];
	int ret = 0;

	lio_ifstate_set(lio, LIO_IFSTATE_RUNNING);

	/* Ready for link status updates */
	lio->intf_open = 1;

	lio_dev_info(oct, "Interface Open, ready for traffic\n");

	/* tell Octeon to start forwarding packets to host */
	lio_send_rx_ctrl_cmd(lio, 1);

	mac_new = if_getlladdr(ifp);
	memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_ADDR_LEN);

	if (lio_is_mac_changed(mac_new, mac_old)) {
		ret = lio_set_mac(ifp, mac_new);
		if (ret)
			lio_dev_err(oct, "MAC change failed, error: %d\n", ret);
	}

	/* Now inform the stack we're ready */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

	lio_dev_info(oct, "Interface is opened\n");
}

static int
lio_set_rxcsum_command(if_t ifp, int command, uint8_t rx_cmd)
{
	struct lio_ctrl_pkt nctrl;
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	int ret = 0;

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
		    ret);
	}

	return (ret);
}

static int
lio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	lio_dev_dbg(oct, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		lio_dev_err(oct, "Init for Octeon was not completed\n");
		return (1);
	}

	mtx_lock(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = LIO_DRV_OFFLINE;
	mtx_unlock(&oct->cmd_resp_wqlock);

	for (i = 0; i < oct->ifcount; i++) {
		lio = if_getsoftc(oct->props.ifp);
		for (j = 0; j < oct->num_oqs; j++)
			lio_unregister_droq_ops(oct,
			    lio->linfo.rxpciq[j].s.q_no);
	}

	callout_drain(&lio->stats_timer);

	for (i = 0; i < oct->ifcount; i++)
		lio_destroy_nic_device(oct, i);

	lio_dev_dbg(oct, "Network interfaces stopped\n");

	return (0);
}

static void
lio_delete_glists(struct octeon_device *oct, struct lio *lio)
{
	struct lio_gather *g;
	int i;

	if (lio->glist_lock != NULL) {
		free((void *)lio->glist_lock, M_DEVBUF);
		lio->glist_lock = NULL;
	}

	if (lio->ghead == NULL)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct lio_gather *)
			    lio_delete_first_node(&lio->ghead[i]);
			free(g, M_DEVBUF);
		} while (g);

		if ((lio->glists_virt_base != NULL) &&
		    (lio->glists_virt_base[i] != NULL)) {
			lio_dma_free(lio->glist_entry_size * lio->tx_qsize,
			    lio->glists_virt_base[i]);
		}
	}

	free(lio->glists_virt_base, M_DEVBUF);
	lio->glists_virt_base = NULL;

	free(lio->glists_dma_base, M_DEVBUF);
	lio->glists_dma_base = NULL;

	free(lio->ghead, M_DEVBUF);
	lio->ghead = NULL;
}

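/*
 * \brief Allocate the per-IQ scatter/gather list pools: one lock, list head
 * and DMA-backed array of gather entries per transmit queue.
 */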
static int
lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	struct lio_gather *g;
	int i, j;

	lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (lio->glist_lock == NULL)
		return (1);

	lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (lio->ghead == NULL) {
		free((void *)lio->glist_lock, M_DEVBUF);
		lio->glist_lock = NULL;
		return (1);
	}

	lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) *
	    LIO_SG_ENTRY_SIZE);
	/*
	 * allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) {
		lio_delete_glists(oct, lio);
		return (1);
	}

	for (i = 0; i < num_iqs; i++) {
		mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF);

		STAILQ_INIT(&lio->ghead[i]);

		lio->glists_virt_base[i] =
		    lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize,
		    (vm_paddr_t *)&lio->glists_dma_base[i]);
		if (lio->glists_virt_base[i] == NULL) {
			lio_delete_glists(oct, lio);
			return (1);
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO);
			if (g == NULL)
				break;

			g->sg = (struct lio_sg_entry *)(uintptr_t)
			    ((uint64_t)(uintptr_t)lio->glists_virt_base[i] +
			    (j * lio->glist_entry_size));
			g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] +
			    (j * lio->glist_entry_size);
			STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries);
		}

		if (j != lio->tx_qsize) {
			lio_delete_glists(oct, lio);
			return (1);
		}
	}

	return (0);
}

void
lio_stop(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;

	lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING);
	if_link_state_change(ifp, LINK_STATE_DOWN);

	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	lio_send_rx_ctrl_cmd(lio, 0);

	/* Tell the stack that the interface is no longer active */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);

	lio_dev_info(oct, "Interface is stopped\n");
}

static void
lio_check_rx_oom_status(struct lio *lio)
{
	struct lio_droq *droq;
	struct octeon_device *oct = lio->oct_dev;
	int desc_refilled;
	int q, q_no = 0;

	for (q = 0; q < oct->num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		droq = oct->droq[q_no];
		if (droq == NULL)
			continue;
		if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) {
			mtx_lock(&droq->lock);
			desc_refilled = lio_droq_refill(oct, droq);
			/*
			 * Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			lio_write_csr32(oct, droq->pkts_credit_reg,
			    desc_refilled);
			/* make sure mmio write completes */
			__compiler_membar();
			mtx_unlock(&droq->lock);
		}
	}
}

static void
lio_poll_check_rx_oom_status(void *arg, int pending __unused)
{
	struct lio_tq *rx_status_tq = arg;
	struct lio *lio = rx_status_tq->ctxptr;

	if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING))
		lio_check_rx_oom_status(lio);

	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
	    lio_ms_to_ticks(50));
}

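/*
 * \brief Create the taskqueue that periodically (every 50 ms) refills
 * receive descriptors when the DROQ credit count runs low.
 */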
static int
lio_setup_rx_oom_poll_fn(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	struct lio_tq *rx_status_tq;

	rx_status_tq = &lio->rx_status_tq;

	rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK,
	    taskqueue_thread_enqueue,
	    &rx_status_tq->tq);

	TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
	    lio_poll_check_rx_oom_status, (void *)rx_status_tq);

	rx_status_tq->ctxptr = lio;

	taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET,
	    "lio%d_rx_oom_status",
	    oct->octeon_id);

	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
	    lio_ms_to_ticks(50));

	return (0);
}

static void
lio_cleanup_rx_oom_poll_fn(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);

	if (lio->rx_status_tq.tq != NULL) {
		while (taskqueue_cancel_timeout(lio->rx_status_tq.tq,
		    &lio->rx_status_tq.work, NULL))
			taskqueue_drain_timeout(lio->rx_status_tq.tq,
			    &lio->rx_status_tq.work);

		taskqueue_free(lio->rx_status_tq.tq);

		lio->rx_status_tq.tq = NULL;
	}
}

static void
lio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	if_t ifp = oct->props.ifp;
	struct lio *lio;

	if (ifp == NULL) {
		lio_dev_err(oct, "%s No ifp ptr for index %d\n",
		    __func__, ifidx);
		return;
	}

	lio = if_getsoftc(ifp);

	lio_ifstate_set(lio, LIO_IFSTATE_DETACH);

	lio_dev_dbg(oct, "NIC device cleanup\n");

	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		lio_stop(ifp);

	if (lio_wait_for_pending_requests(oct))
		lio_dev_err(oct, "There were pending requests\n");

	if (lio_wait_for_instr_fetch(oct))
		lio_dev_err(oct, "IQ had pending instructions\n");

	if (lio_wait_for_oq_pkts(oct))
		lio_dev_err(oct, "OQ had pending packets\n");

	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		ether_ifdetach(ifp);

	lio_tcp_lro_free(oct, ifp);

	lio_cleanup_rx_oom_poll_fn(ifp);

	lio_delete_glists(oct, lio);

	EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach);

	free(lio, M_DEVBUF);

	if_free(ifp);

	oct->props.gmxport = -1;

	oct->props.ifp = NULL;
}
1933
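/*
 * \brief Print link information
 * @param ifp network interface pointer
 */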
static void
print_link_info(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);

	if (!lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct octeon_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n",
			    linfo->link.s.speed,
			    (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			lio_dev_info(lio->oct_dev, "Link Down\n");
		}
	}
}

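/*
 * \brief Update link status
 * @param ifp network interface pointer
 * @param ls new link status reported for this interface
 *
 * Records the new status; if the interface is open and the status has
 * changed, logs the change and notifies the network stack.
 */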
static inline void
lio_update_link_status(if_t ifp, union octeon_link_status *ls)
{
	struct lio *lio = if_getsoftc(ifp);
	int changed = (lio->linfo.link.link_status64 != ls->link_status64);

	lio->linfo.link.link_status64 = ls->link_status64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(ifp);
		lio->link_changes++;
		if (lio->linfo.link.s.link_up)
			if_link_state_change(ifp, LINK_STATE_UP);
		else
			if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}

/*
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void
lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
{
	struct lio_soft_command *sc = (struct lio_soft_command *)buf;
	struct lio_rx_ctl_context *ctx;

	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status)
		lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n",
		    LIO_CAST64(status));
	ctx->cond = 1;

	/*
	 * This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();
}

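/*
 * \brief Send LIO_CMD_RX_CTL to start or stop packet reception
 * @param lio per-interface private structure
 * @param start_stop 1 to enable RX, 0 to disable
 *
 * Issues the command as a soft command and sleeps until the response
 * arrives or the request times out; props.rx_on is updated on success.
 */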
static void
lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct lio_soft_command *sc;
	struct lio_rx_ctl_context *ctx;
	union octeon_cmd *ncmd;
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int ctx_size = sizeof(struct lio_rx_ctl_context);
	int retval;

	if (oct->props.rx_on == start_stop)
		return;

	sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size);
	if (sc == NULL)
		return;

	ncmd = (union octeon_cmd *)sc->virtdptr;
	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;

	ctx->cond = 0;
	ctx->octeon_id = lio_get_device_id(oct);
	ncmd->cmd64 = 0;
	ncmd->s.cmd = LIO_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0,
	    0, 0);

	sc->callback = lio_rx_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = lio_send_soft_command(oct, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(oct, "Failed to send RX Control message\n");
	} else {
		/*
		 * Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed out.
		 */
		lio_sleep_cond(oct, &ctx->cond);
		oct->props.rx_on = start_stop;
	}

	lio_free_soft_command(oct, sc);
}

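/*
 * \brief VLAN config event handler: add a VLAN filter
 * @param arg softc pointer registered with the event handler
 * @param ifp network interface pointer
 * @param vid VLAN id to add
 */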
static void
lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid)
{
	struct lio_ctrl_pkt nctrl;
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	int ret = 0;

	if (if_getsoftc(ifp) != arg)	/* Not our event */
		return;

	if ((vid == 0) || (vid > 4095))	/* Invalid */
		return;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n",
		    ret);
	}
}

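/*
 * \brief VLAN unconfig event handler: remove a VLAN filter
 * @param arg softc pointer registered with the event handler
 * @param ifp network interface pointer
 * @param vid VLAN id to remove
 */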
static void
lio_vlan_rx_kill_vid(void *arg, if_t ifp, uint16_t vid)
{
	struct lio_ctrl_pkt nctrl;
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	int ret = 0;

	if (if_getsoftc(ifp) != arg)	/* Not our event */
		return;

	if ((vid == 0) || (vid > 4095))	/* Invalid */
		return;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct,
		    "Kill VLAN filter failed in core (ret: 0x%x)\n",
		    ret);
	}
}

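/*
 * \brief Wait for pending output queue packets to be processed
 * @param oct octeon device
 *
 * Polls each active DROQ and schedules its task to drain any packets
 * still reported by hardware, retrying up to 100 times.
 */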
static int
lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	int i, pending_pkts, pkt_cnt = 0, retry = 100;

	do {
		pending_pkts = 0;

		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;

			pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]);
			if (pkt_cnt > 0) {
				pending_pkts += pkt_cnt;
				taskqueue_enqueue(oct->droq[i]->droq_taskqueue,
				    &oct->droq[i]->droq_task);
			}
		}

		pkt_cnt = 0;
		lio_sleep_timeout(1);
	} while (retry-- && pending_pkts);

	return (pkt_cnt);
}

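/*
 * \brief Tear down device resources
 * @param oct octeon device
 *
 * Falls through the device states from RUNNING down to BEGIN_STATE,
 * undoing the setup performed for each state along the way.
 */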
static void
lio_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;

	switch (atomic_load_acq_int(&oct->status)) {
	case LIO_DEV_RUNNING:
	case LIO_DEV_CORE_OK:
		/* No more instructions will be forwarded. */
		atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET);

		oct->app_mode = LIO_DRV_INVALID_APP;
		lio_dev_dbg(oct, "Device state is now %s\n",
		    lio_get_state_string(&oct->status));

		lio_sleep_timeout(100);

		/* fallthrough */
	case LIO_DEV_HOST_OK:

		/* fallthrough */
	case LIO_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		lio_remove_consoles(oct);

		/* fallthrough */
	case LIO_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_pending_requests(oct))
			lio_dev_err(oct, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			lio_dev_err(oct, "IQ had pending instructions\n");

		/*
		 * Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			lio_dev_err(oct, "OQ had pending packets\n");

		/* fallthrough */
	case LIO_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].tag != NULL) {
					bus_teardown_intr(oct->device,
					    oct->ioq_vector[i].msix_res,
					    oct->ioq_vector[i].tag);
					oct->ioq_vector[i].tag = NULL;
				}
				if (oct->ioq_vector[i].msix_res != NULL) {
					bus_release_resource(oct->device,
					    SYS_RES_IRQ,
					    oct->ioq_vector[i].vector,
					    oct->ioq_vector[i].msix_res);
					oct->ioq_vector[i].msix_res = NULL;
				}
			}
			/* non-iov vector's argument is oct struct */
			if (oct->tag != NULL) {
				bus_teardown_intr(oct->device, oct->msix_res,
				    oct->tag);
				oct->tag = NULL;
			}

			if (oct->msix_res != NULL) {
				bus_release_resource(oct->device, SYS_RES_IRQ,
				    oct->aux_vector,
				    oct->msix_res);
				oct->msix_res = NULL;
			}

			pci_release_msi(oct->device);
		}
		/* fallthrough */
	case LIO_DEV_IN_RESET:
	case LIO_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		lio_mdelay(100);
		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			lio_delete_droq(oct, i);
		}

		/* fallthrough */
	case LIO_DEV_RESP_LIST_INIT_DONE:
		for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
			if (oct->droq[i] != NULL) {
				free(oct->droq[i], M_DEVBUF);
				oct->droq[i] = NULL;
			}
		}
		lio_delete_response_list(oct);

		/* fallthrough */
	case LIO_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;

			lio_delete_instr_queue(oct, i);
		}

		/* fallthrough */
	case LIO_DEV_MSIX_ALLOC_VECTOR_DONE:
		for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
			if (oct->instr_queue[i] != NULL) {
				free(oct->instr_queue[i], M_DEVBUF);
				oct->instr_queue[i] = NULL;
			}
		}
		lio_free_ioq_vector(oct);

		/* fallthrough */
	case LIO_DEV_SC_BUFF_POOL_INIT_DONE:
		lio_free_sc_buffer_pool(oct);

		/* fallthrough */
	case LIO_DEV_DISPATCH_INIT_DONE:
		lio_delete_dispatch_list(oct);

		/* fallthrough */
	case LIO_DEV_PCI_MAP_DONE:
		refcount = lio_deregister_device(oct);

		if (fw_type_is_none())
			lio_pci_flr(oct);

		if (!refcount)
			oct->fn_list.soft_reset(oct);

		lio_unmap_pci_barx(oct, 0);
		lio_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case LIO_DEV_PCI_ENABLE_DONE:
		/* Disable the device, releasing the PCI INT */
		pci_disable_busmaster(oct->device);

		/* fallthrough */
	case LIO_DEV_BEGIN_STATE:
		break;
	}			/* end switch (oct->status) */
}
