/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include <net/switchdev.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to a non-zero value before starting the check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
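/* debug = -1 selects DEFAULT_MSG_ENABLE above; the value is presumably
 * consumed through the usual netif_msg_init() convention in the NIC
 * setup path.
 */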

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses the firmware in flash if present, else loads the \"nic\" firmware.");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");
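/* Example (hypothetical value): loading the module with
 * "modprobe liquidio console_bitmask=0x3" redirects the debug output
 * of consoles 0 and 1 to syslog.
 */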

/**
 * \brief Determines if a given console has debug enabled.
 * @param console console to check
 * @returns 1 if debug is enabled for the console, 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
/* Update local time to the Octeon firmware every 60 seconds so that the
 * firmware uses the same time reference as the host; that makes it easy
 * to correlate firmware-logged events/errors with host events when
 * debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)

struct lio_trusted_vf_ctx {
	struct completion complete;
	int status;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
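
/* The two field orders above keep the in-memory layout of tx_info
 * identical on little- and big-endian hosts, so the Tx path can hand
 * the GSO metadata to the big-endian Octeon cores as a single u64 with
 * a consistent field placement.
 */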

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

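/* Probe-time synchronization state: "init" is completed once device
 * initialization has finished (successfully or not) and "started" once
 * the NIC application is reported up; init_ok/started_ok record the
 * outcomes so waiters can tell success from failure.
 */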
struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
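	/* 0x100 is the conventional config-space offset of the AER extended
	 * capability; a more defensive lookup would use
	 * pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR).
	 */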
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s:\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask;        /* Clear corresponding nonfatal bits */
	else
		status &= mask;         /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery,
	 * only for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{       /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend	= liquidio_suspend,
	.resume		= liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns number of Tx queues woken up (0 if none needed waking)
 */
static inline int check_txq_status(struct lio *lio)
{
	int numqs = lio->netdev->real_num_tx_queues;
	int ret_val = 0;
	int q, iq;

	/* check each sub-queue state */
	for (q = 0; q < numqs; q++) {
		iq = lio->linfo.txpciq[q %
			lio->oct_dev->num_iqs].s.q_no;
		if (octnet_iq_is_full(lio->oct_dev, iq))
			continue;
		if (__netif_subqueue_stopped(lio->netdev, q)) {
			netif_wake_subqueue(lio->netdev, q);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
						  tx_restart, 1);
			ret_val++;
		}
	}

	return ret_val;
}

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains the max MTU of the lio
	 * interface. This work is invoked only when the new max MTU is
	 * less than the current MTU.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
		__func__, lio->linfo.link.u64, ls->u64);
	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
			netif_carrier_on(netdev);
			wake_txqs(netdev);
		} else {
			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
			netif_carrier_off(netdev);
			stop_txqs(netdev);
		}
		if (lio->linfo.link.s.mtu != current_max_mtu) {
			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
				   current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}
		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}

/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	ktime_get_real_ts64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	} else {
		WRITE_ONCE(sc->caller_is_done, true);
	}

	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}

/**
 * setup_sync_octeon_time_wq - Sets up the work to periodically update
 * local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}

/**
 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
 * to periodically update local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}

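/**
 * \brief Find the sibling Octeon PF on the same physical NIC, if any
 * @param oct Pointer to Octeon device
 * @returns the other device sharing this PF's PCI bus and slot, or NULL
 */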
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}

static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}

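/**
 * \brief Watchdog thread for CN23XX PFs. Polls the SCRATCH2 CSR, which
 * the firmware uses to report crashed or stuck cores; on failure it
 * logs each affected core once, disables all VF links, and (when module
 * unload is configured) drops module references still held for VFs.
 * @param param Pointer to Octeon device
 */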
static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
						(mask_of_crashed_or_stuck_cores
						 >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		/* other_oct may be NULL when this PF has no sibling */
		vfs_mask2 = other_oct ?
			READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask) : 0;

		vfs_referencing_pf  = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if (((pdev->device == OCTEON_CN66XX) ||
	     (pdev->device == OCTEON_CN68XX)))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	oct_dev->subsystem_id = pdev->subsystem_vendor |
		(pdev->subsystem_device << 16);

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u8 bus, device, function;

		if (atomic_read(oct_dev->adapter_refcount) == 1) {
			/* Each NIC gets one watchdog kernel thread.  The first
			 * PF (of each NIC) that gets pci_driver->probe()'d
			 * creates that thread.
			 */
			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

static bool fw_type_is_auto(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:

		/* fallthrough */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Force all requests waiting to be fetched by OCTEON to
		 * complete.
		 */
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			struct octeon_instr_queue *iq;

			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			iq = oct->instr_queue[i];

			if (atomic_read(&iq->instr_pending)) {
				spin_lock_bh(&iq->lock);
				iq->fill_cnt = 0;
				iq->octeon_read_index = iq->host_write_index;
				iq->stats.instr_processed +=
					atomic_read(&iq->instr_pending);
				lio_process_iq_request_list(oct, iq, 0);
				spin_unlock_bh(&iq->lock);
			}
		}

		lio_process_ordered_list(oct, 1);
		octeon_free_sc_done_list(oct);
		octeon_free_sc_zombie_list(oct);

	/* fallthrough */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].vector) {
					/* clear the affinity_cpumask */
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

	/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

	/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

	/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate octeon_soft_command\n");
		return;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
		octeon_free_soft_command(oct, sc);
		return;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return;

		oct->props[lio->ifidx].rx_on = start_stop;
		WRITE_ONCE(sc->caller_is_done, true);
	}
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct lio *lio;
	struct napi_struct *napi, *n;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_sync_octeon_time_wq(netdev);
	cleanup_link_status_change_wq(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	lio_delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	lio_vf_rep_destroy(oct);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	if (oct->devlink) {
		devlink_unregister(oct->devlink);
		devlink_free(oct->devlink);
		oct->devlink = NULL;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->watchdog_task)
		kthread_stop(oct_dev->watchdog_task);

	if (!oct_dev->octeon_id &&
	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
		lio_vf_rep_modexit();

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * \brief Identify the Octeon device and map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;
	char *s;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		s = "CN68XX";
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		s = "CN66XX";
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
		if (ret)
			break;
#ifdef CONFIG_PCI_IOV
		if (!ret)
			pci_sriov_set_totalvfs(oct->pci_dev,
					       oct->sriov_info.max_vfs);
#endif
		s = "CN23XX";
		break;

	default:
		s = "?";
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	if (!ret)
		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
			 OCTEON_MAJOR_REV(oct),
			 OCTEON_MINOR_REV(oct),
			 octeon_get_conf(oct)->card_name,
			 LIQUIDIO_VERSION);

	return ret;
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}

/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */
}

/**
 * \brief Adjust ptp frequency
 * @param ptp PTP clock info
 * @param ppb how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
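	/* Worked example with a hypothetical 1 GHz coprocessor clock:
	 * the nominal compensation word is (10^9 << 32) / 10^9 = 2^32,
	 * and ppb = 1000 gives delta = (1000 << 32) / 10^9 ~= 4295,
	 * i.e. the compensation changes by exactly 1000 parts per
	 * billion of its nominal value.
	 */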
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Adjust ptp time
 * @param ptp PTP clock info
 * @param delta how much to adjust by, in nanoseconds
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Get hardware clock time, including any adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * \brief Set hardware clock time. Reset adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief PTP ancillary feature enable callback; no such features are
 * supported, so this always fails
 * @param ptp PTP clock info
 * @param rq request
 * @param on whether to enable or disable the feature
 */
static int
liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
		    struct ptp_clock_request *rq __attribute__((unused)),
		    int on __attribute__((unused)))
{
	return -EOPNOTSUPP;
}

/**
 * \brief Open PTP clock source
 * @param netdev network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					     &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}

/**
 * \brief Init PTP clock
 * @param oct octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

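	/* The compensation register holds nanoseconds-per-coprocessor-cycle
	 * in 32.32 fixed point; the hardware adds it to the PTP clock on
	 * every coprocessor cycle (see liquidio_ptp_adjfreq above).
	 */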
	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}

/**
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (fw_type_is_auto()) {
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
	} else {
		tmp_fw_type = fw_type;
	}

	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);
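
	/* For example, a CN23XX card with the default "nic" firmware type
	 * ends up requesting "liquidio/lio_23xx_nic.bin" (assuming the
	 * usual values of the LIO_FW_* macros).
	 */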

	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}

/**
 * \brief Poll routine for checking transmit queue status
 * @param work work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}

/**
 * \brief Sets up the txq poll check
 * @param netdev network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
	return 0;
}

static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}

/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	if (oct->ptp_enable)
		oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			if (setup_tx_poll_fn(netdev))
				return -1;
	} else {
		if (setup_tx_poll_fn(netdev))
			return -1;
	}

	netif_tx_start_all_queues(netdev);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	/* start periodic statistics fetch */
	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
	lio->stats_wk.ctxptr = lio;
	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
					(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;
}
1855 
1856 /**
1857  * \brief Net device stop for LiquidIO
1858  * @param netdev network device
1859  */
1860 static int liquidio_stop(struct net_device *netdev)
1861 {
1862 	struct lio *lio = GET_LIO(netdev);
1863 	struct octeon_device *oct = lio->oct_dev;
1864 	struct napi_struct *napi, *n;
1865 
1866 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1867 
1868 	/* Stop any link updates */
1869 	lio->intf_open = 0;
1870 
1871 	stop_txqs(netdev);
1872 
1873 	/* Inform that netif carrier is down */
1874 	netif_carrier_off(netdev);
1875 	netif_tx_disable(netdev);
1876 
1877 	lio->linfo.link.s.link_up = 0;
1878 	lio->link_changes++;
1879 
1880 	/* Tell Octeon that nic interface is down. */
1881 	send_rx_ctrl_cmd(lio, 0);
1882 
1883 	if (OCTEON_CN23XX_PF(oct)) {
1884 		if (!oct->msix_on)
1885 			cleanup_tx_poll_fn(netdev);
1886 	} else {
1887 		cleanup_tx_poll_fn(netdev);
1888 	}
1889 
1890 	cancel_delayed_work_sync(&lio->stats_wk.work);
1891 
1892 	if (lio->ptp_clock) {
1893 		ptp_clock_unregister(lio->ptp_clock);
1894 		lio->ptp_clock = NULL;
1895 	}
1896 
1897 	/* Wait for any pending Rx descriptors */
1898 	if (lio_wait_for_clean_oq(oct))
1899 		netif_info(lio, rx_err, lio->netdev,
1900 			   "Proceeding with stop interface after partial RX desc processing\n");
1901 
1902 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1903 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1904 			napi_disable(napi);
1905 
1906 		oct->props[lio->ifidx].napi_enabled = 0;
1907 
1908 		if (OCTEON_CN23XX_PF(oct))
1909 			oct->droq[0]->ops.poll_mode = 0;
1910 	}
1911 
1912 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1913 
1914 	return 0;
1915 }
1916 
/**
 * \brief Converts net device flags to an interface flags mask
 * @param netdev network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
1924 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1925 {
1926 	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1927 
1928 	if (netdev->flags & IFF_PROMISC)
1929 		f |= OCTNET_IFFLAG_PROMISC;
1930 
1931 	if (netdev->flags & IFF_ALLMULTI)
1932 		f |= OCTNET_IFFLAG_ALLMULTI;
1933 
1934 	if (netdev->flags & IFF_MULTICAST) {
1935 		f |= OCTNET_IFFLAG_MULTICAST;
1936 
1937 		/* Accept all multicast addresses if there are more than we
1938 		 * can handle
1939 		 */
1940 		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1941 			f |= OCTNET_IFFLAG_ALLMULTI;
1942 	}
1943 
1944 	if (netdev->flags & IFF_BROADCAST)
1945 		f |= OCTNET_IFFLAG_BROADCAST;
1946 
1947 	return f;
1948 }
1949 
1950 /**
1951  * \brief Net device set_multicast_list
1952  * @param netdev network device
1953  */
1954 static void liquidio_set_mcast_list(struct net_device *netdev)
1955 {
1956 	struct lio *lio = GET_LIO(netdev);
1957 	struct octeon_device *oct = lio->oct_dev;
1958 	struct octnic_ctrl_pkt nctrl;
1959 	struct netdev_hw_addr *ha;
1960 	u64 *mc;
1961 	int ret;
1962 	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1963 
1964 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1965 
1966 	/* Create a ctrl pkt command to be sent to core app. */
1967 	nctrl.ncmd.u64 = 0;
1968 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1969 	nctrl.ncmd.s.param1 = get_new_flags(netdev);
1970 	nctrl.ncmd.s.param2 = mc_count;
1971 	nctrl.ncmd.s.more = mc_count;
1972 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1973 	nctrl.netpndev = (u64)netdev;
1974 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1975 
1976 	/* copy all the addresses into the udd */
1977 	mc = &nctrl.udd[0];
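	/* Each 64-bit udd slot carries one 6-byte MAC address, offset by
	 * two bytes so the address is presented in network byte order.
	 */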
1978 	netdev_for_each_mc_addr(ha, netdev) {
1979 		*mc = 0;
1980 		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
1981 		/* no need to swap bytes */
1982 
1983 		if (++mc > &nctrl.udd[mc_count])
1984 			break;
1985 	}
1986 
	/* This callback runs in atomic context, so we must not sleep
	 * waiting for the firmware's response.
	 */
1990 
1991 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1992 	if (ret) {
1993 		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1994 			ret);
1995 	}
1996 }
1997 
1998 /**
1999  * \brief Net device set_mac_address
2000  * @param netdev network device
2001  */
2002 static int liquidio_set_mac(struct net_device *netdev, void *p)
2003 {
2004 	int ret = 0;
2005 	struct lio *lio = GET_LIO(netdev);
2006 	struct octeon_device *oct = lio->oct_dev;
2007 	struct sockaddr *addr = (struct sockaddr *)p;
2008 	struct octnic_ctrl_pkt nctrl;
2009 
2010 	if (!is_valid_ether_addr(addr->sa_data))
2011 		return -EADDRNOTAVAIL;
2012 
2013 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2014 
2015 	nctrl.ncmd.u64 = 0;
2016 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2017 	nctrl.ncmd.s.param1 = 0;
2018 	nctrl.ncmd.s.more = 1;
2019 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2020 	nctrl.netpndev = (u64)netdev;
2021 
2022 	nctrl.udd[0] = 0;
2023 	/* The MAC Address is presented in network byte order. */
2024 	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2025 
2026 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2027 	if (ret < 0) {
2028 		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2029 		return -ENOMEM;
2030 	}
2031 
2032 	if (nctrl.sc_status) {
2033 		dev_err(&oct->pci_dev->dev,
2034 			"%s: MAC Address change failed. sc return=%x\n",
2035 			 __func__, nctrl.sc_status);
2036 		return -EIO;
2037 	}
2038 
2039 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2040 	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2041 
2042 	return 0;
2043 }
2044 
2045 static void
2046 liquidio_get_stats64(struct net_device *netdev,
2047 		     struct rtnl_link_stats64 *lstats)
2048 {
2049 	struct lio *lio = GET_LIO(netdev);
2050 	struct octeon_device *oct;
2051 	u64 pkts = 0, drop = 0, bytes = 0;
2052 	struct oct_droq_stats *oq_stats;
2053 	struct oct_iq_stats *iq_stats;
2054 	int i, iq_no, oq_no;
2055 
2056 	oct = lio->oct_dev;
2057 
2058 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2059 		return;
2060 
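	/* Accumulate Tx counters across every instruction queue owned by
	 * this interface.
	 */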
2061 	for (i = 0; i < oct->num_iqs; i++) {
2062 		iq_no = lio->linfo.txpciq[i].s.q_no;
2063 		iq_stats = &oct->instr_queue[iq_no]->stats;
2064 		pkts += iq_stats->tx_done;
2065 		drop += iq_stats->tx_dropped;
2066 		bytes += iq_stats->tx_tot_bytes;
2067 	}
2068 
2069 	lstats->tx_packets = pkts;
2070 	lstats->tx_bytes = bytes;
2071 	lstats->tx_dropped = drop;
2072 
2073 	pkts = 0;
2074 	drop = 0;
2075 	bytes = 0;
2076 
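	/* Accumulate Rx counters across every output (DROQ) queue. */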
2077 	for (i = 0; i < oct->num_oqs; i++) {
2078 		oq_no = lio->linfo.rxpciq[i].s.q_no;
2079 		oq_stats = &oct->droq[oq_no]->stats;
2080 		pkts += oq_stats->rx_pkts_received;
2081 		drop += (oq_stats->rx_dropped +
2082 			 oq_stats->dropped_nodispatch +
2083 			 oq_stats->dropped_toomany +
2084 			 oq_stats->dropped_nomem);
2085 		bytes += oq_stats->rx_bytes_received;
2086 	}
2087 
2088 	lstats->rx_bytes = bytes;
2089 	lstats->rx_packets = pkts;
2090 	lstats->rx_dropped = drop;
2091 
2092 	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2093 	lstats->collisions = oct->link_stats.fromhost.total_collisions;
2094 
	/* detailed rx_errors: */
	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
	/* received packet with CRC error */
	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
	/* received frame alignment error */
	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
	/* receiver FIFO overrun */
	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2103 
2104 	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2105 		lstats->rx_frame_errors + lstats->rx_fifo_errors;
2106 
2107 	/* detailed tx_errors */
2108 	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2109 	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2110 	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2111 
2112 	lstats->tx_errors = lstats->tx_aborted_errors +
2113 		lstats->tx_carrier_errors +
2114 		lstats->tx_fifo_errors;
2115 }
2116 
/**
 * \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 */
2123 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2124 {
2125 	struct hwtstamp_config conf;
2126 	struct lio *lio = GET_LIO(netdev);
2127 
2128 	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2129 		return -EFAULT;
2130 
2131 	if (conf.flags)
2132 		return -EINVAL;
2133 
2134 	switch (conf.tx_type) {
2135 	case HWTSTAMP_TX_ON:
2136 	case HWTSTAMP_TX_OFF:
2137 		break;
2138 	default:
2139 		return -ERANGE;
2140 	}
2141 
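	/* The hardware cannot filter Rx timestamping per PTP class, so
	 * any specific filter request is widened to HWTSTAMP_FILTER_ALL
	 * below.
	 */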
2142 	switch (conf.rx_filter) {
2143 	case HWTSTAMP_FILTER_NONE:
2144 		break;
2145 	case HWTSTAMP_FILTER_ALL:
2146 	case HWTSTAMP_FILTER_SOME:
2147 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2148 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2149 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2150 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2151 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2152 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2153 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2154 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2155 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2156 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2157 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2158 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2159 	case HWTSTAMP_FILTER_NTP_ALL:
2160 		conf.rx_filter = HWTSTAMP_FILTER_ALL;
2161 		break;
2162 	default:
2163 		return -ERANGE;
2164 	}
2165 
2166 	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2167 		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2168 
2169 	else
2170 		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2171 
2172 	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2173 }
2174 
2175 /**
2176  * \brief ioctl handler
2177  * @param netdev network device
2178  * @param ifr interface request
2179  * @param cmd command
2180  */
2181 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2182 {
2183 	struct lio *lio = GET_LIO(netdev);
2184 
2185 	switch (cmd) {
2186 	case SIOCSHWTSTAMP:
2187 		if (lio->oct_dev->ptp_enable)
2188 			return hwtstamp_ioctl(netdev, ifr);
2189 		/* fall through */
2190 	default:
2191 		return -EOPNOTSUPP;
2192 	}
2193 }
2194 
/**
 * \brief handle a Tx timestamp response
 * @param oct octeon device
 * @param status response status
 * @param buf pointer to skb
 */
2200 static void handle_timestamp(struct octeon_device *oct,
2201 			     u32 status,
2202 			     void *buf)
2203 {
2204 	struct octnet_buf_free_info *finfo;
2205 	struct octeon_soft_command *sc;
2206 	struct oct_timestamp_resp *resp;
2207 	struct lio *lio;
2208 	struct sk_buff *skb = (struct sk_buff *)buf;
2209 
2210 	finfo = (struct octnet_buf_free_info *)skb->cb;
2211 	lio = finfo->lio;
2212 	sc = finfo->sc;
2213 	oct = lio->oct_dev;
2214 	resp = (struct oct_timestamp_resp *)sc->virtrptr;
2215 
2216 	if (status != OCTEON_REQUEST_DONE) {
2217 		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2218 			CVM_CAST64(status));
2219 		resp->timestamp = 0;
2220 	}
2221 
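	/* 64-bit swap required on LE machines */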
2222 	octeon_swap_8B_data(&resp->timestamp, 1);
2223 
2224 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2225 		struct skb_shared_hwtstamps ts;
2226 		u64 ns = resp->timestamp;
2227 
2228 		netif_info(lio, tx_done, lio->netdev,
2229 			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2230 			   skb, (unsigned long long)ns);
2231 		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2232 		skb_tstamp_tx(skb, &ts);
2233 	}
2234 
2235 	octeon_free_soft_command(oct, sc);
2236 	tx_buffer_free(skb);
2237 }
2238 
/** \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 * @param xmit_more more packets are queued behind this one
 */
2244 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2245 					 struct octnic_data_pkt *ndata,
2246 					 struct octnet_buf_free_info *finfo,
2247 					 int xmit_more)
2248 {
2249 	int retval;
2250 	struct octeon_soft_command *sc;
2251 	struct lio *lio;
2252 	int ring_doorbell;
2253 	u32 len;
2254 
2255 	lio = finfo->lio;
2256 
2257 	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2258 					    sizeof(struct oct_timestamp_resp));
2259 	finfo->sc = sc;
2260 
2261 	if (!sc) {
2262 		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2263 		return IQ_SEND_FAILED;
2264 	}
2265 
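	/* Promote the no-response request type to its response-carrying
	 * counterpart so the response manager invokes handle_timestamp()
	 * once the firmware returns the Tx timestamp.
	 */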
2266 	if (ndata->reqtype == REQTYPE_NORESP_NET)
2267 		ndata->reqtype = REQTYPE_RESP_NET;
2268 	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2269 		ndata->reqtype = REQTYPE_RESP_NET_SG;
2270 
2271 	sc->callback = handle_timestamp;
2272 	sc->callback_arg = finfo->skb;
2273 	sc->iq_no = ndata->q_no;
2274 
2275 	if (OCTEON_CN23XX_PF(oct))
2276 		len = (u32)((struct octeon_instr_ih3 *)
2277 			    (&sc->cmd.cmd3.ih3))->dlengsz;
2278 	else
2279 		len = (u32)((struct octeon_instr_ih2 *)
2280 			    (&sc->cmd.cmd2.ih2))->dlengsz;
2281 
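	/* Ring the doorbell only when the stack has no further packets
	 * queued behind this one; batching doorbell writes reduces PCIe
	 * overhead.
	 */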
2282 	ring_doorbell = !xmit_more;
2283 
2284 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2285 				     sc, len, ndata->reqtype);
2286 
2287 	if (retval == IQ_SEND_FAILED) {
2288 		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2289 			retval);
2290 		octeon_free_soft_command(oct, sc);
2291 	} else {
2292 		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2293 	}
2294 
2295 	return retval;
2296 }
2297 
/** \brief Transmit network packets to the Octeon interface
 * @param skb       skbuff to be transmitted
 * @param netdev    pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
2304 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2305 {
2306 	struct lio *lio;
2307 	struct octnet_buf_free_info *finfo;
2308 	union octnic_cmd_setup cmdsetup;
2309 	struct octnic_data_pkt ndata;
2310 	struct octeon_device *oct;
2311 	struct oct_iq_stats *stats;
2312 	struct octeon_instr_irh *irh;
2313 	union tx_info *tx_info;
2314 	int status = 0;
2315 	int q_idx = 0, iq_no = 0;
2316 	int j, xmit_more = 0;
2317 	u64 dptr = 0;
2318 	u32 tag = 0;
2319 
2320 	lio = GET_LIO(netdev);
2321 	oct = lio->oct_dev;
2322 
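	/* Map the skb to an instruction queue; the same index doubles as
	 * the packet's flow tag.
	 */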
2323 	q_idx = skb_iq(oct, skb);
2324 	tag = q_idx;
2325 	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2326 
2327 	stats = &oct->instr_queue[iq_no]->stats;
2328 
2329 	/* Check for all conditions in which the current packet cannot be
2330 	 * transmitted.
2331 	 */
2332 	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2333 	    (!lio->linfo.link.s.link_up) ||
2334 	    (skb->len <= 0)) {
2335 		netif_info(lio, tx_err, lio->netdev,
2336 			   "Transmit failed link_status : %d\n",
2337 			   lio->linfo.link.s.link_up);
2338 		goto lio_xmit_failed;
2339 	}
2340 
2341 	/* Use space in skb->cb to store info used to unmap and
2342 	 * free the buffers.
2343 	 */
2344 	finfo = (struct octnet_buf_free_info *)skb->cb;
2345 	finfo->lio = lio;
2346 	finfo->skb = skb;
2347 	finfo->sc = NULL;
2348 
2349 	/* Prepare the attributes for the data to be passed to OSI. */
2350 	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2351 
2352 	ndata.buf = (void *)finfo;
2353 
2354 	ndata.q_no = iq_no;
2355 
2356 	if (octnet_iq_is_full(oct, ndata.q_no)) {
2357 		/* defer sending if queue is full */
2358 		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2359 			   ndata.q_no);
2360 		stats->tx_iq_busy++;
2361 		return NETDEV_TX_BUSY;
2362 	}
2363 
2364 	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2365 	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2366 	 */
2367 
2368 	ndata.datasize = skb->len;
2369 
2370 	cmdsetup.u64 = 0;
2371 	cmdsetup.s.iq_no = iq_no;
2372 
2373 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2374 		if (skb->encapsulation) {
2375 			cmdsetup.s.tnl_csum = 1;
2376 			stats->tx_vxlan++;
2377 		} else {
2378 			cmdsetup.s.transport_csum = 1;
2379 		}
2380 	}
2381 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2382 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2383 		cmdsetup.s.timestamp = 1;
2384 	}
2385 
2386 	if (skb_shinfo(skb)->nr_frags == 0) {
2387 		cmdsetup.s.u.datasize = skb->len;
2388 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2389 
		/* Map the linear skb data for DMA to the device. */
2391 		dptr = dma_map_single(&oct->pci_dev->dev,
2392 				      skb->data,
2393 				      skb->len,
2394 				      DMA_TO_DEVICE);
2395 		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2396 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2397 				__func__);
2398 			stats->tx_dmamap_fail++;
2399 			return NETDEV_TX_BUSY;
2400 		}
2401 
2402 		if (OCTEON_CN23XX_PF(oct))
2403 			ndata.cmd.cmd3.dptr = dptr;
2404 		else
2405 			ndata.cmd.cmd2.dptr = dptr;
2406 		finfo->dptr = dptr;
2407 		ndata.reqtype = REQTYPE_NORESP_NET;
2408 
2409 	} else {
2410 		int i, frags;
2411 		struct skb_frag_struct *frag;
2412 		struct octnic_gather *g;
2413 
2414 		spin_lock(&lio->glist_lock[q_idx]);
2415 		g = (struct octnic_gather *)
2416 			lio_list_delete_head(&lio->glist[q_idx]);
2417 		spin_unlock(&lio->glist_lock[q_idx]);
2418 
2419 		if (!g) {
2420 			netif_info(lio, tx_err, lio->netdev,
2421 				   "Transmit scatter gather: glist null!\n");
2422 			goto lio_xmit_failed;
2423 		}
2424 
2425 		cmdsetup.s.gather = 1;
2426 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2427 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2428 
2429 		memset(g->sg, 0, g->sg_size);
2430 
2431 		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2432 						 skb->data,
2433 						 (skb->len - skb->data_len),
2434 						 DMA_TO_DEVICE);
2435 		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2436 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2437 				__func__);
2438 			stats->tx_dmamap_fail++;
2439 			return NETDEV_TX_BUSY;
2440 		}
2441 		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2442 
2443 		frags = skb_shinfo(skb)->nr_frags;
2444 		i = 1;
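		/* Each gather-list entry holds four pointers, so fragment
		 * pointer i lands in g->sg[i >> 2].ptr[i & 3]; slot 0 of
		 * entry 0 already holds the linear skb data mapped above.
		 */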
2445 		while (frags--) {
2446 			frag = &skb_shinfo(skb)->frags[i - 1];
2447 
2448 			g->sg[(i >> 2)].ptr[(i & 3)] =
2449 				dma_map_page(&oct->pci_dev->dev,
2450 					     frag->page.p,
2451 					     frag->page_offset,
2452 					     frag->size,
2453 					     DMA_TO_DEVICE);
2454 
2455 			if (dma_mapping_error(&oct->pci_dev->dev,
2456 					      g->sg[i >> 2].ptr[i & 3])) {
2457 				dma_unmap_single(&oct->pci_dev->dev,
2458 						 g->sg[0].ptr[0],
2459 						 skb->len - skb->data_len,
2460 						 DMA_TO_DEVICE);
2461 				for (j = 1; j < i; j++) {
2462 					frag = &skb_shinfo(skb)->frags[j - 1];
2463 					dma_unmap_page(&oct->pci_dev->dev,
2464 						       g->sg[j >> 2].ptr[j & 3],
2465 						       frag->size,
2466 						       DMA_TO_DEVICE);
2467 				}
2468 				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2469 					__func__);
2470 				return NETDEV_TX_BUSY;
2471 			}
2472 
2473 			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2474 			i++;
2475 		}
2476 
2477 		dptr = g->sg_dma_ptr;
2478 
2479 		if (OCTEON_CN23XX_PF(oct))
2480 			ndata.cmd.cmd3.dptr = dptr;
2481 		else
2482 			ndata.cmd.cmd2.dptr = dptr;
2483 		finfo->dptr = dptr;
2484 		finfo->g = g;
2485 
2486 		ndata.reqtype = REQTYPE_NORESP_NET_SG;
2487 	}
2488 
2489 	if (OCTEON_CN23XX_PF(oct)) {
2490 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2491 		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2492 	} else {
2493 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2494 		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2495 	}
2496 
2497 	if (skb_shinfo(skb)->gso_size) {
2498 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2499 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2500 		stats->tx_gso++;
2501 	}
2502 
	/* HW inserts the VLAN tag: TCI bits 15:13 carry the priority and
	 * bits 11:0 the VLAN ID.
	 */
2504 	if (skb_vlan_tag_present(skb)) {
2505 		irh->priority = skb_vlan_tag_get(skb) >> 13;
2506 		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2507 	}
2508 
2509 	xmit_more = skb->xmit_more;
2510 
2511 	if (unlikely(cmdsetup.s.timestamp))
2512 		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2513 	else
2514 		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2515 	if (status == IQ_SEND_FAILED)
2516 		goto lio_xmit_failed;
2517 
2518 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2519 
2520 	if (status == IQ_SEND_STOP)
2521 		netif_stop_subqueue(netdev, q_idx);
2522 
2523 	netif_trans_update(netdev);
2524 
2525 	if (tx_info->s.gso_segs)
2526 		stats->tx_done += tx_info->s.gso_segs;
2527 	else
2528 		stats->tx_done++;
2529 	stats->tx_tot_bytes += ndata.datasize;
2530 
2531 	return NETDEV_TX_OK;
2532 
2533 lio_xmit_failed:
2534 	stats->tx_dropped++;
2535 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2536 		   iq_no, stats->tx_dropped);
2537 	if (dptr)
2538 		dma_unmap_single(&oct->pci_dev->dev, dptr,
2539 				 ndata.datasize, DMA_TO_DEVICE);
2540 
2541 	octeon_ring_doorbell_locked(oct, iq_no);
2542 
2543 	tx_buffer_free(skb);
2544 	return NETDEV_TX_OK;
2545 }
2546 
2547 /** \brief Network device Tx timeout
2548  * @param netdev    pointer to network device
2549  */
2550 static void liquidio_tx_timeout(struct net_device *netdev)
2551 {
2552 	struct lio *lio;
2553 
2554 	lio = GET_LIO(netdev);
2555 
2556 	netif_info(lio, tx_err, lio->netdev,
2557 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2558 		   netdev->stats.tx_dropped);
2559 	netif_trans_update(netdev);
2560 	wake_txqs(netdev);
2561 }
2562 
2563 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2564 				    __be16 proto __attribute__((unused)),
2565 				    u16 vid)
2566 {
2567 	struct lio *lio = GET_LIO(netdev);
2568 	struct octeon_device *oct = lio->oct_dev;
2569 	struct octnic_ctrl_pkt nctrl;
2570 	int ret = 0;
2571 
2572 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2573 
2574 	nctrl.ncmd.u64 = 0;
2575 	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2576 	nctrl.ncmd.s.param1 = vid;
2577 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2578 	nctrl.netpndev = (u64)netdev;
2579 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2580 
2581 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2582 	if (ret) {
2583 		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2584 			ret);
2585 		if (ret > 0)
2586 			ret = -EIO;
2587 	}
2588 
2589 	return ret;
2590 }
2591 
2592 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2593 				     __be16 proto __attribute__((unused)),
2594 				     u16 vid)
2595 {
2596 	struct lio *lio = GET_LIO(netdev);
2597 	struct octeon_device *oct = lio->oct_dev;
2598 	struct octnic_ctrl_pkt nctrl;
2599 	int ret = 0;
2600 
2601 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2602 
2603 	nctrl.ncmd.u64 = 0;
2604 	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2605 	nctrl.ncmd.s.param1 = vid;
2606 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2607 	nctrl.netpndev = (u64)netdev;
2608 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2609 
2610 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2611 	if (ret) {
2612 		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2613 			ret);
2614 		if (ret > 0)
2615 			ret = -EIO;
2616 	}
2617 	return ret;
2618 }
2619 
/** Send a command to enable/disable RX checksum offload
 * @param netdev                pointer to network device
 * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE/
 *                              OCTNET_CMD_RXCSUM_DISABLE
 * @returns                     SUCCESS or FAILURE
 */
2627 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2628 				       u8 rx_cmd)
2629 {
2630 	struct lio *lio = GET_LIO(netdev);
2631 	struct octeon_device *oct = lio->oct_dev;
2632 	struct octnic_ctrl_pkt nctrl;
2633 	int ret = 0;
2634 
2635 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2636 
2637 	nctrl.ncmd.u64 = 0;
2638 	nctrl.ncmd.s.cmd = command;
2639 	nctrl.ncmd.s.param1 = rx_cmd;
2640 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2641 	nctrl.netpndev = (u64)netdev;
2642 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2643 
2644 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2645 	if (ret) {
2646 		dev_err(&oct->pci_dev->dev,
2647 			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2648 			ret);
2649 		if (ret > 0)
2650 			ret = -EIO;
2651 	}
2652 	return ret;
2653 }
2654 
/** Send a command to add/delete a VxLAN UDP port to the firmware
2656  * @param netdev                pointer to network device
2657  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
2658  * @param vxlan_port            VxLAN port to be added or deleted
2659  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
2660  *                              OCTNET_CMD_VXLAN_PORT_DEL
2661  * @returns                     SUCCESS or FAILURE
2662  */
2663 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2664 				       u16 vxlan_port, u8 vxlan_cmd_bit)
2665 {
2666 	struct lio *lio = GET_LIO(netdev);
2667 	struct octeon_device *oct = lio->oct_dev;
2668 	struct octnic_ctrl_pkt nctrl;
2669 	int ret = 0;
2670 
2671 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2672 
2673 	nctrl.ncmd.u64 = 0;
2674 	nctrl.ncmd.s.cmd = command;
2675 	nctrl.ncmd.s.more = vxlan_cmd_bit;
2676 	nctrl.ncmd.s.param1 = vxlan_port;
2677 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2678 	nctrl.netpndev = (u64)netdev;
2679 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2680 
2681 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2682 	if (ret) {
2683 		dev_err(&oct->pci_dev->dev,
2684 			"VxLAN port add/delete failed in core (ret:0x%x)\n",
2685 			ret);
2686 		if (ret > 0)
2687 			ret = -EIO;
2688 	}
2689 	return ret;
2690 }
2691 
2692 /** \brief Net device fix features
2693  * @param netdev  pointer to network device
2694  * @param request features requested
2695  * @returns updated features list
2696  */
2697 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2698 					       netdev_features_t request)
2699 {
2700 	struct lio *lio = netdev_priv(netdev);
2701 
2702 	if ((request & NETIF_F_RXCSUM) &&
2703 	    !(lio->dev_capability & NETIF_F_RXCSUM))
2704 		request &= ~NETIF_F_RXCSUM;
2705 
2706 	if ((request & NETIF_F_HW_CSUM) &&
2707 	    !(lio->dev_capability & NETIF_F_HW_CSUM))
2708 		request &= ~NETIF_F_HW_CSUM;
2709 
2710 	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2711 		request &= ~NETIF_F_TSO;
2712 
2713 	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2714 		request &= ~NETIF_F_TSO6;
2715 
2716 	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2717 		request &= ~NETIF_F_LRO;
2718 
	/* Disable LRO if RXCSUM is off */
2720 	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2721 	    (lio->dev_capability & NETIF_F_LRO))
2722 		request &= ~NETIF_F_LRO;
2723 
2724 	if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2725 	    !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2726 		request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2727 
2728 	return request;
2729 }
2730 
2731 /** \brief Net device set features
2732  * @param netdev  pointer to network device
2733  * @param features features to enable/disable
2734  */
2735 static int liquidio_set_features(struct net_device *netdev,
2736 				 netdev_features_t features)
2737 {
2738 	struct lio *lio = netdev_priv(netdev);
2739 
2740 	if ((features & NETIF_F_LRO) &&
2741 	    (lio->dev_capability & NETIF_F_LRO) &&
2742 	    !(netdev->features & NETIF_F_LRO))
2743 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2744 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2745 	else if (!(features & NETIF_F_LRO) &&
2746 		 (lio->dev_capability & NETIF_F_LRO) &&
2747 		 (netdev->features & NETIF_F_LRO))
2748 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2749 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2750 
	/* Send a command to the firmware to enable/disable RX checksum
	 * offload, as requested through ethtool.
	 */
2754 	if (!(netdev->features & NETIF_F_RXCSUM) &&
2755 	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2756 	    (features & NETIF_F_RXCSUM))
2757 		liquidio_set_rxcsum_command(netdev,
2758 					    OCTNET_CMD_TNL_RX_CSUM_CTL,
2759 					    OCTNET_CMD_RXCSUM_ENABLE);
2760 	else if ((netdev->features & NETIF_F_RXCSUM) &&
2761 		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2762 		 !(features & NETIF_F_RXCSUM))
2763 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2764 					    OCTNET_CMD_RXCSUM_DISABLE);
2765 
2766 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2767 	    (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2768 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2769 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2770 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
2771 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2772 		 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2773 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2774 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2775 				     OCTNET_CMD_VLAN_FILTER_DISABLE);
2776 
2777 	return 0;
2778 }
2779 
2780 static void liquidio_add_vxlan_port(struct net_device *netdev,
2781 				    struct udp_tunnel_info *ti)
2782 {
2783 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2784 		return;
2785 
2786 	liquidio_vxlan_port_command(netdev,
2787 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2788 				    htons(ti->port),
2789 				    OCTNET_CMD_VXLAN_PORT_ADD);
2790 }
2791 
2792 static void liquidio_del_vxlan_port(struct net_device *netdev,
2793 				    struct udp_tunnel_info *ti)
2794 {
2795 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2796 		return;
2797 
2798 	liquidio_vxlan_port_command(netdev,
2799 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2800 				    htons(ti->port),
2801 				    OCTNET_CMD_VXLAN_PORT_DEL);
2802 }
2803 
2804 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2805 				 u8 *mac, bool is_admin_assigned)
2806 {
2807 	struct lio *lio = GET_LIO(netdev);
2808 	struct octeon_device *oct = lio->oct_dev;
2809 	struct octnic_ctrl_pkt nctrl;
2810 	int ret = 0;
2811 
2812 	if (!is_valid_ether_addr(mac))
2813 		return -EINVAL;
2814 
2815 	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2816 		return -EINVAL;
2817 
2818 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2819 
2820 	nctrl.ncmd.u64 = 0;
2821 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2822 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
2823 	nctrl.ncmd.s.param1 = vfidx + 1;
2824 	nctrl.ncmd.s.more = 1;
2825 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2826 	nctrl.netpndev = (u64)netdev;
2827 	if (is_admin_assigned) {
2828 		nctrl.ncmd.s.param2 = true;
2829 		nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2830 	}
2831 
2832 	nctrl.udd[0] = 0;
2833 	/* The MAC Address is presented in network byte order. */
2834 	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2835 
2836 	oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2837 
2838 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2839 	if (ret > 0)
2840 		ret = -EIO;
2841 
2842 	return ret;
2843 }
2844 
2845 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2846 {
2847 	struct lio *lio = GET_LIO(netdev);
2848 	struct octeon_device *oct = lio->oct_dev;
2849 	int retval;
2850 
2851 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2852 		return -EINVAL;
2853 
2854 	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2855 	if (!retval)
2856 		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2857 
2858 	return retval;
2859 }
2860 
2861 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2862 				u16 vlan, u8 qos, __be16 vlan_proto)
2863 {
2864 	struct lio *lio = GET_LIO(netdev);
2865 	struct octeon_device *oct = lio->oct_dev;
2866 	struct octnic_ctrl_pkt nctrl;
2867 	u16 vlantci;
2868 	int ret = 0;
2869 
2870 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2871 		return -EINVAL;
2872 
2873 	if (vlan_proto != htons(ETH_P_8021Q))
2874 		return -EPROTONOSUPPORT;
2875 
2876 	if (vlan >= VLAN_N_VID || qos > 7)
2877 		return -EINVAL;
2878 
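	/* Build the 802.1Q TCI: VLAN ID in bits 11:0, user priority in
	 * bits 15:13.
	 */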
2879 	if (vlan)
2880 		vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2881 	else
2882 		vlantci = 0;
2883 
2884 	if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2885 		return 0;
2886 
2887 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2888 
2889 	if (vlan)
2890 		nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2891 	else
2892 		nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2893 
2894 	nctrl.ncmd.s.param1 = vlantci;
2895 	nctrl.ncmd.s.param2 =
2896 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2897 	nctrl.ncmd.s.more = 0;
2898 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2899 	nctrl.cb_fn = NULL;
2900 
2901 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2902 	if (ret) {
2903 		if (ret > 0)
2904 			ret = -EIO;
2905 		return ret;
2906 	}
2907 
2908 	oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2909 
2910 	return ret;
2911 }
2912 
2913 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2914 				  struct ifla_vf_info *ivi)
2915 {
2916 	struct lio *lio = GET_LIO(netdev);
2917 	struct octeon_device *oct = lio->oct_dev;
2918 	u8 *macaddr;
2919 
2920 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2921 		return -EINVAL;
2922 
2923 	ivi->vf = vfidx;
2924 	macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2925 	ether_addr_copy(&ivi->mac[0], macaddr);
2926 	ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2927 	ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2928 	if (oct->sriov_info.trusted_vf.active &&
2929 	    oct->sriov_info.trusted_vf.id == vfidx)
2930 		ivi->trusted = true;
2931 	else
2932 		ivi->trusted = false;
2933 	ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
2934 	return 0;
2935 }
2936 
2937 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
2938 {
2939 	struct octeon_device *oct = lio->oct_dev;
2940 	struct octeon_soft_command *sc;
2941 	int retval;
2942 
2943 	sc = octeon_alloc_soft_command(oct, 0, 16, 0);
2944 	if (!sc)
2945 		return -ENOMEM;
2946 
2947 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2948 
2949 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
2950 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
2951 				    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
2952 				    trusted);
2953 
2954 	init_completion(&sc->complete);
2955 	sc->sc_status = OCTEON_REQUEST_PENDING;
2956 
2957 	retval = octeon_send_soft_command(oct, sc);
2958 	if (retval == IQ_SEND_FAILED) {
2959 		octeon_free_soft_command(oct, sc);
2960 		retval = -1;
2961 	} else {
2962 		/* Wait for response or timeout */
2963 		retval = wait_for_sc_completion_timeout(oct, sc, 0);
2964 		if (retval)
			return retval;
2966 
2967 		WRITE_ONCE(sc->caller_is_done, true);
2968 	}
2969 
2970 	return retval;
2971 }
2972 
2973 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
2974 				 bool setting)
2975 {
2976 	struct lio *lio = GET_LIO(netdev);
2977 	struct octeon_device *oct = lio->oct_dev;
2978 
2979 	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
2980 		/* trusted vf is not supported by firmware older than 1.7.1 */
2981 		return -EOPNOTSUPP;
2982 	}
2983 
2984 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2985 		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2986 		return -EINVAL;
2987 	}
2988 
2989 	if (setting) {
2990 		/* Set */
2991 
2992 		if (oct->sriov_info.trusted_vf.active &&
2993 		    oct->sriov_info.trusted_vf.id == vfidx)
2994 			return 0;
2995 
2996 		if (oct->sriov_info.trusted_vf.active) {
2997 			netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
2998 			return -EPERM;
2999 		}
3000 	} else {
3001 		/* Clear */
3002 
3003 		if (!oct->sriov_info.trusted_vf.active)
3004 			return 0;
3005 	}
3006 
3007 	if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3008 		if (setting) {
3009 			oct->sriov_info.trusted_vf.id = vfidx;
3010 			oct->sriov_info.trusted_vf.active = true;
3011 		} else {
3012 			oct->sriov_info.trusted_vf.active = false;
3013 		}
3014 
3015 		netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3016 			   setting ? "" : "not ");
3017 	} else {
3018 		netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
		return -EIO;
3020 	}
3021 
3022 	return 0;
3023 }
3024 
3025 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3026 				      int linkstate)
3027 {
3028 	struct lio *lio = GET_LIO(netdev);
3029 	struct octeon_device *oct = lio->oct_dev;
3030 	struct octnic_ctrl_pkt nctrl;
3031 	int ret = 0;
3032 
3033 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3034 		return -EINVAL;
3035 
3036 	if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3037 		return 0;
3038 
3039 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3040 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3041 	nctrl.ncmd.s.param1 =
3042 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3043 	nctrl.ncmd.s.param2 = linkstate;
3044 	nctrl.ncmd.s.more = 0;
3045 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3046 	nctrl.cb_fn = NULL;
3047 
3048 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3049 
3050 	if (!ret)
3051 		oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3052 	else if (ret > 0)
3053 		ret = -EIO;
3054 
3055 	return ret;
3056 }
3057 
3058 static int
3059 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3060 {
3061 	struct lio_devlink_priv *priv;
3062 	struct octeon_device *oct;
3063 
3064 	priv = devlink_priv(devlink);
3065 	oct = priv->oct;
3066 
3067 	*mode = oct->eswitch_mode;
3068 
3069 	return 0;
3070 }
3071 
3072 static int
3073 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
3074 {
3075 	struct lio_devlink_priv *priv;
3076 	struct octeon_device *oct;
3077 	int ret = 0;
3078 
3079 	priv = devlink_priv(devlink);
3080 	oct = priv->oct;
3081 
3082 	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3083 		return -EINVAL;
3084 
3085 	if (oct->eswitch_mode == mode)
3086 		return 0;
3087 
3088 	switch (mode) {
3089 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3090 		oct->eswitch_mode = mode;
3091 		ret = lio_vf_rep_create(oct);
3092 		break;
3093 
3094 	case DEVLINK_ESWITCH_MODE_LEGACY:
3095 		lio_vf_rep_destroy(oct);
3096 		oct->eswitch_mode = mode;
3097 		break;
3098 
3099 	default:
3100 		ret = -EINVAL;
3101 	}
3102 
3103 	return ret;
3104 }
3105 
3106 static const struct devlink_ops liquidio_devlink_ops = {
3107 	.eswitch_mode_get = liquidio_eswitch_mode_get,
3108 	.eswitch_mode_set = liquidio_eswitch_mode_set,
3109 };
3110 
3111 static int
3112 lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
3113 {
3114 	struct lio *lio = GET_LIO(dev);
3115 	struct octeon_device *oct = lio->oct_dev;
3116 
3117 	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3118 		return -EOPNOTSUPP;
3119 
3120 	switch (attr->id) {
3121 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
3122 		attr->u.ppid.id_len = ETH_ALEN;
3123 		ether_addr_copy(attr->u.ppid.id,
3124 				(void *)&lio->linfo.hw_addr + 2);
3125 		break;
3126 
3127 	default:
3128 		return -EOPNOTSUPP;
3129 	}
3130 
3131 	return 0;
3132 }
3133 
3134 static const struct switchdev_ops lio_pf_switchdev_ops = {
3135 	.switchdev_port_attr_get = lio_pf_switchdev_attr_get,
3136 };
3137 
3138 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3139 				 struct ifla_vf_stats *vf_stats)
3140 {
3141 	struct lio *lio = GET_LIO(netdev);
3142 	struct octeon_device *oct = lio->oct_dev;
3143 	struct oct_vf_stats stats;
3144 	int ret;
3145 
3146 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3147 		return -EINVAL;
3148 
3149 	memset(&stats, 0, sizeof(struct oct_vf_stats));
3150 	ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3151 	if (!ret) {
3152 		vf_stats->rx_packets = stats.rx_packets;
3153 		vf_stats->tx_packets = stats.tx_packets;
3154 		vf_stats->rx_bytes = stats.rx_bytes;
3155 		vf_stats->tx_bytes = stats.tx_bytes;
3156 		vf_stats->broadcast = stats.broadcast;
3157 		vf_stats->multicast = stats.multicast;
3158 	}
3159 
3160 	return ret;
3161 }
3162 
3163 static const struct net_device_ops lionetdevops = {
3164 	.ndo_open		= liquidio_open,
3165 	.ndo_stop		= liquidio_stop,
3166 	.ndo_start_xmit		= liquidio_xmit,
3167 	.ndo_get_stats64	= liquidio_get_stats64,
3168 	.ndo_set_mac_address	= liquidio_set_mac,
3169 	.ndo_set_rx_mode	= liquidio_set_mcast_list,
3170 	.ndo_tx_timeout		= liquidio_tx_timeout,
3171 
3172 	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3173 	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3174 	.ndo_change_mtu		= liquidio_change_mtu,
3175 	.ndo_do_ioctl		= liquidio_ioctl,
3176 	.ndo_fix_features	= liquidio_fix_features,
3177 	.ndo_set_features	= liquidio_set_features,
3178 	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
3179 	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
3180 	.ndo_set_vf_mac		= liquidio_set_vf_mac,
3181 	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
3182 	.ndo_get_vf_config	= liquidio_get_vf_config,
3183 	.ndo_set_vf_trust	= liquidio_set_vf_trust,
3184 	.ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3185 	.ndo_get_vf_stats	= liquidio_get_vf_stats,
3186 };
3187 
3188 /** \brief Entry point for the liquidio module
3189  */
3190 static int __init liquidio_init(void)
3191 {
3192 	int i;
3193 	struct handshake *hs;
3194 
3195 	init_completion(&first_stage);
3196 
3197 	octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3198 
3199 	if (liquidio_init_pci())
3200 		return -EINVAL;
3201 
3202 	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3203 
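	/* First pass: wait for each probed device to complete its init
	 * handshake.
	 */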
3204 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3205 		hs = &handshake[i];
3206 		if (hs->pci_dev) {
3207 			wait_for_completion(&hs->init);
3208 			if (!hs->init_ok) {
3209 				/* init handshake failed */
3210 				dev_err(&hs->pci_dev->dev,
3211 					"Failed to init device\n");
3212 				liquidio_deinit_pci();
3213 				return -EIO;
3214 			}
3215 		}
3216 	}
3217 
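	/* Second pass: wait (up to 30 seconds per device) for the
	 * firmware to report that it has started.
	 */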
3218 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3219 		hs = &handshake[i];
3220 		if (hs->pci_dev) {
3221 			wait_for_completion_timeout(&hs->started,
3222 						    msecs_to_jiffies(30000));
3223 			if (!hs->started_ok) {
3224 				/* starter handshake failed */
3225 				dev_err(&hs->pci_dev->dev,
3226 					"Firmware failed to start\n");
3227 				liquidio_deinit_pci();
3228 				return -EIO;
3229 			}
3230 		}
3231 	}
3232 
3233 	return 0;
3234 }
3235 
3236 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3237 {
3238 	struct octeon_device *oct = (struct octeon_device *)buf;
3239 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3240 	int gmxport = 0;
3241 	union oct_link_status *ls;
3242 	int i;
3243 
3244 	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3245 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3246 			recv_pkt->buffer_size[0],
3247 			recv_pkt->rh.r_nic_info.gmxport);
3248 		goto nic_info_err;
3249 	}
3250 
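	/* The link status payload follows the DROQ info header; update
	 * the netdev that owns this gmxport.
	 */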
3251 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
3252 	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3253 		OCT_DROQ_INFO_SIZE);
3254 
3255 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3256 	for (i = 0; i < oct->ifcount; i++) {
3257 		if (oct->props[i].gmxport == gmxport) {
3258 			update_link_status(oct->props[i].netdev, ls);
3259 			break;
3260 		}
3261 	}
3262 
3263 nic_info_err:
3264 	for (i = 0; i < recv_pkt->buffer_count; i++)
3265 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
3266 	octeon_free_recv_info(recv_info);
3267 	return 0;
3268 }
3269 
3270 /**
3271  * \brief Setup network interfaces
3272  * @param octeon_dev  octeon device
3273  *
3274  * Called during init time for each device. It assumes the NIC
3275  * is already up and running.  The link information for each
3276  * interface is passed in link_info.
3277  */
3278 static int setup_nic_devices(struct octeon_device *octeon_dev)
3279 {
3280 	struct lio *lio = NULL;
3281 	struct net_device *netdev;
3282 	u8 mac[6], i, j, *fw_ver, *micro_ver;
3283 	unsigned long micro;
3284 	u32 cur_ver;
3285 	struct octeon_soft_command *sc;
3286 	struct liquidio_if_cfg_resp *resp;
3287 	struct octdev_props *props;
3288 	int retval, num_iqueues, num_oqueues;
3289 	int max_num_queues = 0;
3290 	union oct_nic_if_cfg if_cfg;
3291 	unsigned int base_queue;
3292 	unsigned int gmx_port_id;
3293 	u32 resp_size, data_size;
3294 	u32 ifidx_or_pfnum;
3295 	struct lio_version *vdata;
3296 	struct devlink *devlink;
3297 	struct lio_devlink_priv *lio_devlink;
3298 
3299 	/* This is to handle link status changes */
3300 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3301 				    OPCODE_NIC_INFO,
3302 				    lio_nic_info, octeon_dev);
3303 
3304 	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3305 	 * They are handled directly.
3306 	 */
3307 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3308 					free_netbuf);
3309 
3310 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3311 					free_netsgbuf);
3312 
3313 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3314 					free_netsgbuf_with_resp);
3315 
3316 	for (i = 0; i < octeon_dev->ifcount; i++) {
3317 		resp_size = sizeof(struct liquidio_if_cfg_resp);
3318 		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, 0);
		if (!sc) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to allocate soft command\n");
			goto setup_nic_dev_done;
		}

		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3323 		vdata = (struct lio_version *)sc->virtdptr;
3324 
3325 		*((u64 *)vdata) = 0;
3326 		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3327 		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3328 		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3329 
3330 		if (OCTEON_CN23XX_PF(octeon_dev)) {
3331 			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3332 			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3333 			base_queue = octeon_dev->sriov_info.pf_srn;
3334 
3335 			gmx_port_id = octeon_dev->pf_num;
3336 			ifidx_or_pfnum = octeon_dev->pf_num;
3337 		} else {
3338 			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3339 						octeon_get_conf(octeon_dev), i);
3340 			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3341 						octeon_get_conf(octeon_dev), i);
3342 			base_queue = CFG_GET_BASE_QUE_NIC_IF(
3343 						octeon_get_conf(octeon_dev), i);
3344 			gmx_port_id = CFG_GET_GMXID_NIC_IF(
3345 						octeon_get_conf(octeon_dev), i);
3346 			ifidx_or_pfnum = i;
3347 		}
3348 
3349 		dev_dbg(&octeon_dev->pci_dev->dev,
3350 			"requesting config for interface %d, iqs %d, oqs %d\n",
3351 			ifidx_or_pfnum, num_iqueues, num_oqueues);
3352 
3353 		if_cfg.u64 = 0;
3354 		if_cfg.s.num_iqueues = num_iqueues;
3355 		if_cfg.s.num_oqueues = num_oqueues;
3356 		if_cfg.s.base_queue = base_queue;
3357 		if_cfg.s.gmx_port_id = gmx_port_id;
3358 
3359 		sc->iq_no = 0;
3360 
3361 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3362 					    OPCODE_NIC_IF_CFG, 0,
3363 					    if_cfg.u64, 0);
3364 
3365 		init_completion(&sc->complete);
3366 		sc->sc_status = OCTEON_REQUEST_PENDING;
3367 
3368 		retval = octeon_send_soft_command(octeon_dev, sc);
3369 		if (retval == IQ_SEND_FAILED) {
3370 			dev_err(&octeon_dev->pci_dev->dev,
3371 				"iq/oq config failed status: %x\n",
3372 				retval);
3373 			/* Soft instr is freed by driver in case of failure. */
3374 			octeon_free_soft_command(octeon_dev, sc);
			return -EIO;
3376 		}
3377 
		/* Block until the firmware's response arrives or the wait
		 * times out.
		 */
3381 		retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3382 		if (retval)
3383 			return retval;
3384 
3385 		retval = resp->status;
3386 		if (retval) {
3387 			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3388 			WRITE_ONCE(sc->caller_is_done, true);
3389 			goto setup_nic_dev_done;
3390 		}
3391 		snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3392 			 32, "%s",
3393 			 resp->cfg_info.liquidio_firmware_version);
3394 
3395 		/* Verify f/w version (in case of 'auto' loading from flash) */
3396 		fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3397 		if (memcmp(LIQUIDIO_BASE_VERSION,
3398 			   fw_ver,
3399 			   strlen(LIQUIDIO_BASE_VERSION))) {
3400 			dev_err(&octeon_dev->pci_dev->dev,
3401 				"Unmatched firmware version. Expected %s.x, got %s.\n",
3402 				LIQUIDIO_BASE_VERSION, fw_ver);
3403 			WRITE_ONCE(sc->caller_is_done, true);
3404 			goto setup_nic_dev_done;
3405 		} else if (atomic_read(octeon_dev->adapter_fw_state) ==
3406 			   FW_IS_PRELOADED) {
3407 			dev_info(&octeon_dev->pci_dev->dev,
3408 				 "Using auto-loaded firmware version %s.\n",
3409 				 fw_ver);
3410 		}
3411 
3412 		/* extract micro version field; point past '<maj>.<min>.' */
3413 		micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3414 		if (kstrtoul(micro_ver, 10, &micro) != 0)
3415 			micro = 0;
3416 		octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3417 		octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3418 		octeon_dev->fw_info.ver.rev = micro;
3419 
3420 		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3421 				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
3422 
3423 		num_iqueues = hweight64(resp->cfg_info.iqmask);
3424 		num_oqueues = hweight64(resp->cfg_info.oqmask);
3425 
3426 		if (!(num_iqueues) || !(num_oqueues)) {
3427 			dev_err(&octeon_dev->pci_dev->dev,
3428 				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3429 				resp->cfg_info.iqmask,
3430 				resp->cfg_info.oqmask);
3431 			WRITE_ONCE(sc->caller_is_done, true);
3432 			goto setup_nic_dev_done;
3433 		}
3434 
3435 		if (OCTEON_CN6XXX(octeon_dev)) {
3436 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3437 								    cn6xxx));
3438 		} else if (OCTEON_CN23XX_PF(octeon_dev)) {
3439 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3440 								    cn23xx_pf));
3441 		}
3442 
3443 		dev_dbg(&octeon_dev->pci_dev->dev,
3444 			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3445 			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3446 			num_iqueues, num_oqueues, max_num_queues);
3447 		netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3448 
3449 		if (!netdev) {
3450 			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3451 			WRITE_ONCE(sc->caller_is_done, true);
3452 			goto setup_nic_dev_done;
3453 		}
3454 
3455 		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3456 
3457 		/* Associate the routines that will handle different
3458 		 * netdev tasks.
3459 		 */
3460 		netdev->netdev_ops = &lionetdevops;
3461 		SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
3462 
3463 		retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3464 		if (retval) {
3465 			dev_err(&octeon_dev->pci_dev->dev,
3466 				"setting real number rx failed\n");
3467 			WRITE_ONCE(sc->caller_is_done, true);
3468 			goto setup_nic_dev_free;
3469 		}
3470 
3471 		retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3472 		if (retval) {
3473 			dev_err(&octeon_dev->pci_dev->dev,
3474 				"setting real number tx failed\n");
3475 			WRITE_ONCE(sc->caller_is_done, true);
3476 			goto setup_nic_dev_free;
3477 		}
3478 
3479 		lio = GET_LIO(netdev);
3480 
3481 		memset(lio, 0, sizeof(struct lio));
3482 
3483 		lio->ifidx = ifidx_or_pfnum;
3484 
3485 		props = &octeon_dev->props[i];
3486 		props->gmxport = resp->cfg_info.linfo.gmxport;
3487 		props->netdev = netdev;
3488 
3489 		lio->linfo.num_rxpciq = num_oqueues;
3490 		lio->linfo.num_txpciq = num_iqueues;
3491 		for (j = 0; j < num_oqueues; j++) {
3492 			lio->linfo.rxpciq[j].u64 =
3493 				resp->cfg_info.linfo.rxpciq[j].u64;
3494 		}
3495 		for (j = 0; j < num_iqueues; j++) {
3496 			lio->linfo.txpciq[j].u64 =
3497 				resp->cfg_info.linfo.txpciq[j].u64;
3498 		}
3499 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3500 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3501 		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3502 
3503 		WRITE_ONCE(sc->caller_is_done, true);
3504 
3505 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3506 
3507 		if (OCTEON_CN23XX_PF(octeon_dev) ||
3508 		    OCTEON_CN6XXX(octeon_dev)) {
3509 			lio->dev_capability = NETIF_F_HIGHDMA
3510 					      | NETIF_F_IP_CSUM
3511 					      | NETIF_F_IPV6_CSUM
3512 					      | NETIF_F_SG | NETIF_F_RXCSUM
3513 					      | NETIF_F_GRO
3514 					      | NETIF_F_TSO | NETIF_F_TSO6
3515 					      | NETIF_F_LRO;
3516 		}
3517 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3518 
3519 		/*  Copy of transmit encapsulation capabilities:
3520 		 *  TSO, TSO6, Checksums for this device
3521 		 */
3522 		lio->enc_dev_capability = NETIF_F_IP_CSUM
3523 					  | NETIF_F_IPV6_CSUM
3524 					  | NETIF_F_GSO_UDP_TUNNEL
3525 					  | NETIF_F_HW_CSUM | NETIF_F_SG
3526 					  | NETIF_F_RXCSUM
3527 					  | NETIF_F_TSO | NETIF_F_TSO6
3528 					  | NETIF_F_LRO;
3529 
3530 		netdev->hw_enc_features = (lio->enc_dev_capability &
3531 					   ~NETIF_F_LRO);
3532 
3533 		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3534 
3535 		netdev->vlan_features = lio->dev_capability;
3536 		/* Add any unchangeable hw features */
3537 		lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3538 					NETIF_F_HW_VLAN_CTAG_RX |
3539 					NETIF_F_HW_VLAN_CTAG_TX;
3540 
3541 		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3542 
3543 		netdev->hw_features = lio->dev_capability;
		/* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3545 		netdev->hw_features = netdev->hw_features &
3546 			~NETIF_F_HW_VLAN_CTAG_RX;
3547 
3548 		/* MTU range: 68 - 16000 */
3549 		netdev->min_mtu = LIO_MIN_MTU_SIZE;
3550 		netdev->max_mtu = LIO_MAX_MTU_SIZE;
3551 
		/* Point to the properties of the octeon device to which
		 * this interface belongs.
		 */
3555 		lio->oct_dev = octeon_dev;
3556 		lio->octprops = props;
3557 		lio->netdev = netdev;
3558 
3559 		dev_dbg(&octeon_dev->pci_dev->dev,
3560 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
3561 			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3562 
3563 		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3564 			u8 vfmac[ETH_ALEN];
3565 
3566 			eth_random_addr(vfmac);
3567 			if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3568 				dev_err(&octeon_dev->pci_dev->dev,
3569 					"Error setting VF%d MAC address\n",
3570 					j);
3571 				goto setup_nic_dev_free;
3572 			}
3573 		}
3574 
3575 		/* 64-bit swap required on LE machines */
3576 		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3577 		for (j = 0; j < 6; j++)
3578 			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3579 
3580 		/* Copy MAC Address to OS network device structure */
3581 
3582 		ether_addr_copy(netdev->dev_addr, mac);
3583 
		/* By default, all interfaces on a single Octeon use the
		 * same Tx and Rx queues.
		 */
3587 		lio->txq = lio->linfo.txpciq[0].s.q_no;
3588 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3589 		if (liquidio_setup_io_queues(octeon_dev, i,
3590 					     lio->linfo.num_txpciq,
3591 					     lio->linfo.num_rxpciq)) {
3592 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3593 			goto setup_nic_dev_free;
3594 		}
3595 
3596 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3597 
3598 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3599 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3600 
3601 		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3602 			dev_err(&octeon_dev->pci_dev->dev,
3603 				"Gather list allocation failed\n");
3604 			goto setup_nic_dev_free;
3605 		}
3606 
3607 		/* Register ethtool support */
3608 		liquidio_set_ethtool_ops(netdev);
3609 		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3610 			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3611 		else
3612 			octeon_dev->priv_flags = 0x0;
3613 
3614 		if (netdev->features & NETIF_F_LRO)
3615 			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3616 					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3617 
3618 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3619 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
3620 
3621 		if ((debug != -1) && (debug & NETIF_MSG_HW))
3622 			liquidio_set_feature(netdev,
3623 					     OCTNET_CMD_VERBOSE_ENABLE, 0);
3624 
3625 		if (setup_link_status_change_wq(netdev))
3626 			goto setup_nic_dev_free;
3627 
3628 		if ((octeon_dev->fw_info.app_cap_flags &
3629 		     LIQUIDIO_TIME_SYNC_CAP) &&
3630 		    setup_sync_octeon_time_wq(netdev))
3631 			goto setup_nic_dev_free;
3632 
3633 		if (setup_rx_oom_poll_fn(netdev))
3634 			goto setup_nic_dev_free;
3635 
3636 		/* Register the network device with the OS */
3637 		if (register_netdev(netdev)) {
3638 			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3639 			goto setup_nic_dev_free;
3640 		}
3641 
3642 		dev_dbg(&octeon_dev->pci_dev->dev,
3643 			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3644 			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3645 		netif_carrier_off(netdev);
3646 		lio->link_changes++;
3647 
3648 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3649 
3650 		/* Send a command to the firmware to enable Rx checksum offload
3651 		 * by default at the time the LiquidIO driver sets up this
3652 		 * device.
3653 		 */
3654 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3655 					    OCTNET_CMD_RXCSUM_ENABLE);
3656 		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3657 				     OCTNET_CMD_TXCSUM_ENABLE);
3658 
3659 		dev_dbg(&octeon_dev->pci_dev->dev,
3660 			"NIC ifidx:%d Setup successful\n", i);
3661 
3662 		if (octeon_dev->subsystem_id ==
3663 			OCTEON_CN2350_25GB_SUBSYS_ID ||
3664 		    octeon_dev->subsystem_id ==
3665 			OCTEON_CN2360_25GB_SUBSYS_ID) {
3666 			cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3667 					     octeon_dev->fw_info.ver.min,
3668 					     octeon_dev->fw_info.ver.rev);
3669 
3670 			/* speed control unsupported in f/w older than 1.7.2 */
3671 			if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3672 				dev_info(&octeon_dev->pci_dev->dev,
3673 					 "speed setting not supported by f/w.\n");
3674 				octeon_dev->speed_setting = 25;
3675 				octeon_dev->no_speed_setting = 1;
3676 			} else {
3677 				liquidio_get_speed(lio);
3678 			}
3679 
3680 			if (octeon_dev->speed_setting == 0) {
3681 				octeon_dev->speed_setting = 25;
3682 				octeon_dev->no_speed_setting = 1;
3683 			}
3684 		} else {
3685 			octeon_dev->no_speed_setting = 1;
3686 			octeon_dev->speed_setting = 10;
3687 		}
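
		/* OCT_FW_VER() evidently packs maj/min/rev so plain integer
		 * comparison orders versions; e.g. firmware 1.6.9 gives
		 * OCT_FW_VER(1, 6, 9) < OCT_FW_VER(1, 7, 2) and takes the
		 * "not supported" branch above (illustrative version only).
		 */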
3688 		octeon_dev->speed_boot = octeon_dev->speed_setting;
3689 	}
3690 
3691 	devlink = devlink_alloc(&liquidio_devlink_ops,
3692 				sizeof(struct lio_devlink_priv));
3693 	if (!devlink) {
3694 		dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3695 		goto setup_nic_dev_free;
3696 	}
3697 
3698 	lio_devlink = devlink_priv(devlink);
3699 	lio_devlink->oct = octeon_dev;
3700 
3701 	if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3702 		devlink_free(devlink);
3703 		dev_err(&octeon_dev->pci_dev->dev,
3704 			"devlink registration failed\n");
3705 		goto setup_nic_dev_free;
3706 	}
3707 
3708 	octeon_dev->devlink = devlink;
3709 	octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3710 
3711 	return 0;
3712 
3713 setup_nic_dev_free:
3714 
3715 	while (i--) {
3716 		dev_err(&octeon_dev->pci_dev->dev,
3717 			"NIC ifidx:%d Setup failed\n", i);
3718 		liquidio_destroy_nic_device(octeon_dev, i);
3719 	}
3720 
3721 setup_nic_dev_done:
3722 
3723 	return -ENODEV;
3724 }
3725 
3726 #ifdef CONFIG_PCI_IOV
3727 static int octeon_enable_sriov(struct octeon_device *oct)
3728 {
3729 	unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3730 	struct pci_dev *vfdev;
3731 	int err;
3732 	u32 u;
3733 
3734 	if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3735 		err = pci_enable_sriov(oct->pci_dev,
3736 				       oct->sriov_info.num_vfs_alloced);
3737 		if (err) {
3738 			dev_err(&oct->pci_dev->dev,
3739 				"OCTEON: Failed to enable PCI sriov: %d\n",
3740 				err);
3741 			oct->sriov_info.num_vfs_alloced = 0;
3742 			return err;
3743 		}
3744 		oct->sriov_info.sriov_enabled = 1;
3745 
3746 		/* init lookup table that maps DPI ring number to VF pci_dev
3747 		 * struct pointer
3748 		 */
3749 		u = 0;
3750 		vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3751 				       OCTEON_CN23XX_VF_VID, NULL);
3752 		while (vfdev) {
3753 			if (vfdev->is_virtfn &&
3754 			    (vfdev->physfn == oct->pci_dev)) {
3755 				oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3756 					vfdev;
3757 				u += oct->sriov_info.rings_per_vf;
3758 			}
3759 			vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3760 					       OCTEON_CN23XX_VF_VID, vfdev);
3761 		}
3762 	}
3763 
3764 	return num_vfs_alloced;
3765 }
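
/* Illustration of the LUT built above (assuming VFs enumerate in order):
 * with rings_per_vf == 2, VF0's pci_dev lands in dpiring_to_vfpcidev_lut[0],
 * VF1's in [2], VF2's in [4], and so on, so a DPI ring number maps directly
 * back to its owning VF.
 */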
3766 
3767 static int lio_pci_sriov_disable(struct octeon_device *oct)
3768 {
3769 	int u;
3770 
3771 	if (pci_vfs_assigned(oct->pci_dev)) {
3772 		dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3773 		return -EPERM;
3774 	}
3775 
3776 	pci_disable_sriov(oct->pci_dev);
3777 
3778 	u = 0;
3779 	while (u < MAX_POSSIBLE_VFS) {
3780 		oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3781 		u += oct->sriov_info.rings_per_vf;
3782 	}
3783 
3784 	oct->sriov_info.num_vfs_alloced = 0;
3785 	dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3786 		 oct->pf_num);
3787 
3788 	return 0;
3789 }
3790 
3791 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3792 {
3793 	struct octeon_device *oct = pci_get_drvdata(dev);
3794 	int ret = 0;
3795 
3796 	if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3797 	    (oct->sriov_info.sriov_enabled)) {
3798 		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3799 			 oct->pf_num, num_vfs);
3800 		return 0;
3801 	}
3802 
3803 	if (!num_vfs) {
3804 		lio_vf_rep_destroy(oct);
3805 		ret = lio_pci_sriov_disable(oct);
3806 	} else if (num_vfs > oct->sriov_info.max_vfs) {
3807 		dev_err(&oct->pci_dev->dev,
3808 			"OCTEON: Max allowed VFs:%d user requested:%d\n",
3809 			oct->sriov_info.max_vfs, num_vfs);
3810 		ret = -EPERM;
3811 	} else {
3812 		oct->sriov_info.num_vfs_alloced = num_vfs;
3813 		ret = octeon_enable_sriov(oct);
3814 		ret = octeon_enable_sriov(oct);
		if (ret < 0)
			return ret;
3815 			 oct->pf_num, num_vfs);
3816 		ret = lio_vf_rep_create(oct);
3817 		if (ret)
3818 			dev_info(&oct->pci_dev->dev,
3819 				 "vf representor create failed\n");
3820 	}
3821 
3822 	return ret;
3823 }
3824 #endif
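
/* liquidio_enable_sriov() serves as the pci_driver .sriov_configure hook,
 * invoked by the PCI core when userspace writes the sriov_numvfs sysfs
 * attribute, e.g. (illustrative BDF only):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs   # enable 4 VFs
 *   echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs   # disable VFs
 */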
3825 
3826 /**
3827  * \brief initialize the NIC
3828  * @param oct octeon device
3829  *
3830  * This initialization routine is called once the Octeon device application is
3831  * up and running
3832  */
3833 static int liquidio_init_nic_module(struct octeon_device *oct)
3834 {
3835 	int i, retval = 0;
3836 	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3837 
3838 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3839 
3840 	/* Only the default iq and oq were initialized earlier;
3841 	 * initialize the rest as well, and run the port_config
3842 	 * command for each port.
3843 	 */
3844 	oct->ifcount = num_nic_ports;
3845 
3846 	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3847 
3848 	for (i = 0; i < MAX_OCTEON_LINKS; i++)
3849 		oct->props[i].gmxport = -1;
3850 
3851 	retval = setup_nic_devices(oct);
3852 	if (retval) {
3853 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3854 		goto octnet_init_failure;
3855 	}
3856 
3857 	/* Call vf_rep_modinit if the firmware is switchdev capable
3858 	 * and do it from the first liquidio function probed.
3859 	 */
3860 	if (!oct->octeon_id &&
3861 	    oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3862 		retval = lio_vf_rep_modinit();
3863 		if (retval) {
3864 			liquidio_stop_nic_module(oct);
3865 			goto octnet_init_failure;
3866 		}
3867 	}
3868 
3869 	liquidio_ptp_init(oct);
3870 
3871 	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3872 
3873 	return retval;
3874 
3875 octnet_init_failure:
3876 
3877 	oct->ifcount = 0;
3878 
3879 	return retval;
3880 }
3881 
3882 /**
3883  * \brief starter callback that invokes the remaining initialization work after
3884  * the NIC is up and running.
3885  * @param work  work_struct of the delayed NIC-starter work item
3886  */
3887 static void nic_starter(struct work_struct *work)
3888 {
3889 	struct octeon_device *oct;
3890 	struct cavium_wk *wk = (struct cavium_wk *)work;
3891 
3892 	oct = (struct octeon_device *)wk->ctxptr;
3893 
3894 	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3895 		return;
3896 
3897 	/* If the status of the device is CORE_OK, the core
3898 	 * application has reported its application type. Call
3899 	 * any registered handlers now and move to the RUNNING
3900 	 * state.
3901 	 */
3902 	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3903 		schedule_delayed_work(&oct->nic_poll_work.work,
3904 				      msecs_to_jiffies(LIQUIDIO_STARTER_POLL_INTERVAL_MS));
3905 		return;
3906 	}
3907 
3908 	atomic_set(&oct->status, OCT_DEV_RUNNING);
3909 
3910 	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3911 		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3912 
3913 		if (liquidio_init_nic_module(oct))
3914 			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3915 		else
3916 			handshake[oct->octeon_id].started_ok = 1;
3917 	} else {
3918 		dev_err(&oct->pci_dev->dev,
3919 			"Unexpected application running on NIC (%d). Check firmware.\n",
3920 			oct->app_mode);
3921 	}
3922 
3923 	complete(&handshake[oct->octeon_id].started);
3924 }
3925 
3926 static int
3927 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3928 {
3929 	struct octeon_device *oct = (struct octeon_device *)buf;
3930 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3931 	int i, notice, vf_idx;
3932 	bool cores_crashed;
3933 	u64 *data, vf_num;
3934 
3935 	notice = recv_pkt->rh.r.ossp;
3936 	data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
3937 
3938 	/* the first 64-bit word of data is the vf_num */
3939 	vf_num = data[0];
3940 	octeon_swap_8B_data(&vf_num, 1);
3941 	vf_idx = (int)vf_num - 1;
3942 
3943 	cores_crashed = READ_ONCE(oct->cores_crashed);
3944 
3945 	if (notice == VF_DRV_LOADED) {
3946 		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
3947 			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
3948 			dev_info(&oct->pci_dev->dev,
3949 				 "driver for VF%d was loaded\n", vf_idx);
3950 			if (!cores_crashed)
3951 				try_module_get(THIS_MODULE);
3952 		}
3953 	} else if (notice == VF_DRV_REMOVED) {
3954 		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
3955 			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
3956 			dev_info(&oct->pci_dev->dev,
3957 				 "driver for VF%d was removed\n", vf_idx);
3958 			if (!cores_crashed)
3959 				module_put(THIS_MODULE);
3960 		}
3961 	} else if (notice == VF_DRV_MACADDR_CHANGED) {
3962 		u8 *b = (u8 *)&data[1];
3963 
3964 		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
3965 		dev_info(&oct->pci_dev->dev,
3966 			 "VF driver changed VF%d's MAC address to %pM\n",
3967 			 vf_idx, b + 2);
3968 	}
3969 
3970 	for (i = 0; i < recv_pkt->buffer_count; i++)
3971 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
3972 	octeon_free_recv_info(recv_info);
3973 
3974 	return 0;
3975 }
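
/* Layout of the VF-driver notice consumed above, as inferred from the code
 * (not a documented wire format): rh.r.ossp carries the notice code;
 * payload word 0 is the 1-based VF number (hence vf_idx = vf_num - 1); for
 * VF_DRV_MACADDR_CHANGED, payload word 1 carries the MAC address in its low
 * six bytes, which is why the print above skips two bytes (b + 2).
 */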
3976 
3977 /**
3978  * \brief Device initialization for each Octeon device that is probed
3979  * @param octeon_dev  octeon device
3980  */
3981 static int octeon_device_init(struct octeon_device *octeon_dev)
3982 {
3983 	int j, ret;
3984 	char bootcmd[] = "\n";
3985 	char *dbg_enb = NULL;
3986 	enum lio_fw_state fw_state;
3987 	struct octeon_device_priv *oct_priv =
3988 		(struct octeon_device_priv *)octeon_dev->priv;
3989 	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
3990 
3991 	/* Enable access to the octeon device and make its DMA capability
3992 	 * known to the OS.
3993 	 */
3994 	if (octeon_pci_os_setup(octeon_dev))
3995 		return 1;
3996 
3997 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
3998 
3999 	/* Identify the Octeon type and map the BAR address space. */
4000 	if (octeon_chip_specific_setup(octeon_dev)) {
4001 		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4002 		return 1;
4003 	}
4004 
4005 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4006 
4007 	/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4008 	 * since that is what is required for the reference to be removed
4009 	 * during de-initialization (see 'octeon_destroy_resources').
4010 	 */
4011 	octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4012 			       PCI_SLOT(octeon_dev->pci_dev->devfn),
4013 			       PCI_FUNC(octeon_dev->pci_dev->devfn),
4014 			       true);
4015 
4016 	octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4017 
4018 	/* CN23XX supports preloaded firmware if the following is true:
4019 	 *
4020 	 * The adapter indicates that firmware is currently running AND
4021 	 * 'fw_type' is 'auto'.
4022 	 *
4023 	 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4024 	 */
4025 	if (OCTEON_CN23XX_PF(octeon_dev) &&
4026 	    cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4027 		atomic_cmpxchg(octeon_dev->adapter_fw_state,
4028 			       FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4029 	}
4030 
4031 	/* If loading firmware, only first device of adapter needs to do so. */
4032 	fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4033 				  FW_NEEDS_TO_BE_LOADED,
4034 				  FW_IS_BEING_LOADED);
4035 
4036 	/* Here, [local variable] 'fw_state' is set to one of:
4037 	 *
4038 	 *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4039 	 *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4040 	 *                          firmware to the adapter.
4041 	 *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4042 	 *                          firmware to the adapter.
4043 	 */
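
	/* Concretely: if two PF instances of one adapter probe back to back,
	 * the first cmpxchg returns FW_NEEDS_TO_BE_LOADED, so that instance
	 * soft-resets the chip and loads firmware below, while the second
	 * observes FW_IS_BEING_LOADED and skips the firmware-load path.
	 */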
4044 
4045 	/* Prior to f/w load, perform a soft reset of the Octeon device;
4046 	 * if error resetting, return w/error.
4047 	 */
4048 	if (fw_state == FW_NEEDS_TO_BE_LOADED)
4049 		if (octeon_dev->fn_list.soft_reset(octeon_dev))
4050 			return 1;
4051 
4052 	/* Initialize the dispatch mechanism used to push packets arriving on
4053 	 * Octeon Output queues.
4054 	 */
4055 	if (octeon_init_dispatch_list(octeon_dev))
4056 		return 1;
4057 
4058 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4059 				    OPCODE_NIC_CORE_DRV_ACTIVE,
4060 				    octeon_core_drv_init,
4061 				    octeon_dev);
4062 
4063 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4064 				    OPCODE_NIC_VF_DRV_NOTICE,
4065 				    octeon_recv_vf_drv_notice, octeon_dev);
4066 	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4067 	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4068 	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4069 			      msecs_to_jiffies(LIQUIDIO_STARTER_POLL_INTERVAL_MS));
4070 
4071 	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4072 
4073 	if (octeon_set_io_queues_off(octeon_dev)) {
4074 		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4075 		return 1;
4076 	}
4077 
4078 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4079 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4080 		if (ret) {
4081 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4082 			return ret;
4083 		}
4084 	}
4085 
4086 	/* Initialize soft command buffer pool
4087 	 */
4088 	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4089 		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4090 		return 1;
4091 	}
4092 	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4093 
4094 	/* Setup the data structures that manage this Octeon's Input queues. */
4095 	if (octeon_setup_instr_queues(octeon_dev)) {
4096 		dev_err(&octeon_dev->pci_dev->dev,
4097 			"instruction queue initialization failed\n");
4098 		return 1;
4099 	}
4100 	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4101 
4102 	/* Initialize lists to manage the requests of different types that
4103 	 * arrive from user & kernel applications for this octeon device.
4104 	 */
4105 	if (octeon_setup_response_list(octeon_dev)) {
4106 		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4107 		return 1;
4108 	}
4109 	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4110 
4111 	if (octeon_setup_output_queues(octeon_dev)) {
4112 		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4113 		return 1;
4114 	}
4115 
4116 	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4117 
4118 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4119 		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4120 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4121 			return 1;
4122 		}
4123 		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4124 
4125 		if (octeon_allocate_ioq_vector
4126 				(octeon_dev,
4127 				 octeon_dev->sriov_info.num_pf_rings)) {
4128 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4129 			return 1;
4130 		}
4131 		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4132 
4133 	} else {
4134 		/* The input and output queue registers were set up earlier (the
4135 		 * queues were not enabled). Any additional registers
4136 		 * that need to be programmed should be done now.
4137 		 */
4138 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4139 		if (ret) {
4140 			dev_err(&octeon_dev->pci_dev->dev,
4141 				"Failed to configure device registers\n");
4142 			return ret;
4143 		}
4144 	}
4145 
4146 	/* Initialize the tasklet that handles output queue packet processing. */
4147 	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4148 	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4149 		     (unsigned long)octeon_dev);
4150 
4151 	/* Setup the interrupt handler and record the INT SUM register address
4152 	 */
4153 	if (octeon_setup_interrupt(octeon_dev,
4154 				   octeon_dev->sriov_info.num_pf_rings))
4155 		return 1;
4156 
4157 	/* Enable Octeon device interrupts */
4158 	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4159 
4160 	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4161 
4162 	/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4163 	 * the output queue is enabled.
4164 	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4165 	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4166 	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4167 	 * before any credits have been issued, causing the ring to be reset
4168 	 * (and the f/w appear to never have started).
4169 	 */
4170 	for (j = 0; j < octeon_dev->num_oqs; j++)
4171 		writel(octeon_dev->droq[j]->max_count,
4172 		       octeon_dev->droq[j]->pkts_credit_reg);
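
	/* e.g. a DROQ with max_count == 2048 starts out with 2048 credits,
	 * i.e. the firmware may post that many packets before the host has
	 * to replenish receive buffers (illustrative ring size).
	 */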
4173 
4174 	/* Enable the input and output queues for this Octeon device */
4175 	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4176 	if (ret) {
4177 		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues\n");
4178 		return ret;
4179 	}
4180 
4181 	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4182 
4183 	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4184 		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4185 		if (!ddr_timeout) {
4186 			dev_info(&octeon_dev->pci_dev->dev,
4187 				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4188 		}
4189 
4190 		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4191 
4192 		/* Wait for the octeon to initialize DDR after the soft-reset. */
4193 		while (!ddr_timeout) {
4194 			set_current_state(TASK_INTERRUPTIBLE);
4195 			if (schedule_timeout(HZ / 10)) {
4196 				/* user probably pressed Control-C */
4197 				return 1;
4198 			}
4199 		}
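
		/* ddr_timeout is a writable (0644) module parameter, so the
		 * wait loop above can be released from userspace at runtime,
		 * e.g.:
		 *
		 *   echo 10000 > /sys/module/liquidio/parameters/ddr_timeout
		 */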
4200 		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4201 		if (ret) {
4202 			dev_err(&octeon_dev->pci_dev->dev,
4203 				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4204 				ret);
4205 			return 1;
4206 		}
4207 
4208 		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4209 			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4210 			return 1;
4211 		}
4212 
4213 		/* Divert U-Boot to take commands from the host instead. */
4214 		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4215 
4216 		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4217 		ret = octeon_init_consoles(octeon_dev);
4218 		if (ret) {
4219 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4220 			return 1;
4221 		}
4222 		/* If console debug enabled, specify empty string to use default
4223 		/* If console debug is enabled, pass an empty string to request
4224 		 * the default enablement; otherwise pass NULL for 'disabled'.
4225 		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4226 		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4227 		if (ret) {
4228 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4229 			return 1;
4230 		} else if (octeon_console_debug_enabled(0)) {
4231 			/* If console was added AND we're logging console output
4232 			 * then set our console print function.
4233 			 */
4234 			octeon_dev->console[0].print = octeon_dbg_console_print;
4235 		}
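
		/* Per octeon_console_debug_enabled(), bit n of console_bitmask
		 * selects console n, so loading the module with
		 * console_bitmask=1 routes console 0 output to syslog through
		 * octeon_dbg_console_print() below (illustrative usage).
		 */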
4236 
4237 		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4238 
4239 		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4240 		ret = load_firmware(octeon_dev);
4241 		if (ret) {
4242 			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4243 			return 1;
4244 		}
4245 
4246 		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4247 	}
4248 
4249 	handshake[octeon_dev->octeon_id].init_ok = 1;
4250 	complete(&handshake[octeon_dev->octeon_id].init);
4251 
4252 	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4253 
4254 	return 0;
4255 }
4256 
4257 /**
4258  * \brief Debug console print function
4259  * @param oct         octeon device
4260  * @param console_num console number
4261  * @param prefix      first portion of line to display
4262  * @param suffix      second portion of line to display
4263  *
4264  * The OCTEON debug console outputs entire lines (excluding '\n').
4265  * Normally, the line will be passed in the 'prefix' parameter.
4266  * However, due to buffering, it is possible for a line to be split into two
4267  * parts, in which case they will be passed as the 'prefix' parameter and
4268  * 'suffix' parameter.
4269  */
4270 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4271 				    char *prefix, char *suffix)
4272 {
4273 	if (prefix && suffix)
4274 		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4275 			 suffix);
4276 	else if (prefix)
4277 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4278 	else if (suffix)
4279 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4280 
4281 	return 0;
4282 }
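
/* Illustrative syslog line produced by the function above (hypothetical
 * message text and PCI address):
 *
 *   ... 0000:03:00.0: 0: Bootloader: Done loading app
 *
 * where "0:" is the console number from the format string.
 */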
4283 
4284 /**
4285  * \brief Exits the module
4286  */
4287 static void __exit liquidio_exit(void)
4288 {
4289 	liquidio_deinit_pci();
4290 
4291 	pr_info("LiquidIO network module is now unloaded\n");
4292 }
4293 
4294 module_init(liquidio_init);
4295 module_exit(liquidio_exit);
4296