xref: /linux/drivers/net/ethernet/cavium/liquidio/lio_main.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <linux/firmware.h>
22 #include <net/vxlan.h>
23 #include <linux/kthread.h>
24 #include <net/switchdev.h>
25 #include "liquidio_common.h"
26 #include "octeon_droq.h"
27 #include "octeon_iq.h"
28 #include "response_manager.h"
29 #include "octeon_device.h"
30 #include "octeon_nic.h"
31 #include "octeon_main.h"
32 #include "octeon_network.h"
33 #include "cn66xx_regs.h"
34 #include "cn66xx_device.h"
35 #include "cn68xx_device.h"
36 #include "cn23xx_pf_device.h"
37 #include "liquidio_image.h"
38 #include "lio_vf_rep.h"
39 
40 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
41 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
42 MODULE_LICENSE("GPL");
43 MODULE_VERSION(LIQUIDIO_VERSION);
44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
45 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
47 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
49 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
50 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
51 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
52 
53 static int ddr_timeout = 10000;
54 module_param(ddr_timeout, int, 0644);
55 MODULE_PARM_DESC(ddr_timeout,
56 		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
57 
58 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
59 
60 static int debug = -1;
61 module_param(debug, int, 0644);
62 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
63 
64 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
65 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
66 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");
67 
68 static u32 console_bitmask;
69 module_param(console_bitmask, int, 0644);
70 MODULE_PARM_DESC(console_bitmask,
71 		 "Bitmask indicating which consoles have debug output redirected to syslog.");
72 
73 /**
74  * \brief determines if a given console has debug enabled.
75  * @param console console to check
76  * @returns  1 = enabled. 0 otherwise
77  */
78 static int octeon_console_debug_enabled(u32 console)
79 {
80 	return (console_bitmask >> (console)) & 0x1;
81 }
82 
83 /* Polling interval for determining when NIC application is alive */
84 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
85 
86 /* runtime link query interval */
87 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
88 /* update localtime to octeon firmware every 60 seconds.
89  * make firmware to use same time reference, so that it will be easy to
90  * correlate firmware logged events/errors with host events, for debugging.
91  */
92 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
93 
94 /* time to wait for possible in-flight requests in milliseconds */
95 #define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)
96 
97 struct lio_trusted_vf_ctx {
98 	struct completion complete;
99 	int status;
100 };
101 
102 struct oct_link_status_resp {
103 	u64 rh;
104 	struct oct_link_info link_info;
105 	u64 status;
106 };
107 
108 struct oct_timestamp_resp {
109 	u64 rh;
110 	u64 timestamp;
111 	u64 status;
112 };
113 
114 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
115 
116 union tx_info {
117 	u64 u64;
118 	struct {
119 #ifdef __BIG_ENDIAN_BITFIELD
120 		u16 gso_size;
121 		u16 gso_segs;
122 		u32 reserved;
123 #else
124 		u32 reserved;
125 		u16 gso_segs;
126 		u16 gso_size;
127 #endif
128 	} s;
129 };
130 
131 /** Octeon device properties to be used by the NIC module.
132  * Each octeon device in the system will be represented
133  * by this structure in the NIC module.
134  */
135 
136 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
137 #define OCTNIC_GSO_MAX_SIZE                                                    \
138 	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
139 
140 struct handshake {
141 	struct completion init;
142 	struct completion started;
143 	struct pci_dev *pci_dev;
144 	int init_ok;
145 	int started_ok;
146 };
147 
148 #ifdef CONFIG_PCI_IOV
149 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
150 #endif
151 
152 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
153 				    char *prefix, char *suffix);
154 
155 static int octeon_device_init(struct octeon_device *);
156 static int liquidio_stop(struct net_device *netdev);
157 static void liquidio_remove(struct pci_dev *pdev);
158 static int liquidio_probe(struct pci_dev *pdev,
159 			  const struct pci_device_id *ent);
160 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
161 				      int linkstate);
162 
163 static struct handshake handshake[MAX_OCTEON_DEVICES];
164 static struct completion first_stage;
165 
166 static void octeon_droq_bh(unsigned long pdev)
167 {
168 	int q_no;
169 	int reschedule = 0;
170 	struct octeon_device *oct = (struct octeon_device *)pdev;
171 	struct octeon_device_priv *oct_priv =
172 		(struct octeon_device_priv *)oct->priv;
173 
174 	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
175 		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
176 			continue;
177 		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
178 							  MAX_PACKET_BUDGET);
179 		lio_enable_irq(oct->droq[q_no], NULL);
180 
181 		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
182 			/* set time and cnt interrupt thresholds for this DROQ
183 			 * for NAPI
184 			 */
185 			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
186 
187 			octeon_write_csr64(
188 			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
189 			    0x5700000040ULL);
190 			octeon_write_csr64(
191 			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
192 		}
193 	}
194 
195 	if (reschedule)
196 		tasklet_schedule(&oct_priv->droq_tasklet);
197 }
198 
199 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
200 {
201 	struct octeon_device_priv *oct_priv =
202 		(struct octeon_device_priv *)oct->priv;
203 	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
204 	int i;
205 
206 	do {
207 		pending_pkts = 0;
208 
209 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
210 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
211 				continue;
212 			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
213 		}
214 		if (pkt_cnt > 0) {
215 			pending_pkts += pkt_cnt;
216 			tasklet_schedule(&oct_priv->droq_tasklet);
217 		}
218 		pkt_cnt = 0;
219 		schedule_timeout_uninterruptible(1);
220 
221 	} while (retry-- && pending_pkts);
222 
223 	return pkt_cnt;
224 }
225 
226 /**
227  * \brief Forces all IO queues off on a given device
228  * @param oct Pointer to Octeon device
229  */
230 static void force_io_queues_off(struct octeon_device *oct)
231 {
232 	if ((oct->chip_id == OCTEON_CN66XX) ||
233 	    (oct->chip_id == OCTEON_CN68XX)) {
234 		/* Reset the Enable bits for Input Queues. */
235 		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
236 
237 		/* Reset the Enable bits for Output Queues. */
238 		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
239 	}
240 }
241 
242 /**
243  * \brief Cause device to go quiet so it can be safely removed/reset/etc
244  * @param oct Pointer to Octeon device
245  */
246 static inline void pcierror_quiesce_device(struct octeon_device *oct)
247 {
248 	int i;
249 
250 	/* Disable the input and output queues now. No more packets will
251 	 * arrive from Octeon, but we should wait for all packet processing
252 	 * to finish.
253 	 */
254 	force_io_queues_off(oct);
255 
256 	/* To allow for in-flight requests */
257 	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
258 
259 	if (wait_for_pending_requests(oct))
260 		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
261 
262 	/* Force all requests waiting to be fetched by OCTEON to complete. */
263 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
264 		struct octeon_instr_queue *iq;
265 
266 		if (!(oct->io_qmask.iq & BIT_ULL(i)))
267 			continue;
268 		iq = oct->instr_queue[i];
269 
270 		if (atomic_read(&iq->instr_pending)) {
271 			spin_lock_bh(&iq->lock);
272 			iq->fill_cnt = 0;
273 			iq->octeon_read_index = iq->host_write_index;
274 			iq->stats.instr_processed +=
275 				atomic_read(&iq->instr_pending);
276 			lio_process_iq_request_list(oct, iq, 0);
277 			spin_unlock_bh(&iq->lock);
278 		}
279 	}
280 
281 	/* Force all pending ordered list requests to time out. */
282 	lio_process_ordered_list(oct, 1);
283 
284 	/* We do not need to wait for output queue packets to be processed. */
285 }
286 
287 /**
288  * \brief Cleanup PCI AER uncorrectable error status
289  * @param dev Pointer to PCI device
290  */
291 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
292 {
293 	int pos = 0x100;
294 	u32 status, mask;
295 
296 	pr_info("%s :\n", __func__);
297 
298 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
299 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
300 	if (dev->error_state == pci_channel_io_normal)
301 		status &= ~mask;        /* Clear corresponding nonfatal bits */
302 	else
303 		status &= mask;         /* Clear corresponding fatal bits */
304 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
305 }
306 
307 /**
308  * \brief Stop all PCI IO to a given device
309  * @param dev Pointer to Octeon device
310  */
311 static void stop_pci_io(struct octeon_device *oct)
312 {
313 	/* No more instructions will be forwarded. */
314 	atomic_set(&oct->status, OCT_DEV_IN_RESET);
315 
316 	pci_disable_device(oct->pci_dev);
317 
318 	/* Disable interrupts  */
319 	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
320 
321 	pcierror_quiesce_device(oct);
322 
323 	/* Release the interrupt line */
324 	free_irq(oct->pci_dev->irq, oct);
325 
326 	if (oct->flags & LIO_FLAG_MSI_ENABLED)
327 		pci_disable_msi(oct->pci_dev);
328 
329 	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
330 		lio_get_state_string(&oct->status));
331 
332 	/* making it a common function for all OCTEON models */
333 	cleanup_aer_uncorrect_error_status(oct->pci_dev);
334 }
335 
336 /**
337  * \brief called when PCI error is detected
338  * @param pdev Pointer to PCI device
339  * @param state The current pci connection state
340  *
341  * This function is called after a PCI bus error affecting
342  * this device has been detected.
343  */
344 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
345 						     pci_channel_state_t state)
346 {
347 	struct octeon_device *oct = pci_get_drvdata(pdev);
348 
349 	/* Non-correctable Non-fatal errors */
350 	if (state == pci_channel_io_normal) {
351 		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
352 		cleanup_aer_uncorrect_error_status(oct->pci_dev);
353 		return PCI_ERS_RESULT_CAN_RECOVER;
354 	}
355 
356 	/* Non-correctable Fatal errors */
357 	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
358 	stop_pci_io(oct);
359 
360 	/* Always return a DISCONNECT. There is no support for recovery but only
361 	 * for a clean shutdown.
362 	 */
363 	return PCI_ERS_RESULT_DISCONNECT;
364 }
365 
366 /**
367  * \brief mmio handler
368  * @param pdev Pointer to PCI device
369  */
370 static pci_ers_result_t liquidio_pcie_mmio_enabled(
371 				struct pci_dev *pdev __attribute__((unused)))
372 {
373 	/* We should never hit this since we never ask for a reset for a Fatal
374 	 * Error. We always return DISCONNECT in io_error above.
375 	 * But play safe and return RECOVERED for now.
376 	 */
377 	return PCI_ERS_RESULT_RECOVERED;
378 }
379 
380 /**
381  * \brief called after the pci bus has been reset.
382  * @param pdev Pointer to PCI device
383  *
384  * Restart the card from scratch, as if from a cold-boot. Implementation
385  * resembles the first-half of the octeon_resume routine.
386  */
387 static pci_ers_result_t liquidio_pcie_slot_reset(
388 				struct pci_dev *pdev __attribute__((unused)))
389 {
390 	/* We should never hit this since we never ask for a reset for a Fatal
391 	 * Error. We always return DISCONNECT in io_error above.
392 	 * But play safe and return RECOVERED for now.
393 	 */
394 	return PCI_ERS_RESULT_RECOVERED;
395 }
396 
397 /**
398  * \brief called when traffic can start flowing again.
399  * @param pdev Pointer to PCI device
400  *
401  * This callback is called when the error recovery driver tells us that
402  * its OK to resume normal operation. Implementation resembles the
403  * second-half of the octeon_resume routine.
404  */
405 static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
406 {
407 	/* Nothing to be done here. */
408 }
409 
410 #ifdef CONFIG_PM
411 /**
412  * \brief called when suspending
413  * @param pdev Pointer to PCI device
414  * @param state state to suspend to
415  */
416 static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
417 			    pm_message_t state __attribute__((unused)))
418 {
419 	return 0;
420 }
421 
422 /**
423  * \brief called when resuming
424  * @param pdev Pointer to PCI device
425  */
426 static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
427 {
428 	return 0;
429 }
430 #endif
431 
432 /* For PCI-E Advanced Error Recovery (AER) Interface */
433 static const struct pci_error_handlers liquidio_err_handler = {
434 	.error_detected = liquidio_pcie_error_detected,
435 	.mmio_enabled	= liquidio_pcie_mmio_enabled,
436 	.slot_reset	= liquidio_pcie_slot_reset,
437 	.resume		= liquidio_pcie_resume,
438 };
439 
440 static const struct pci_device_id liquidio_pci_tbl[] = {
441 	{       /* 68xx */
442 		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
443 	},
444 	{       /* 66xx */
445 		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
446 	},
447 	{       /* 23xx pf */
448 		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
449 	},
450 	{
451 		0, 0, 0, 0, 0, 0, 0
452 	}
453 };
454 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
455 
456 static struct pci_driver liquidio_pci_driver = {
457 	.name		= "LiquidIO",
458 	.id_table	= liquidio_pci_tbl,
459 	.probe		= liquidio_probe,
460 	.remove		= liquidio_remove,
461 	.err_handler	= &liquidio_err_handler,    /* For AER */
462 
463 #ifdef CONFIG_PM
464 	.suspend	= liquidio_suspend,
465 	.resume		= liquidio_resume,
466 #endif
467 #ifdef CONFIG_PCI_IOV
468 	.sriov_configure = liquidio_enable_sriov,
469 #endif
470 };
471 
472 /**
473  * \brief register PCI driver
474  */
475 static int liquidio_init_pci(void)
476 {
477 	return pci_register_driver(&liquidio_pci_driver);
478 }
479 
480 /**
481  * \brief unregister PCI driver
482  */
483 static void liquidio_deinit_pci(void)
484 {
485 	pci_unregister_driver(&liquidio_pci_driver);
486 }
487 
488 /**
489  * \brief Check Tx queue status, and take appropriate action
490  * @param lio per-network private data
491  * @returns 0 if full, number of queues woken up otherwise
492  */
493 static inline int check_txq_status(struct lio *lio)
494 {
495 	int numqs = lio->netdev->real_num_tx_queues;
496 	int ret_val = 0;
497 	int q, iq;
498 
499 	/* check each sub-queue state */
500 	for (q = 0; q < numqs; q++) {
501 		iq = lio->linfo.txpciq[q %
502 			lio->oct_dev->num_iqs].s.q_no;
503 		if (octnet_iq_is_full(lio->oct_dev, iq))
504 			continue;
505 		if (__netif_subqueue_stopped(lio->netdev, q)) {
506 			netif_wake_subqueue(lio->netdev, q);
507 			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
508 						  tx_restart, 1);
509 			ret_val++;
510 		}
511 	}
512 
513 	return ret_val;
514 }
515 
516 /**
517  * \brief Print link information
518  * @param netdev network device
519  */
520 static void print_link_info(struct net_device *netdev)
521 {
522 	struct lio *lio = GET_LIO(netdev);
523 
524 	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
525 	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
526 		struct oct_link_info *linfo = &lio->linfo;
527 
528 		if (linfo->link.s.link_up) {
529 			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
530 				   linfo->link.s.speed,
531 				   (linfo->link.s.duplex) ? "Full" : "Half");
532 		} else {
533 			netif_info(lio, link, lio->netdev, "Link Down\n");
534 		}
535 	}
536 }
537 
538 /**
539  * \brief Routine to notify MTU change
540  * @param work work_struct data structure
541  */
542 static void octnet_link_status_change(struct work_struct *work)
543 {
544 	struct cavium_wk *wk = (struct cavium_wk *)work;
545 	struct lio *lio = (struct lio *)wk->ctxptr;
546 
547 	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
548 	 * this API is invoked only when new max-MTU of the interface is
549 	 * less than current MTU.
550 	 */
551 	rtnl_lock();
552 	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
553 	rtnl_unlock();
554 }
555 
556 /**
557  * \brief Sets up the mtu status change work
558  * @param netdev network device
559  */
560 static inline int setup_link_status_change_wq(struct net_device *netdev)
561 {
562 	struct lio *lio = GET_LIO(netdev);
563 	struct octeon_device *oct = lio->oct_dev;
564 
565 	lio->link_status_wq.wq = alloc_workqueue("link-status",
566 						 WQ_MEM_RECLAIM, 0);
567 	if (!lio->link_status_wq.wq) {
568 		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
569 		return -1;
570 	}
571 	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
572 			  octnet_link_status_change);
573 	lio->link_status_wq.wk.ctxptr = lio;
574 
575 	return 0;
576 }
577 
578 static inline void cleanup_link_status_change_wq(struct net_device *netdev)
579 {
580 	struct lio *lio = GET_LIO(netdev);
581 
582 	if (lio->link_status_wq.wq) {
583 		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
584 		destroy_workqueue(lio->link_status_wq.wq);
585 	}
586 }
587 
588 /**
589  * \brief Update link status
590  * @param netdev network device
591  * @param ls link status structure
592  *
593  * Called on receipt of a link status response from the core application to
594  * update each interface's link status.
595  */
596 static inline void update_link_status(struct net_device *netdev,
597 				      union oct_link_status *ls)
598 {
599 	struct lio *lio = GET_LIO(netdev);
600 	int changed = (lio->linfo.link.u64 != ls->u64);
601 	int current_max_mtu = lio->linfo.link.s.mtu;
602 	struct octeon_device *oct = lio->oct_dev;
603 
604 	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
605 		__func__, lio->linfo.link.u64, ls->u64);
606 	lio->linfo.link.u64 = ls->u64;
607 
608 	if ((lio->intf_open) && (changed)) {
609 		print_link_info(netdev);
610 		lio->link_changes++;
611 
612 		if (lio->linfo.link.s.link_up) {
613 			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
614 			netif_carrier_on(netdev);
615 			wake_txqs(netdev);
616 		} else {
617 			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
618 			netif_carrier_off(netdev);
619 			stop_txqs(netdev);
620 		}
621 		if (lio->linfo.link.s.mtu != current_max_mtu) {
622 			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
623 				   current_max_mtu, lio->linfo.link.s.mtu);
624 			netdev->max_mtu = lio->linfo.link.s.mtu;
625 		}
626 		if (lio->linfo.link.s.mtu < netdev->mtu) {
627 			dev_warn(&oct->pci_dev->dev,
628 				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
629 				     netdev->mtu, lio->linfo.link.s.mtu);
630 			queue_delayed_work(lio->link_status_wq.wq,
631 					   &lio->link_status_wq.wk.work, 0);
632 		}
633 	}
634 }
635 
636 /**
637  * lio_sync_octeon_time - send latest localtime to octeon firmware so that
638  * firmware will correct it's time, in case there is a time skew
639  *
640  * @work: work scheduled to send time update to octeon firmware
641  **/
642 static void lio_sync_octeon_time(struct work_struct *work)
643 {
644 	struct cavium_wk *wk = (struct cavium_wk *)work;
645 	struct lio *lio = (struct lio *)wk->ctxptr;
646 	struct octeon_device *oct = lio->oct_dev;
647 	struct octeon_soft_command *sc;
648 	struct timespec64 ts;
649 	struct lio_time *lt;
650 	int ret;
651 
652 	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
653 	if (!sc) {
654 		dev_err(&oct->pci_dev->dev,
655 			"Failed to sync time to octeon: soft command allocation failed\n");
656 		return;
657 	}
658 
659 	lt = (struct lio_time *)sc->virtdptr;
660 
661 	/* Get time of the day */
662 	ktime_get_real_ts64(&ts);
663 	lt->sec = ts.tv_sec;
664 	lt->nsec = ts.tv_nsec;
665 	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
666 
667 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
668 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
669 				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
670 
671 	init_completion(&sc->complete);
672 	sc->sc_status = OCTEON_REQUEST_PENDING;
673 
674 	ret = octeon_send_soft_command(oct, sc);
675 	if (ret == IQ_SEND_FAILED) {
676 		dev_err(&oct->pci_dev->dev,
677 			"Failed to sync time to octeon: failed to send soft command\n");
678 		octeon_free_soft_command(oct, sc);
679 	} else {
680 		WRITE_ONCE(sc->caller_is_done, true);
681 	}
682 
683 	queue_delayed_work(lio->sync_octeon_time_wq.wq,
684 			   &lio->sync_octeon_time_wq.wk.work,
685 			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
686 }
687 
688 /**
689  * setup_sync_octeon_time_wq - Sets up the work to periodically update
690  * local time to octeon firmware
691  *
692  * @netdev - network device which should send time update to firmware
693  **/
694 static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
695 {
696 	struct lio *lio = GET_LIO(netdev);
697 	struct octeon_device *oct = lio->oct_dev;
698 
699 	lio->sync_octeon_time_wq.wq =
700 		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
701 	if (!lio->sync_octeon_time_wq.wq) {
702 		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
703 		return -1;
704 	}
705 	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
706 			  lio_sync_octeon_time);
707 	lio->sync_octeon_time_wq.wk.ctxptr = lio;
708 	queue_delayed_work(lio->sync_octeon_time_wq.wq,
709 			   &lio->sync_octeon_time_wq.wk.work,
710 			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
711 
712 	return 0;
713 }
714 
715 /**
716  * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
717  * to periodically update local time to octeon firmware
718  *
719  * @netdev - network device which should send time update to firmware
720  **/
721 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
722 {
723 	struct lio *lio = GET_LIO(netdev);
724 	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;
725 
726 	if (time_wq->wq) {
727 		cancel_delayed_work_sync(&time_wq->wk.work);
728 		destroy_workqueue(time_wq->wq);
729 	}
730 }
731 
732 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
733 {
734 	struct octeon_device *other_oct;
735 
736 	other_oct = lio_get_device(oct->octeon_id + 1);
737 
738 	if (other_oct && other_oct->pci_dev) {
739 		int oct_busnum, other_oct_busnum;
740 
741 		oct_busnum = oct->pci_dev->bus->number;
742 		other_oct_busnum = other_oct->pci_dev->bus->number;
743 
744 		if (oct_busnum == other_oct_busnum) {
745 			int oct_slot, other_oct_slot;
746 
747 			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
748 			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
749 
750 			if (oct_slot == other_oct_slot)
751 				return other_oct;
752 		}
753 	}
754 
755 	return NULL;
756 }
757 
758 static void disable_all_vf_links(struct octeon_device *oct)
759 {
760 	struct net_device *netdev;
761 	int max_vfs, vf, i;
762 
763 	if (!oct)
764 		return;
765 
766 	max_vfs = oct->sriov_info.max_vfs;
767 
768 	for (i = 0; i < oct->ifcount; i++) {
769 		netdev = oct->props[i].netdev;
770 		if (!netdev)
771 			continue;
772 
773 		for (vf = 0; vf < max_vfs; vf++)
774 			liquidio_set_vf_link_state(netdev, vf,
775 						   IFLA_VF_LINK_STATE_DISABLE);
776 	}
777 }
778 
779 static int liquidio_watchdog(void *param)
780 {
781 	bool err_msg_was_printed[LIO_MAX_CORES];
782 	u16 mask_of_crashed_or_stuck_cores = 0;
783 	bool all_vf_links_are_disabled = false;
784 	struct octeon_device *oct = param;
785 	struct octeon_device *other_oct;
786 #ifdef CONFIG_MODULE_UNLOAD
787 	long refcount, vfs_referencing_pf;
788 	u64 vfs_mask1, vfs_mask2;
789 #endif
790 	int core;
791 
792 	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
793 
794 	while (!kthread_should_stop()) {
795 		/* sleep for a couple of seconds so that we don't hog the CPU */
796 		set_current_state(TASK_INTERRUPTIBLE);
797 		schedule_timeout(msecs_to_jiffies(2000));
798 
799 		mask_of_crashed_or_stuck_cores =
800 		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
801 
802 		if (!mask_of_crashed_or_stuck_cores)
803 			continue;
804 
805 		WRITE_ONCE(oct->cores_crashed, true);
806 		other_oct = get_other_octeon_device(oct);
807 		if (other_oct)
808 			WRITE_ONCE(other_oct->cores_crashed, true);
809 
810 		for (core = 0; core < LIO_MAX_CORES; core++) {
811 			bool core_crashed_or_got_stuck;
812 
813 			core_crashed_or_got_stuck =
814 						(mask_of_crashed_or_stuck_cores
815 						 >> core) & 1;
816 
817 			if (core_crashed_or_got_stuck &&
818 			    !err_msg_was_printed[core]) {
819 				dev_err(&oct->pci_dev->dev,
820 					"ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
821 					core);
822 				err_msg_was_printed[core] = true;
823 			}
824 		}
825 
826 		if (all_vf_links_are_disabled)
827 			continue;
828 
829 		disable_all_vf_links(oct);
830 		disable_all_vf_links(other_oct);
831 		all_vf_links_are_disabled = true;
832 
833 #ifdef CONFIG_MODULE_UNLOAD
834 		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
835 		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
836 
837 		vfs_referencing_pf  = hweight64(vfs_mask1);
838 		vfs_referencing_pf += hweight64(vfs_mask2);
839 
840 		refcount = module_refcount(THIS_MODULE);
841 		if (refcount >= vfs_referencing_pf) {
842 			while (vfs_referencing_pf) {
843 				module_put(THIS_MODULE);
844 				vfs_referencing_pf--;
845 			}
846 		}
847 #endif
848 	}
849 
850 	return 0;
851 }
852 
853 /**
854  * \brief PCI probe handler
855  * @param pdev PCI device structure
856  * @param ent unused
857  */
858 static int
859 liquidio_probe(struct pci_dev *pdev,
860 	       const struct pci_device_id *ent __attribute__((unused)))
861 {
862 	struct octeon_device *oct_dev = NULL;
863 	struct handshake *hs;
864 
865 	oct_dev = octeon_allocate_device(pdev->device,
866 					 sizeof(struct octeon_device_priv));
867 	if (!oct_dev) {
868 		dev_err(&pdev->dev, "Unable to allocate device\n");
869 		return -ENOMEM;
870 	}
871 
872 	if (pdev->device == OCTEON_CN23XX_PF_VID)
873 		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
874 
875 	/* Enable PTP for 6XXX Device */
876 	if (((pdev->device == OCTEON_CN66XX) ||
877 	     (pdev->device == OCTEON_CN68XX)))
878 		oct_dev->ptp_enable = true;
879 	else
880 		oct_dev->ptp_enable = false;
881 
882 	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
883 		 (u32)pdev->vendor, (u32)pdev->device);
884 
885 	/* Assign octeon_device for this device to the private data area. */
886 	pci_set_drvdata(pdev, oct_dev);
887 
888 	/* set linux specific device pointer */
889 	oct_dev->pci_dev = (void *)pdev;
890 
891 	oct_dev->subsystem_id = pdev->subsystem_vendor |
892 		(pdev->subsystem_device << 16);
893 
894 	hs = &handshake[oct_dev->octeon_id];
895 	init_completion(&hs->init);
896 	init_completion(&hs->started);
897 	hs->pci_dev = pdev;
898 
899 	if (oct_dev->octeon_id == 0)
900 		/* first LiquidIO NIC is detected */
901 		complete(&first_stage);
902 
903 	if (octeon_device_init(oct_dev)) {
904 		complete(&hs->init);
905 		liquidio_remove(pdev);
906 		return -ENOMEM;
907 	}
908 
909 	if (OCTEON_CN23XX_PF(oct_dev)) {
910 		u8 bus, device, function;
911 
912 		if (atomic_read(oct_dev->adapter_refcount) == 1) {
913 			/* Each NIC gets one watchdog kernel thread.  The first
914 			 * PF (of each NIC) that gets pci_driver->probe()'d
915 			 * creates that thread.
916 			 */
917 			bus = pdev->bus->number;
918 			device = PCI_SLOT(pdev->devfn);
919 			function = PCI_FUNC(pdev->devfn);
920 			oct_dev->watchdog_task = kthread_create(
921 			    liquidio_watchdog, oct_dev,
922 			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
923 			if (!IS_ERR(oct_dev->watchdog_task)) {
924 				wake_up_process(oct_dev->watchdog_task);
925 			} else {
926 				oct_dev->watchdog_task = NULL;
927 				dev_err(&oct_dev->pci_dev->dev,
928 					"failed to create kernel_thread\n");
929 				liquidio_remove(pdev);
930 				return -1;
931 			}
932 		}
933 	}
934 
935 	oct_dev->rx_pause = 1;
936 	oct_dev->tx_pause = 1;
937 
938 	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
939 
940 	return 0;
941 }
942 
943 static bool fw_type_is_auto(void)
944 {
945 	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
946 		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
947 }
948 
949 /**
950  * \brief PCI FLR for each Octeon device.
951  * @param oct octeon device
952  */
953 static void octeon_pci_flr(struct octeon_device *oct)
954 {
955 	int rc;
956 
957 	pci_save_state(oct->pci_dev);
958 
959 	pci_cfg_access_lock(oct->pci_dev);
960 
961 	/* Quiesce the device completely */
962 	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
963 			      PCI_COMMAND_INTX_DISABLE);
964 
965 	rc = __pci_reset_function_locked(oct->pci_dev);
966 
967 	if (rc != 0)
968 		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
969 			rc, oct->pf_num);
970 
971 	pci_cfg_access_unlock(oct->pci_dev);
972 
973 	pci_restore_state(oct->pci_dev);
974 }
975 
976 /**
977  *\brief Destroy resources associated with octeon device
978  * @param pdev PCI device structure
979  * @param ent unused
980  */
981 static void octeon_destroy_resources(struct octeon_device *oct)
982 {
983 	int i, refcount;
984 	struct msix_entry *msix_entries;
985 	struct octeon_device_priv *oct_priv =
986 		(struct octeon_device_priv *)oct->priv;
987 
988 	struct handshake *hs;
989 
990 	switch (atomic_read(&oct->status)) {
991 	case OCT_DEV_RUNNING:
992 	case OCT_DEV_CORE_OK:
993 
994 		/* No more instructions will be forwarded. */
995 		atomic_set(&oct->status, OCT_DEV_IN_RESET);
996 
997 		oct->app_mode = CVM_DRV_INVALID_APP;
998 		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
999 			lio_get_state_string(&oct->status));
1000 
1001 		schedule_timeout_uninterruptible(HZ / 10);
1002 
1003 		/* fallthrough */
1004 	case OCT_DEV_HOST_OK:
1005 
1006 		/* fallthrough */
1007 	case OCT_DEV_CONSOLE_INIT_DONE:
1008 		/* Remove any consoles */
1009 		octeon_remove_consoles(oct);
1010 
1011 		/* fallthrough */
1012 	case OCT_DEV_IO_QUEUES_DONE:
1013 		if (lio_wait_for_instr_fetch(oct))
1014 			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
1015 
1016 		if (wait_for_pending_requests(oct))
1017 			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
1018 
1019 		/* Disable the input and output queues now. No more packets will
1020 		 * arrive from Octeon, but we should wait for all packet
1021 		 * processing to finish.
1022 		 */
1023 		oct->fn_list.disable_io_queues(oct);
1024 
1025 		if (lio_wait_for_oq_pkts(oct))
1026 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
1027 
1028 		/* Force all requests waiting to be fetched by OCTEON to
1029 		 * complete.
1030 		 */
1031 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1032 			struct octeon_instr_queue *iq;
1033 
1034 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
1035 				continue;
1036 			iq = oct->instr_queue[i];
1037 
1038 			if (atomic_read(&iq->instr_pending)) {
1039 				spin_lock_bh(&iq->lock);
1040 				iq->fill_cnt = 0;
1041 				iq->octeon_read_index = iq->host_write_index;
1042 				iq->stats.instr_processed +=
1043 					atomic_read(&iq->instr_pending);
1044 				lio_process_iq_request_list(oct, iq, 0);
1045 				spin_unlock_bh(&iq->lock);
1046 			}
1047 		}
1048 
1049 		lio_process_ordered_list(oct, 1);
1050 		octeon_free_sc_done_list(oct);
1051 		octeon_free_sc_zombie_list(oct);
1052 
1053 	/* fallthrough */
1054 	case OCT_DEV_INTR_SET_DONE:
1055 		/* Disable interrupts  */
1056 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1057 
1058 		if (oct->msix_on) {
1059 			msix_entries = (struct msix_entry *)oct->msix_entries;
1060 			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
1061 				if (oct->ioq_vector[i].vector) {
1062 					/* clear the affinity_cpumask */
1063 					irq_set_affinity_hint(
1064 							msix_entries[i].vector,
1065 							NULL);
1066 					free_irq(msix_entries[i].vector,
1067 						 &oct->ioq_vector[i]);
1068 					oct->ioq_vector[i].vector = 0;
1069 				}
1070 			}
1071 			/* non-iov vector's argument is oct struct */
1072 			free_irq(msix_entries[i].vector, oct);
1073 
1074 			pci_disable_msix(oct->pci_dev);
1075 			kfree(oct->msix_entries);
1076 			oct->msix_entries = NULL;
1077 		} else {
1078 			/* Release the interrupt line */
1079 			free_irq(oct->pci_dev->irq, oct);
1080 
1081 			if (oct->flags & LIO_FLAG_MSI_ENABLED)
1082 				pci_disable_msi(oct->pci_dev);
1083 		}
1084 
1085 		kfree(oct->irq_name_storage);
1086 		oct->irq_name_storage = NULL;
1087 
1088 	/* fallthrough */
1089 	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
1090 		if (OCTEON_CN23XX_PF(oct))
1091 			octeon_free_ioq_vector(oct);
1092 
1093 	/* fallthrough */
1094 	case OCT_DEV_MBOX_SETUP_DONE:
1095 		if (OCTEON_CN23XX_PF(oct))
1096 			oct->fn_list.free_mbox(oct);
1097 
1098 	/* fallthrough */
1099 	case OCT_DEV_IN_RESET:
1100 	case OCT_DEV_DROQ_INIT_DONE:
1101 		/* Wait for any pending operations */
1102 		mdelay(100);
1103 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1104 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
1105 				continue;
1106 			octeon_delete_droq(oct, i);
1107 		}
1108 
1109 		/* Force any pending handshakes to complete */
1110 		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
1111 			hs = &handshake[i];
1112 
1113 			if (hs->pci_dev) {
1114 				handshake[oct->octeon_id].init_ok = 0;
1115 				complete(&handshake[oct->octeon_id].init);
1116 				handshake[oct->octeon_id].started_ok = 0;
1117 				complete(&handshake[oct->octeon_id].started);
1118 			}
1119 		}
1120 
1121 		/* fallthrough */
1122 	case OCT_DEV_RESP_LIST_INIT_DONE:
1123 		octeon_delete_response_list(oct);
1124 
1125 		/* fallthrough */
1126 	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1127 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1128 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
1129 				continue;
1130 			octeon_delete_instr_queue(oct, i);
1131 		}
1132 #ifdef CONFIG_PCI_IOV
1133 		if (oct->sriov_info.sriov_enabled)
1134 			pci_disable_sriov(oct->pci_dev);
1135 #endif
1136 		/* fallthrough */
1137 	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
1138 		octeon_free_sc_buffer_pool(oct);
1139 
1140 		/* fallthrough */
1141 	case OCT_DEV_DISPATCH_INIT_DONE:
1142 		octeon_delete_dispatch_list(oct);
1143 		cancel_delayed_work_sync(&oct->nic_poll_work.work);
1144 
1145 		/* fallthrough */
1146 	case OCT_DEV_PCI_MAP_DONE:
1147 		refcount = octeon_deregister_device(oct);
1148 
1149 		/* Soft reset the octeon device before exiting.
1150 		 * However, if fw was loaded from card (i.e. autoboot),
1151 		 * perform an FLR instead.
1152 		 * Implementation note: only soft-reset the device
1153 		 * if it is a CN6XXX OR the LAST CN23XX device.
1154 		 */
1155 		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
1156 			octeon_pci_flr(oct);
1157 		else if (OCTEON_CN6XXX(oct) || !refcount)
1158 			oct->fn_list.soft_reset(oct);
1159 
1160 		octeon_unmap_pci_barx(oct, 0);
1161 		octeon_unmap_pci_barx(oct, 1);
1162 
1163 		/* fallthrough */
1164 	case OCT_DEV_PCI_ENABLE_DONE:
1165 		pci_clear_master(oct->pci_dev);
1166 		/* Disable the device, releasing the PCI INT */
1167 		pci_disable_device(oct->pci_dev);
1168 
1169 		/* fallthrough */
1170 	case OCT_DEV_BEGIN_STATE:
1171 		/* Nothing to be done here either */
1172 		break;
1173 	}                       /* end switch (oct->status) */
1174 
1175 	tasklet_kill(&oct_priv->droq_tasklet);
1176 }
1177 
1178 /**
1179  * \brief Send Rx control command
1180  * @param lio per-network private data
1181  * @param start_stop whether to start or stop
1182  */
1183 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1184 {
1185 	struct octeon_soft_command *sc;
1186 	union octnet_cmd *ncmd;
1187 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1188 	int retval;
1189 
1190 	if (oct->props[lio->ifidx].rx_on == start_stop)
1191 		return;
1192 
1193 	sc = (struct octeon_soft_command *)
1194 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1195 					  16, 0);
1196 
1197 	ncmd = (union octnet_cmd *)sc->virtdptr;
1198 
1199 	ncmd->u64 = 0;
1200 	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1201 	ncmd->s.param1 = start_stop;
1202 
1203 	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1204 
1205 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1206 
1207 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1208 				    OPCODE_NIC_CMD, 0, 0, 0);
1209 
1210 	init_completion(&sc->complete);
1211 	sc->sc_status = OCTEON_REQUEST_PENDING;
1212 
1213 	retval = octeon_send_soft_command(oct, sc);
1214 	if (retval == IQ_SEND_FAILED) {
1215 		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1216 		octeon_free_soft_command(oct, sc);
1217 		return;
1218 	} else {
1219 		/* Sleep on a wait queue till the cond flag indicates that the
1220 		 * response arrived or timed-out.
1221 		 */
1222 		retval = wait_for_sc_completion_timeout(oct, sc, 0);
1223 		if (retval)
1224 			return;
1225 
1226 		oct->props[lio->ifidx].rx_on = start_stop;
1227 		WRITE_ONCE(sc->caller_is_done, true);
1228 	}
1229 }
1230 
1231 /**
1232  * \brief Destroy NIC device interface
1233  * @param oct octeon device
1234  * @param ifidx which interface to destroy
1235  *
1236  * Cleanup associated with each interface for an Octeon device  when NIC
1237  * module is being unloaded or if initialization fails during load.
1238  */
1239 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1240 {
1241 	struct net_device *netdev = oct->props[ifidx].netdev;
1242 	struct octeon_device_priv *oct_priv =
1243 		(struct octeon_device_priv *)oct->priv;
1244 	struct napi_struct *napi, *n;
1245 	struct lio *lio;
1246 
1247 	if (!netdev) {
1248 		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1249 			__func__, ifidx);
1250 		return;
1251 	}
1252 
1253 	lio = GET_LIO(netdev);
1254 
1255 	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1256 
1257 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1258 		liquidio_stop(netdev);
1259 
1260 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1261 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1262 			napi_disable(napi);
1263 
1264 		oct->props[lio->ifidx].napi_enabled = 0;
1265 
1266 		if (OCTEON_CN23XX_PF(oct))
1267 			oct->droq[0]->ops.poll_mode = 0;
1268 	}
1269 
1270 	/* Delete NAPI */
1271 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1272 		netif_napi_del(napi);
1273 
1274 	tasklet_enable(&oct_priv->droq_tasklet);
1275 
1276 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1277 		unregister_netdev(netdev);
1278 
1279 	cleanup_sync_octeon_time_wq(netdev);
1280 	cleanup_link_status_change_wq(netdev);
1281 
1282 	cleanup_rx_oom_poll_fn(netdev);
1283 
1284 	lio_delete_glists(lio);
1285 
1286 	free_netdev(netdev);
1287 
1288 	oct->props[ifidx].gmxport = -1;
1289 
1290 	oct->props[ifidx].netdev = NULL;
1291 }
1292 
1293 /**
1294  * \brief Stop complete NIC functionality
1295  * @param oct octeon device
1296  */
1297 static int liquidio_stop_nic_module(struct octeon_device *oct)
1298 {
1299 	int i, j;
1300 	struct lio *lio;
1301 
1302 	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1303 	if (!oct->ifcount) {
1304 		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1305 		return 1;
1306 	}
1307 
1308 	spin_lock_bh(&oct->cmd_resp_wqlock);
1309 	oct->cmd_resp_state = OCT_DRV_OFFLINE;
1310 	spin_unlock_bh(&oct->cmd_resp_wqlock);
1311 
1312 	lio_vf_rep_destroy(oct);
1313 
1314 	for (i = 0; i < oct->ifcount; i++) {
1315 		lio = GET_LIO(oct->props[i].netdev);
1316 		for (j = 0; j < oct->num_oqs; j++)
1317 			octeon_unregister_droq_ops(oct,
1318 						   lio->linfo.rxpciq[j].s.q_no);
1319 	}
1320 
1321 	for (i = 0; i < oct->ifcount; i++)
1322 		liquidio_destroy_nic_device(oct, i);
1323 
1324 	if (oct->devlink) {
1325 		devlink_unregister(oct->devlink);
1326 		devlink_free(oct->devlink);
1327 		oct->devlink = NULL;
1328 	}
1329 
1330 	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1331 	return 0;
1332 }
1333 
1334 /**
1335  * \brief Cleans up resources at unload time
1336  * @param pdev PCI device structure
1337  */
1338 static void liquidio_remove(struct pci_dev *pdev)
1339 {
1340 	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1341 
1342 	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1343 
1344 	if (oct_dev->watchdog_task)
1345 		kthread_stop(oct_dev->watchdog_task);
1346 
1347 	if (!oct_dev->octeon_id &&
1348 	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
1349 		lio_vf_rep_modexit();
1350 
1351 	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1352 		liquidio_stop_nic_module(oct_dev);
1353 
1354 	/* Reset the octeon device and cleanup all memory allocated for
1355 	 * the octeon device by driver.
1356 	 */
1357 	octeon_destroy_resources(oct_dev);
1358 
1359 	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1360 
1361 	/* This octeon device has been removed. Update the global
1362 	 * data structure to reflect this. Free the device structure.
1363 	 */
1364 	octeon_free_device_mem(oct_dev);
1365 }
1366 
1367 /**
1368  * \brief Identify the Octeon device and to map the BAR address space
1369  * @param oct octeon device
1370  */
1371 static int octeon_chip_specific_setup(struct octeon_device *oct)
1372 {
1373 	u32 dev_id, rev_id;
1374 	int ret = 1;
1375 	char *s;
1376 
1377 	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1378 	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1379 	oct->rev_id = rev_id & 0xff;
1380 
1381 	switch (dev_id) {
1382 	case OCTEON_CN68XX_PCIID:
1383 		oct->chip_id = OCTEON_CN68XX;
1384 		ret = lio_setup_cn68xx_octeon_device(oct);
1385 		s = "CN68XX";
1386 		break;
1387 
1388 	case OCTEON_CN66XX_PCIID:
1389 		oct->chip_id = OCTEON_CN66XX;
1390 		ret = lio_setup_cn66xx_octeon_device(oct);
1391 		s = "CN66XX";
1392 		break;
1393 
1394 	case OCTEON_CN23XX_PCIID_PF:
1395 		oct->chip_id = OCTEON_CN23XX_PF_VID;
1396 		ret = setup_cn23xx_octeon_pf_device(oct);
1397 		if (ret)
1398 			break;
1399 #ifdef CONFIG_PCI_IOV
1400 		if (!ret)
1401 			pci_sriov_set_totalvfs(oct->pci_dev,
1402 					       oct->sriov_info.max_vfs);
1403 #endif
1404 		s = "CN23XX";
1405 		break;
1406 
1407 	default:
1408 		s = "?";
1409 		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1410 			dev_id);
1411 	}
1412 
1413 	if (!ret)
1414 		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1415 			 OCTEON_MAJOR_REV(oct),
1416 			 OCTEON_MINOR_REV(oct),
1417 			 octeon_get_conf(oct)->card_name,
1418 			 LIQUIDIO_VERSION);
1419 
1420 	return ret;
1421 }
1422 
1423 /**
1424  * \brief PCI initialization for each Octeon device.
1425  * @param oct octeon device
1426  */
1427 static int octeon_pci_os_setup(struct octeon_device *oct)
1428 {
1429 	/* setup PCI stuff first */
1430 	if (pci_enable_device(oct->pci_dev)) {
1431 		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1432 		return 1;
1433 	}
1434 
1435 	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1436 		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1437 		pci_disable_device(oct->pci_dev);
1438 		return 1;
1439 	}
1440 
1441 	/* Enable PCI DMA Master. */
1442 	pci_set_master(oct->pci_dev);
1443 
1444 	return 0;
1445 }
1446 
1447 /**
1448  * \brief Unmap and free network buffer
1449  * @param buf buffer
1450  */
1451 static void free_netbuf(void *buf)
1452 {
1453 	struct sk_buff *skb;
1454 	struct octnet_buf_free_info *finfo;
1455 	struct lio *lio;
1456 
1457 	finfo = (struct octnet_buf_free_info *)buf;
1458 	skb = finfo->skb;
1459 	lio = finfo->lio;
1460 
1461 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1462 			 DMA_TO_DEVICE);
1463 
1464 	tx_buffer_free(skb);
1465 }
1466 
1467 /**
1468  * \brief Unmap and free gather buffer
1469  * @param buf buffer
1470  */
1471 static void free_netsgbuf(void *buf)
1472 {
1473 	struct octnet_buf_free_info *finfo;
1474 	struct sk_buff *skb;
1475 	struct lio *lio;
1476 	struct octnic_gather *g;
1477 	int i, frags, iq;
1478 
1479 	finfo = (struct octnet_buf_free_info *)buf;
1480 	skb = finfo->skb;
1481 	lio = finfo->lio;
1482 	g = finfo->g;
1483 	frags = skb_shinfo(skb)->nr_frags;
1484 
1485 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1486 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1487 			 DMA_TO_DEVICE);
1488 
1489 	i = 1;
1490 	while (frags--) {
1491 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1492 
1493 		pci_unmap_page((lio->oct_dev)->pci_dev,
1494 			       g->sg[(i >> 2)].ptr[(i & 3)],
1495 			       frag->size, DMA_TO_DEVICE);
1496 		i++;
1497 	}
1498 
1499 	iq = skb_iq(lio->oct_dev, skb);
1500 	spin_lock(&lio->glist_lock[iq]);
1501 	list_add_tail(&g->list, &lio->glist[iq]);
1502 	spin_unlock(&lio->glist_lock[iq]);
1503 
1504 	tx_buffer_free(skb);
1505 }
1506 
1507 /**
1508  * \brief Unmap and free gather buffer with response
1509  * @param buf buffer
1510  */
1511 static void free_netsgbuf_with_resp(void *buf)
1512 {
1513 	struct octeon_soft_command *sc;
1514 	struct octnet_buf_free_info *finfo;
1515 	struct sk_buff *skb;
1516 	struct lio *lio;
1517 	struct octnic_gather *g;
1518 	int i, frags, iq;
1519 
1520 	sc = (struct octeon_soft_command *)buf;
1521 	skb = (struct sk_buff *)sc->callback_arg;
1522 	finfo = (struct octnet_buf_free_info *)&skb->cb;
1523 
1524 	lio = finfo->lio;
1525 	g = finfo->g;
1526 	frags = skb_shinfo(skb)->nr_frags;
1527 
1528 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1529 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1530 			 DMA_TO_DEVICE);
1531 
1532 	i = 1;
1533 	while (frags--) {
1534 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1535 
1536 		pci_unmap_page((lio->oct_dev)->pci_dev,
1537 			       g->sg[(i >> 2)].ptr[(i & 3)],
1538 			       frag->size, DMA_TO_DEVICE);
1539 		i++;
1540 	}
1541 
1542 	iq = skb_iq(lio->oct_dev, skb);
1543 
1544 	spin_lock(&lio->glist_lock[iq]);
1545 	list_add_tail(&g->list, &lio->glist[iq]);
1546 	spin_unlock(&lio->glist_lock[iq]);
1547 
1548 	/* Don't free the skb yet */
1549 }
1550 
1551 /**
1552  * \brief Adjust ptp frequency
1553  * @param ptp PTP clock info
1554  * @param ppb how much to adjust by, in parts-per-billion
1555  */
1556 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1557 {
1558 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1559 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1560 	u64 comp, delta;
1561 	unsigned long flags;
1562 	bool neg_adj = false;
1563 
1564 	if (ppb < 0) {
1565 		neg_adj = true;
1566 		ppb = -ppb;
1567 	}
1568 
1569 	/* The hardware adds the clock compensation value to the
1570 	 * PTP clock on every coprocessor clock cycle, so we
1571 	 * compute the delta in terms of coprocessor clocks.
1572 	 */
1573 	delta = (u64)ppb << 32;
1574 	do_div(delta, oct->coproc_clock_rate);
1575 
1576 	spin_lock_irqsave(&lio->ptp_lock, flags);
1577 	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1578 	if (neg_adj)
1579 		comp -= delta;
1580 	else
1581 		comp += delta;
1582 	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1583 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1584 
1585 	return 0;
1586 }
1587 
1588 /**
1589  * \brief Adjust ptp time
1590  * @param ptp PTP clock info
1591  * @param delta how much to adjust by, in nanosecs
1592  */
1593 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1594 {
1595 	unsigned long flags;
1596 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1597 
1598 	spin_lock_irqsave(&lio->ptp_lock, flags);
1599 	lio->ptp_adjust += delta;
1600 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1601 
1602 	return 0;
1603 }
1604 
1605 /**
1606  * \brief Get hardware clock time, including any adjustment
1607  * @param ptp PTP clock info
1608  * @param ts timespec
1609  */
1610 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1611 				struct timespec64 *ts)
1612 {
1613 	u64 ns;
1614 	unsigned long flags;
1615 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1616 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1617 
1618 	spin_lock_irqsave(&lio->ptp_lock, flags);
1619 	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1620 	ns += lio->ptp_adjust;
1621 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1622 
1623 	*ts = ns_to_timespec64(ns);
1624 
1625 	return 0;
1626 }
1627 
1628 /**
1629  * \brief Set hardware clock time. Reset adjustment
1630  * @param ptp PTP clock info
1631  * @param ts timespec
1632  */
1633 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1634 				const struct timespec64 *ts)
1635 {
1636 	u64 ns;
1637 	unsigned long flags;
1638 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1639 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1640 
1641 	ns = timespec64_to_ns(ts);
1642 
1643 	spin_lock_irqsave(&lio->ptp_lock, flags);
1644 	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1645 	lio->ptp_adjust = 0;
1646 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1647 
1648 	return 0;
1649 }
1650 
1651 /**
1652  * \brief Check if PTP is enabled
1653  * @param ptp PTP clock info
1654  * @param rq request
1655  * @param on is it on
1656  */
1657 static int
1658 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1659 		    struct ptp_clock_request *rq __attribute__((unused)),
1660 		    int on __attribute__((unused)))
1661 {
1662 	return -EOPNOTSUPP;
1663 }
1664 
1665 /**
1666  * \brief Open PTP clock source
1667  * @param netdev network device
1668  */
1669 static void oct_ptp_open(struct net_device *netdev)
1670 {
1671 	struct lio *lio = GET_LIO(netdev);
1672 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1673 
1674 	spin_lock_init(&lio->ptp_lock);
1675 
1676 	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1677 	lio->ptp_info.owner = THIS_MODULE;
1678 	lio->ptp_info.max_adj = 250000000;
1679 	lio->ptp_info.n_alarm = 0;
1680 	lio->ptp_info.n_ext_ts = 0;
1681 	lio->ptp_info.n_per_out = 0;
1682 	lio->ptp_info.pps = 0;
1683 	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1684 	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1685 	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1686 	lio->ptp_info.settime64 = liquidio_ptp_settime;
1687 	lio->ptp_info.enable = liquidio_ptp_enable;
1688 
1689 	lio->ptp_adjust = 0;
1690 
1691 	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1692 					     &oct->pci_dev->dev);
1693 
1694 	if (IS_ERR(lio->ptp_clock))
1695 		lio->ptp_clock = NULL;
1696 }
1697 
1698 /**
1699  * \brief Init PTP clock
1700  * @param oct octeon device
1701  */
1702 static void liquidio_ptp_init(struct octeon_device *oct)
1703 {
1704 	u64 clock_comp, cfg;
1705 
1706 	clock_comp = (u64)NSEC_PER_SEC << 32;
1707 	do_div(clock_comp, oct->coproc_clock_rate);
1708 	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1709 
1710 	/* Enable */
1711 	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1712 	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1713 }
1714 
1715 /**
1716  * \brief Load firmware to device
1717  * @param oct octeon device
1718  *
1719  * Maps device to firmware filename, requests firmware, and downloads it
1720  */
1721 static int load_firmware(struct octeon_device *oct)
1722 {
1723 	int ret = 0;
1724 	const struct firmware *fw;
1725 	char fw_name[LIO_MAX_FW_FILENAME_LEN];
1726 	char *tmp_fw_type;
1727 
1728 	if (fw_type_is_auto()) {
1729 		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1730 		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
1731 	} else {
1732 		tmp_fw_type = fw_type;
1733 	}
1734 
1735 	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1736 		octeon_get_conf(oct)->card_name, tmp_fw_type,
1737 		LIO_FW_NAME_SUFFIX);
1738 
1739 	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1740 	if (ret) {
1741 		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1742 			fw_name);
1743 		release_firmware(fw);
1744 		return ret;
1745 	}
1746 
1747 	ret = octeon_download_firmware(oct, fw->data, fw->size);
1748 
1749 	release_firmware(fw);
1750 
1751 	return ret;
1752 }
1753 
1754 /**
1755  * \brief Poll routine for checking transmit queue status
1756  * @param work work_struct data structure
1757  */
1758 static void octnet_poll_check_txq_status(struct work_struct *work)
1759 {
1760 	struct cavium_wk *wk = (struct cavium_wk *)work;
1761 	struct lio *lio = (struct lio *)wk->ctxptr;
1762 
1763 	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1764 		return;
1765 
1766 	check_txq_status(lio);
1767 	queue_delayed_work(lio->txq_status_wq.wq,
1768 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1769 }
1770 
1771 /**
1772  * \brief Sets up the txq poll check
1773  * @param netdev network device
1774  */
1775 static inline int setup_tx_poll_fn(struct net_device *netdev)
1776 {
1777 	struct lio *lio = GET_LIO(netdev);
1778 	struct octeon_device *oct = lio->oct_dev;
1779 
1780 	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1781 						WQ_MEM_RECLAIM, 0);
1782 	if (!lio->txq_status_wq.wq) {
1783 		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1784 		return -1;
1785 	}
1786 	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1787 			  octnet_poll_check_txq_status);
1788 	lio->txq_status_wq.wk.ctxptr = lio;
1789 	queue_delayed_work(lio->txq_status_wq.wq,
1790 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1791 	return 0;
1792 }
1793 
1794 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1795 {
1796 	struct lio *lio = GET_LIO(netdev);
1797 
1798 	if (lio->txq_status_wq.wq) {
1799 		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1800 		destroy_workqueue(lio->txq_status_wq.wq);
1801 	}
1802 }
1803 
1804 /**
1805  * \brief Net device open for LiquidIO
1806  * @param netdev network device
1807  */
1808 static int liquidio_open(struct net_device *netdev)
1809 {
1810 	struct lio *lio = GET_LIO(netdev);
1811 	struct octeon_device *oct = lio->oct_dev;
1812 	struct octeon_device_priv *oct_priv =
1813 		(struct octeon_device_priv *)oct->priv;
1814 	struct napi_struct *napi, *n;
1815 
1816 	if (oct->props[lio->ifidx].napi_enabled == 0) {
1817 		tasklet_disable(&oct_priv->droq_tasklet);
1818 
1819 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1820 			napi_enable(napi);
1821 
1822 		oct->props[lio->ifidx].napi_enabled = 1;
1823 
1824 		if (OCTEON_CN23XX_PF(oct))
1825 			oct->droq[0]->ops.poll_mode = 1;
1826 	}
1827 
1828 	if (oct->ptp_enable)
1829 		oct_ptp_open(netdev);
1830 
1831 	ifstate_set(lio, LIO_IFSTATE_RUNNING);
1832 
1833 	if (OCTEON_CN23XX_PF(oct)) {
1834 		if (!oct->msix_on)
1835 			if (setup_tx_poll_fn(netdev))
1836 				return -1;
1837 	} else {
1838 		if (setup_tx_poll_fn(netdev))
1839 			return -1;
1840 	}
1841 
1842 	netif_tx_start_all_queues(netdev);
1843 
1844 	/* Ready for link status updates */
1845 	lio->intf_open = 1;
1846 
1847 	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1848 
1849 	/* tell Octeon to start forwarding packets to host */
1850 	send_rx_ctrl_cmd(lio, 1);
1851 
1852 	/* start periodical statistics fetch */
1853 	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
1854 	lio->stats_wk.ctxptr = lio;
1855 	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
1856 					(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
1857 
1858 	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
1859 		 netdev->name);
1860 
1861 	return 0;
1862 }
1863 
1864 /**
1865  * \brief Net device stop for LiquidIO
1866  * @param netdev network device
1867  */
1868 static int liquidio_stop(struct net_device *netdev)
1869 {
1870 	struct lio *lio = GET_LIO(netdev);
1871 	struct octeon_device *oct = lio->oct_dev;
1872 	struct octeon_device_priv *oct_priv =
1873 		(struct octeon_device_priv *)oct->priv;
1874 	struct napi_struct *napi, *n;
1875 
1876 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1877 
1878 	/* Stop any link updates */
1879 	lio->intf_open = 0;
1880 
1881 	stop_txqs(netdev);
1882 
1883 	/* Inform that netif carrier is down */
1884 	netif_carrier_off(netdev);
1885 	netif_tx_disable(netdev);
1886 
1887 	lio->linfo.link.s.link_up = 0;
1888 	lio->link_changes++;
1889 
1890 	/* Tell Octeon that nic interface is down. */
1891 	send_rx_ctrl_cmd(lio, 0);
1892 
1893 	if (OCTEON_CN23XX_PF(oct)) {
1894 		if (!oct->msix_on)
1895 			cleanup_tx_poll_fn(netdev);
1896 	} else {
1897 		cleanup_tx_poll_fn(netdev);
1898 	}
1899 
1900 	cancel_delayed_work_sync(&lio->stats_wk.work);
1901 
1902 	if (lio->ptp_clock) {
1903 		ptp_clock_unregister(lio->ptp_clock);
1904 		lio->ptp_clock = NULL;
1905 	}
1906 
1907 	/* Wait for any pending Rx descriptors */
1908 	if (lio_wait_for_clean_oq(oct))
1909 		netif_info(lio, rx_err, lio->netdev,
1910 			   "Proceeding with stop interface after partial RX desc processing\n");
1911 
1912 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1913 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1914 			napi_disable(napi);
1915 
1916 		oct->props[lio->ifidx].napi_enabled = 0;
1917 
1918 		if (OCTEON_CN23XX_PF(oct))
1919 			oct->droq[0]->ops.poll_mode = 0;
1920 
1921 		tasklet_enable(&oct_priv->droq_tasklet);
1922 	}
1923 
1924 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1925 
1926 	return 0;
1927 }
1928 
1929 /**
1930  * \brief Converts a mask based on net device flags
1931  * @param netdev network device
1932  *
1933  * This routine generates a octnet_ifflags mask from the net device flags
1934  * received from the OS.
1935  */
1936 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1937 {
1938 	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1939 
1940 	if (netdev->flags & IFF_PROMISC)
1941 		f |= OCTNET_IFFLAG_PROMISC;
1942 
1943 	if (netdev->flags & IFF_ALLMULTI)
1944 		f |= OCTNET_IFFLAG_ALLMULTI;
1945 
1946 	if (netdev->flags & IFF_MULTICAST) {
1947 		f |= OCTNET_IFFLAG_MULTICAST;
1948 
1949 		/* Accept all multicast addresses if there are more than we
1950 		 * can handle
1951 		 */
1952 		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1953 			f |= OCTNET_IFFLAG_ALLMULTI;
1954 	}
1955 
1956 	if (netdev->flags & IFF_BROADCAST)
1957 		f |= OCTNET_IFFLAG_BROADCAST;
1958 
1959 	return f;
1960 }
1961 
1962 /**
1963  * \brief Net device set_multicast_list
1964  * @param netdev network device
1965  */
1966 static void liquidio_set_mcast_list(struct net_device *netdev)
1967 {
1968 	struct lio *lio = GET_LIO(netdev);
1969 	struct octeon_device *oct = lio->oct_dev;
1970 	struct octnic_ctrl_pkt nctrl;
1971 	struct netdev_hw_addr *ha;
1972 	u64 *mc;
1973 	int ret;
1974 	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1975 
1976 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1977 
1978 	/* Create a ctrl pkt command to be sent to core app. */
1979 	nctrl.ncmd.u64 = 0;
1980 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1981 	nctrl.ncmd.s.param1 = get_new_flags(netdev);
1982 	nctrl.ncmd.s.param2 = mc_count;
1983 	nctrl.ncmd.s.more = mc_count;
1984 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1985 	nctrl.netpndev = (u64)netdev;
1986 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1987 
1988 	/* copy all the addresses into the udd */
1989 	mc = &nctrl.udd[0];
1990 	netdev_for_each_mc_addr(ha, netdev) {
1991 		*mc = 0;
1992 		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
1993 		/* no need to swap bytes */
1994 
1995 		if (++mc > &nctrl.udd[mc_count])
1996 			break;
1997 	}
1998 
1999 	/* Apparently, any activity in this call from the kernel has to
2000 	 * be atomic. So we won't wait for response.
2001 	 */
2002 
2003 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2004 	if (ret) {
2005 		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2006 			ret);
2007 	}
2008 }
2009 
2010 /**
2011  * \brief Net device set_mac_address
2012  * @param netdev network device
2013  */
2014 static int liquidio_set_mac(struct net_device *netdev, void *p)
2015 {
2016 	int ret = 0;
2017 	struct lio *lio = GET_LIO(netdev);
2018 	struct octeon_device *oct = lio->oct_dev;
2019 	struct sockaddr *addr = (struct sockaddr *)p;
2020 	struct octnic_ctrl_pkt nctrl;
2021 
2022 	if (!is_valid_ether_addr(addr->sa_data))
2023 		return -EADDRNOTAVAIL;
2024 
2025 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2026 
2027 	nctrl.ncmd.u64 = 0;
2028 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2029 	nctrl.ncmd.s.param1 = 0;
2030 	nctrl.ncmd.s.more = 1;
2031 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2032 	nctrl.netpndev = (u64)netdev;
2033 
2034 	nctrl.udd[0] = 0;
2035 	/* The MAC Address is presented in network byte order. */
2036 	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2037 
2038 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2039 	if (ret < 0) {
2040 		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2041 		return -ENOMEM;
2042 	}
2043 
2044 	if (nctrl.sc_status) {
2045 		dev_err(&oct->pci_dev->dev,
2046 			"%s: MAC Address change failed. sc return=%x\n",
2047 			 __func__, nctrl.sc_status);
2048 		return -EIO;
2049 	}
2050 
2051 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2052 	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2053 
2054 	return 0;
2055 }
2056 
2057 static void
2058 liquidio_get_stats64(struct net_device *netdev,
2059 		     struct rtnl_link_stats64 *lstats)
2060 {
2061 	struct lio *lio = GET_LIO(netdev);
2062 	struct octeon_device *oct;
2063 	u64 pkts = 0, drop = 0, bytes = 0;
2064 	struct oct_droq_stats *oq_stats;
2065 	struct oct_iq_stats *iq_stats;
2066 	int i, iq_no, oq_no;
2067 
2068 	oct = lio->oct_dev;
2069 
2070 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2071 		return;
2072 
2073 	for (i = 0; i < oct->num_iqs; i++) {
2074 		iq_no = lio->linfo.txpciq[i].s.q_no;
2075 		iq_stats = &oct->instr_queue[iq_no]->stats;
2076 		pkts += iq_stats->tx_done;
2077 		drop += iq_stats->tx_dropped;
2078 		bytes += iq_stats->tx_tot_bytes;
2079 	}
2080 
2081 	lstats->tx_packets = pkts;
2082 	lstats->tx_bytes = bytes;
2083 	lstats->tx_dropped = drop;
2084 
2085 	pkts = 0;
2086 	drop = 0;
2087 	bytes = 0;
2088 
2089 	for (i = 0; i < oct->num_oqs; i++) {
2090 		oq_no = lio->linfo.rxpciq[i].s.q_no;
2091 		oq_stats = &oct->droq[oq_no]->stats;
2092 		pkts += oq_stats->rx_pkts_received;
2093 		drop += (oq_stats->rx_dropped +
2094 			 oq_stats->dropped_nodispatch +
2095 			 oq_stats->dropped_toomany +
2096 			 oq_stats->dropped_nomem);
2097 		bytes += oq_stats->rx_bytes_received;
2098 	}
2099 
2100 	lstats->rx_bytes = bytes;
2101 	lstats->rx_packets = pkts;
2102 	lstats->rx_dropped = drop;
2103 
2104 	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2105 	lstats->collisions = oct->link_stats.fromhost.total_collisions;
2106 
2107 	/* detailed rx_errors: */
2108 	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2109 	/* recved pkt with crc error    */
2110 	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2111 	/* recv'd frame alignment error */
2112 	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2113 	/* recv'r fifo overrun */
2114 	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2115 
2116 	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2117 		lstats->rx_frame_errors + lstats->rx_fifo_errors;
2118 
2119 	/* detailed tx_errors */
2120 	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2121 	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2122 	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2123 
2124 	lstats->tx_errors = lstats->tx_aborted_errors +
2125 		lstats->tx_carrier_errors +
2126 		lstats->tx_fifo_errors;
2127 }
2128 
2129 /**
2130  * \brief Handler for SIOCSHWTSTAMP ioctl
2131  * @param netdev network device
2132  * @param ifr interface request
2133  * @param cmd command
2134  */
2135 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2136 {
2137 	struct hwtstamp_config conf;
2138 	struct lio *lio = GET_LIO(netdev);
2139 
2140 	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2141 		return -EFAULT;
2142 
2143 	if (conf.flags)
2144 		return -EINVAL;
2145 
2146 	switch (conf.tx_type) {
2147 	case HWTSTAMP_TX_ON:
2148 	case HWTSTAMP_TX_OFF:
2149 		break;
2150 	default:
2151 		return -ERANGE;
2152 	}
2153 
2154 	switch (conf.rx_filter) {
2155 	case HWTSTAMP_FILTER_NONE:
2156 		break;
2157 	case HWTSTAMP_FILTER_ALL:
2158 	case HWTSTAMP_FILTER_SOME:
2159 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2160 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2161 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2162 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2163 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2164 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2165 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2166 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2167 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2168 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2169 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2170 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2171 	case HWTSTAMP_FILTER_NTP_ALL:
2172 		conf.rx_filter = HWTSTAMP_FILTER_ALL;
2173 		break;
2174 	default:
2175 		return -ERANGE;
2176 	}
2177 
2178 	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2179 		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2180 
2181 	else
2182 		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2183 
2184 	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2185 }
2186 
2187 /**
2188  * \brief ioctl handler
2189  * @param netdev network device
2190  * @param ifr interface request
2191  * @param cmd command
2192  */
2193 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2194 {
2195 	struct lio *lio = GET_LIO(netdev);
2196 
2197 	switch (cmd) {
2198 	case SIOCSHWTSTAMP:
2199 		if (lio->oct_dev->ptp_enable)
2200 			return hwtstamp_ioctl(netdev, ifr);
2201 		/* fall through */
2202 	default:
2203 		return -EOPNOTSUPP;
2204 	}
2205 }
2206 
2207 /**
2208  * \brief handle a Tx timestamp response
2209  * @param status response status
2210  * @param buf pointer to skb
2211  */
2212 static void handle_timestamp(struct octeon_device *oct,
2213 			     u32 status,
2214 			     void *buf)
2215 {
2216 	struct octnet_buf_free_info *finfo;
2217 	struct octeon_soft_command *sc;
2218 	struct oct_timestamp_resp *resp;
2219 	struct lio *lio;
2220 	struct sk_buff *skb = (struct sk_buff *)buf;
2221 
2222 	finfo = (struct octnet_buf_free_info *)skb->cb;
2223 	lio = finfo->lio;
2224 	sc = finfo->sc;
2225 	oct = lio->oct_dev;
2226 	resp = (struct oct_timestamp_resp *)sc->virtrptr;
2227 
2228 	if (status != OCTEON_REQUEST_DONE) {
2229 		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2230 			CVM_CAST64(status));
2231 		resp->timestamp = 0;
2232 	}
2233 
2234 	octeon_swap_8B_data(&resp->timestamp, 1);
2235 
2236 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2237 		struct skb_shared_hwtstamps ts;
2238 		u64 ns = resp->timestamp;
2239 
2240 		netif_info(lio, tx_done, lio->netdev,
2241 			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2242 			   skb, (unsigned long long)ns);
2243 		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2244 		skb_tstamp_tx(skb, &ts);
2245 	}
2246 
2247 	octeon_free_soft_command(oct, sc);
2248 	tx_buffer_free(skb);
2249 }
2250 
2251 /* \brief Send a data packet that will be timestamped
2252  * @param oct octeon device
2253  * @param ndata pointer to network data
2254  * @param finfo pointer to private network data
2255  */
2256 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2257 					 struct octnic_data_pkt *ndata,
2258 					 struct octnet_buf_free_info *finfo,
2259 					 int xmit_more)
2260 {
2261 	int retval;
2262 	struct octeon_soft_command *sc;
2263 	struct lio *lio;
2264 	int ring_doorbell;
2265 	u32 len;
2266 
2267 	lio = finfo->lio;
2268 
2269 	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2270 					    sizeof(struct oct_timestamp_resp));
2271 	finfo->sc = sc;
2272 
2273 	if (!sc) {
2274 		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2275 		return IQ_SEND_FAILED;
2276 	}
2277 
2278 	if (ndata->reqtype == REQTYPE_NORESP_NET)
2279 		ndata->reqtype = REQTYPE_RESP_NET;
2280 	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2281 		ndata->reqtype = REQTYPE_RESP_NET_SG;
2282 
2283 	sc->callback = handle_timestamp;
2284 	sc->callback_arg = finfo->skb;
2285 	sc->iq_no = ndata->q_no;
2286 
2287 	if (OCTEON_CN23XX_PF(oct))
2288 		len = (u32)((struct octeon_instr_ih3 *)
2289 			    (&sc->cmd.cmd3.ih3))->dlengsz;
2290 	else
2291 		len = (u32)((struct octeon_instr_ih2 *)
2292 			    (&sc->cmd.cmd2.ih2))->dlengsz;
2293 
2294 	ring_doorbell = !xmit_more;
2295 
2296 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2297 				     sc, len, ndata->reqtype);
2298 
2299 	if (retval == IQ_SEND_FAILED) {
2300 		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2301 			retval);
2302 		octeon_free_soft_command(oct, sc);
2303 	} else {
2304 		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2305 	}
2306 
2307 	return retval;
2308 }
2309 
2310 /** \brief Transmit networks packets to the Octeon interface
2311  * @param skbuff   skbuff struct to be passed to network layer.
2312  * @param netdev    pointer to network device
2313  * @returns whether the packet was transmitted to the device okay or not
2314  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2315  */
2316 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2317 {
2318 	struct lio *lio;
2319 	struct octnet_buf_free_info *finfo;
2320 	union octnic_cmd_setup cmdsetup;
2321 	struct octnic_data_pkt ndata;
2322 	struct octeon_device *oct;
2323 	struct oct_iq_stats *stats;
2324 	struct octeon_instr_irh *irh;
2325 	union tx_info *tx_info;
2326 	int status = 0;
2327 	int q_idx = 0, iq_no = 0;
2328 	int j, xmit_more = 0;
2329 	u64 dptr = 0;
2330 	u32 tag = 0;
2331 
2332 	lio = GET_LIO(netdev);
2333 	oct = lio->oct_dev;
2334 
2335 	q_idx = skb_iq(oct, skb);
2336 	tag = q_idx;
2337 	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2338 
2339 	stats = &oct->instr_queue[iq_no]->stats;
2340 
2341 	/* Check for all conditions in which the current packet cannot be
2342 	 * transmitted.
2343 	 */
2344 	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2345 	    (!lio->linfo.link.s.link_up) ||
2346 	    (skb->len <= 0)) {
2347 		netif_info(lio, tx_err, lio->netdev,
2348 			   "Transmit failed link_status : %d\n",
2349 			   lio->linfo.link.s.link_up);
2350 		goto lio_xmit_failed;
2351 	}
2352 
2353 	/* Use space in skb->cb to store info used to unmap and
2354 	 * free the buffers.
2355 	 */
2356 	finfo = (struct octnet_buf_free_info *)skb->cb;
2357 	finfo->lio = lio;
2358 	finfo->skb = skb;
2359 	finfo->sc = NULL;
2360 
2361 	/* Prepare the attributes for the data to be passed to OSI. */
2362 	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2363 
2364 	ndata.buf = (void *)finfo;
2365 
2366 	ndata.q_no = iq_no;
2367 
2368 	if (octnet_iq_is_full(oct, ndata.q_no)) {
2369 		/* defer sending if queue is full */
2370 		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2371 			   ndata.q_no);
2372 		stats->tx_iq_busy++;
2373 		return NETDEV_TX_BUSY;
2374 	}
2375 
2376 	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2377 	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2378 	 */
2379 
2380 	ndata.datasize = skb->len;
2381 
2382 	cmdsetup.u64 = 0;
2383 	cmdsetup.s.iq_no = iq_no;
2384 
2385 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2386 		if (skb->encapsulation) {
2387 			cmdsetup.s.tnl_csum = 1;
2388 			stats->tx_vxlan++;
2389 		} else {
2390 			cmdsetup.s.transport_csum = 1;
2391 		}
2392 	}
2393 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2394 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2395 		cmdsetup.s.timestamp = 1;
2396 	}
2397 
2398 	if (skb_shinfo(skb)->nr_frags == 0) {
2399 		cmdsetup.s.u.datasize = skb->len;
2400 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2401 
2402 		/* Offload checksum calculation for TCP/UDP packets */
2403 		dptr = dma_map_single(&oct->pci_dev->dev,
2404 				      skb->data,
2405 				      skb->len,
2406 				      DMA_TO_DEVICE);
2407 		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2408 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2409 				__func__);
2410 			stats->tx_dmamap_fail++;
2411 			return NETDEV_TX_BUSY;
2412 		}
2413 
2414 		if (OCTEON_CN23XX_PF(oct))
2415 			ndata.cmd.cmd3.dptr = dptr;
2416 		else
2417 			ndata.cmd.cmd2.dptr = dptr;
2418 		finfo->dptr = dptr;
2419 		ndata.reqtype = REQTYPE_NORESP_NET;
2420 
2421 	} else {
2422 		int i, frags;
2423 		struct skb_frag_struct *frag;
2424 		struct octnic_gather *g;
2425 
2426 		spin_lock(&lio->glist_lock[q_idx]);
2427 		g = (struct octnic_gather *)
2428 			lio_list_delete_head(&lio->glist[q_idx]);
2429 		spin_unlock(&lio->glist_lock[q_idx]);
2430 
2431 		if (!g) {
2432 			netif_info(lio, tx_err, lio->netdev,
2433 				   "Transmit scatter gather: glist null!\n");
2434 			goto lio_xmit_failed;
2435 		}
2436 
2437 		cmdsetup.s.gather = 1;
2438 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2439 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2440 
2441 		memset(g->sg, 0, g->sg_size);
2442 
2443 		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2444 						 skb->data,
2445 						 (skb->len - skb->data_len),
2446 						 DMA_TO_DEVICE);
2447 		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2448 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2449 				__func__);
2450 			stats->tx_dmamap_fail++;
2451 			return NETDEV_TX_BUSY;
2452 		}
2453 		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2454 
2455 		frags = skb_shinfo(skb)->nr_frags;
2456 		i = 1;
2457 		while (frags--) {
2458 			frag = &skb_shinfo(skb)->frags[i - 1];
2459 
2460 			g->sg[(i >> 2)].ptr[(i & 3)] =
2461 				dma_map_page(&oct->pci_dev->dev,
2462 					     frag->page.p,
2463 					     frag->page_offset,
2464 					     frag->size,
2465 					     DMA_TO_DEVICE);
2466 
2467 			if (dma_mapping_error(&oct->pci_dev->dev,
2468 					      g->sg[i >> 2].ptr[i & 3])) {
2469 				dma_unmap_single(&oct->pci_dev->dev,
2470 						 g->sg[0].ptr[0],
2471 						 skb->len - skb->data_len,
2472 						 DMA_TO_DEVICE);
2473 				for (j = 1; j < i; j++) {
2474 					frag = &skb_shinfo(skb)->frags[j - 1];
2475 					dma_unmap_page(&oct->pci_dev->dev,
2476 						       g->sg[j >> 2].ptr[j & 3],
2477 						       frag->size,
2478 						       DMA_TO_DEVICE);
2479 				}
2480 				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2481 					__func__);
2482 				return NETDEV_TX_BUSY;
2483 			}
2484 
2485 			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2486 			i++;
2487 		}
2488 
2489 		dptr = g->sg_dma_ptr;
2490 
2491 		if (OCTEON_CN23XX_PF(oct))
2492 			ndata.cmd.cmd3.dptr = dptr;
2493 		else
2494 			ndata.cmd.cmd2.dptr = dptr;
2495 		finfo->dptr = dptr;
2496 		finfo->g = g;
2497 
2498 		ndata.reqtype = REQTYPE_NORESP_NET_SG;
2499 	}
2500 
2501 	if (OCTEON_CN23XX_PF(oct)) {
2502 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2503 		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2504 	} else {
2505 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2506 		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2507 	}
2508 
2509 	if (skb_shinfo(skb)->gso_size) {
2510 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2511 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2512 		stats->tx_gso++;
2513 	}
2514 
2515 	/* HW insert VLAN tag */
2516 	if (skb_vlan_tag_present(skb)) {
2517 		irh->priority = skb_vlan_tag_get(skb) >> 13;
2518 		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2519 	}
2520 
2521 	xmit_more = skb->xmit_more;
2522 
2523 	if (unlikely(cmdsetup.s.timestamp))
2524 		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2525 	else
2526 		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2527 	if (status == IQ_SEND_FAILED)
2528 		goto lio_xmit_failed;
2529 
2530 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2531 
2532 	if (status == IQ_SEND_STOP)
2533 		netif_stop_subqueue(netdev, q_idx);
2534 
2535 	netif_trans_update(netdev);
2536 
2537 	if (tx_info->s.gso_segs)
2538 		stats->tx_done += tx_info->s.gso_segs;
2539 	else
2540 		stats->tx_done++;
2541 	stats->tx_tot_bytes += ndata.datasize;
2542 
2543 	return NETDEV_TX_OK;
2544 
2545 lio_xmit_failed:
2546 	stats->tx_dropped++;
2547 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2548 		   iq_no, stats->tx_dropped);
2549 	if (dptr)
2550 		dma_unmap_single(&oct->pci_dev->dev, dptr,
2551 				 ndata.datasize, DMA_TO_DEVICE);
2552 
2553 	octeon_ring_doorbell_locked(oct, iq_no);
2554 
2555 	tx_buffer_free(skb);
2556 	return NETDEV_TX_OK;
2557 }
2558 
2559 /** \brief Network device Tx timeout
2560  * @param netdev    pointer to network device
2561  */
2562 static void liquidio_tx_timeout(struct net_device *netdev)
2563 {
2564 	struct lio *lio;
2565 
2566 	lio = GET_LIO(netdev);
2567 
2568 	netif_info(lio, tx_err, lio->netdev,
2569 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2570 		   netdev->stats.tx_dropped);
2571 	netif_trans_update(netdev);
2572 	wake_txqs(netdev);
2573 }
2574 
2575 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2576 				    __be16 proto __attribute__((unused)),
2577 				    u16 vid)
2578 {
2579 	struct lio *lio = GET_LIO(netdev);
2580 	struct octeon_device *oct = lio->oct_dev;
2581 	struct octnic_ctrl_pkt nctrl;
2582 	int ret = 0;
2583 
2584 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2585 
2586 	nctrl.ncmd.u64 = 0;
2587 	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2588 	nctrl.ncmd.s.param1 = vid;
2589 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2590 	nctrl.netpndev = (u64)netdev;
2591 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2592 
2593 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2594 	if (ret) {
2595 		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2596 			ret);
2597 		if (ret > 0)
2598 			ret = -EIO;
2599 	}
2600 
2601 	return ret;
2602 }
2603 
2604 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2605 				     __be16 proto __attribute__((unused)),
2606 				     u16 vid)
2607 {
2608 	struct lio *lio = GET_LIO(netdev);
2609 	struct octeon_device *oct = lio->oct_dev;
2610 	struct octnic_ctrl_pkt nctrl;
2611 	int ret = 0;
2612 
2613 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2614 
2615 	nctrl.ncmd.u64 = 0;
2616 	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2617 	nctrl.ncmd.s.param1 = vid;
2618 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2619 	nctrl.netpndev = (u64)netdev;
2620 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2621 
2622 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2623 	if (ret) {
2624 		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2625 			ret);
2626 		if (ret > 0)
2627 			ret = -EIO;
2628 	}
2629 	return ret;
2630 }
2631 
2632 /** Sending command to enable/disable RX checksum offload
2633  * @param netdev                pointer to network device
2634  * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
2635  * @param rx_cmd_bit            OCTNET_CMD_RXCSUM_ENABLE/
2636  *                              OCTNET_CMD_RXCSUM_DISABLE
2637  * @returns                     SUCCESS or FAILURE
2638  */
2639 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2640 				       u8 rx_cmd)
2641 {
2642 	struct lio *lio = GET_LIO(netdev);
2643 	struct octeon_device *oct = lio->oct_dev;
2644 	struct octnic_ctrl_pkt nctrl;
2645 	int ret = 0;
2646 
2647 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2648 
2649 	nctrl.ncmd.u64 = 0;
2650 	nctrl.ncmd.s.cmd = command;
2651 	nctrl.ncmd.s.param1 = rx_cmd;
2652 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2653 	nctrl.netpndev = (u64)netdev;
2654 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2655 
2656 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2657 	if (ret) {
2658 		dev_err(&oct->pci_dev->dev,
2659 			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2660 			ret);
2661 		if (ret > 0)
2662 			ret = -EIO;
2663 	}
2664 	return ret;
2665 }
2666 
2667 /** Sending command to add/delete VxLAN UDP port to firmware
2668  * @param netdev                pointer to network device
2669  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
2670  * @param vxlan_port            VxLAN port to be added or deleted
2671  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
2672  *                              OCTNET_CMD_VXLAN_PORT_DEL
2673  * @returns                     SUCCESS or FAILURE
2674  */
2675 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2676 				       u16 vxlan_port, u8 vxlan_cmd_bit)
2677 {
2678 	struct lio *lio = GET_LIO(netdev);
2679 	struct octeon_device *oct = lio->oct_dev;
2680 	struct octnic_ctrl_pkt nctrl;
2681 	int ret = 0;
2682 
2683 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2684 
2685 	nctrl.ncmd.u64 = 0;
2686 	nctrl.ncmd.s.cmd = command;
2687 	nctrl.ncmd.s.more = vxlan_cmd_bit;
2688 	nctrl.ncmd.s.param1 = vxlan_port;
2689 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2690 	nctrl.netpndev = (u64)netdev;
2691 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2692 
2693 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2694 	if (ret) {
2695 		dev_err(&oct->pci_dev->dev,
2696 			"VxLAN port add/delete failed in core (ret:0x%x)\n",
2697 			ret);
2698 		if (ret > 0)
2699 			ret = -EIO;
2700 	}
2701 	return ret;
2702 }
2703 
2704 /** \brief Net device fix features
2705  * @param netdev  pointer to network device
2706  * @param request features requested
2707  * @returns updated features list
2708  */
2709 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2710 					       netdev_features_t request)
2711 {
2712 	struct lio *lio = netdev_priv(netdev);
2713 
2714 	if ((request & NETIF_F_RXCSUM) &&
2715 	    !(lio->dev_capability & NETIF_F_RXCSUM))
2716 		request &= ~NETIF_F_RXCSUM;
2717 
2718 	if ((request & NETIF_F_HW_CSUM) &&
2719 	    !(lio->dev_capability & NETIF_F_HW_CSUM))
2720 		request &= ~NETIF_F_HW_CSUM;
2721 
2722 	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2723 		request &= ~NETIF_F_TSO;
2724 
2725 	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2726 		request &= ~NETIF_F_TSO6;
2727 
2728 	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2729 		request &= ~NETIF_F_LRO;
2730 
2731 	/*Disable LRO if RXCSUM is off */
2732 	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2733 	    (lio->dev_capability & NETIF_F_LRO))
2734 		request &= ~NETIF_F_LRO;
2735 
2736 	if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2737 	    !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2738 		request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2739 
2740 	return request;
2741 }
2742 
2743 /** \brief Net device set features
2744  * @param netdev  pointer to network device
2745  * @param features features to enable/disable
2746  */
2747 static int liquidio_set_features(struct net_device *netdev,
2748 				 netdev_features_t features)
2749 {
2750 	struct lio *lio = netdev_priv(netdev);
2751 
2752 	if ((features & NETIF_F_LRO) &&
2753 	    (lio->dev_capability & NETIF_F_LRO) &&
2754 	    !(netdev->features & NETIF_F_LRO))
2755 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2756 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2757 	else if (!(features & NETIF_F_LRO) &&
2758 		 (lio->dev_capability & NETIF_F_LRO) &&
2759 		 (netdev->features & NETIF_F_LRO))
2760 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2761 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2762 
2763 	/* Sending command to firmware to enable/disable RX checksum
2764 	 * offload settings using ethtool
2765 	 */
2766 	if (!(netdev->features & NETIF_F_RXCSUM) &&
2767 	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2768 	    (features & NETIF_F_RXCSUM))
2769 		liquidio_set_rxcsum_command(netdev,
2770 					    OCTNET_CMD_TNL_RX_CSUM_CTL,
2771 					    OCTNET_CMD_RXCSUM_ENABLE);
2772 	else if ((netdev->features & NETIF_F_RXCSUM) &&
2773 		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2774 		 !(features & NETIF_F_RXCSUM))
2775 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2776 					    OCTNET_CMD_RXCSUM_DISABLE);
2777 
2778 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2779 	    (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2780 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2781 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2782 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
2783 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2784 		 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2785 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2786 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2787 				     OCTNET_CMD_VLAN_FILTER_DISABLE);
2788 
2789 	return 0;
2790 }
2791 
2792 static void liquidio_add_vxlan_port(struct net_device *netdev,
2793 				    struct udp_tunnel_info *ti)
2794 {
2795 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2796 		return;
2797 
2798 	liquidio_vxlan_port_command(netdev,
2799 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2800 				    htons(ti->port),
2801 				    OCTNET_CMD_VXLAN_PORT_ADD);
2802 }
2803 
2804 static void liquidio_del_vxlan_port(struct net_device *netdev,
2805 				    struct udp_tunnel_info *ti)
2806 {
2807 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2808 		return;
2809 
2810 	liquidio_vxlan_port_command(netdev,
2811 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2812 				    htons(ti->port),
2813 				    OCTNET_CMD_VXLAN_PORT_DEL);
2814 }
2815 
2816 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2817 				 u8 *mac, bool is_admin_assigned)
2818 {
2819 	struct lio *lio = GET_LIO(netdev);
2820 	struct octeon_device *oct = lio->oct_dev;
2821 	struct octnic_ctrl_pkt nctrl;
2822 	int ret = 0;
2823 
2824 	if (!is_valid_ether_addr(mac))
2825 		return -EINVAL;
2826 
2827 	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2828 		return -EINVAL;
2829 
2830 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2831 
2832 	nctrl.ncmd.u64 = 0;
2833 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2834 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
2835 	nctrl.ncmd.s.param1 = vfidx + 1;
2836 	nctrl.ncmd.s.more = 1;
2837 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2838 	nctrl.netpndev = (u64)netdev;
2839 	if (is_admin_assigned) {
2840 		nctrl.ncmd.s.param2 = true;
2841 		nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2842 	}
2843 
2844 	nctrl.udd[0] = 0;
2845 	/* The MAC Address is presented in network byte order. */
2846 	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2847 
2848 	oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2849 
2850 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2851 	if (ret > 0)
2852 		ret = -EIO;
2853 
2854 	return ret;
2855 }
2856 
2857 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2858 {
2859 	struct lio *lio = GET_LIO(netdev);
2860 	struct octeon_device *oct = lio->oct_dev;
2861 	int retval;
2862 
2863 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2864 		return -EINVAL;
2865 
2866 	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2867 	if (!retval)
2868 		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2869 
2870 	return retval;
2871 }
2872 
2873 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2874 				    bool enable)
2875 {
2876 	struct lio *lio = GET_LIO(netdev);
2877 	struct octeon_device *oct = lio->oct_dev;
2878 	struct octnic_ctrl_pkt nctrl;
2879 	int retval;
2880 
2881 	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2882 		netif_info(lio, drv, lio->netdev,
2883 			   "firmware does not support spoofchk\n");
2884 		return -EOPNOTSUPP;
2885 	}
2886 
2887 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2888 		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2889 		return -EINVAL;
2890 	}
2891 
2892 	if (enable) {
2893 		if (oct->sriov_info.vf_spoofchk[vfidx])
2894 			return 0;
2895 	} else {
2896 		/* Clear */
2897 		if (!oct->sriov_info.vf_spoofchk[vfidx])
2898 			return 0;
2899 	}
2900 
2901 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2902 	nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2903 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
2904 	nctrl.ncmd.s.param1 =
2905 		vfidx + 1; /* vfidx is 0 based,
2906 			    * but vf_num (param1) is 1 based
2907 			    */
2908 	nctrl.ncmd.s.param2 = enable;
2909 	nctrl.ncmd.s.more = 0;
2910 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2911 	nctrl.cb_fn = 0;
2912 
2913 	retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2914 
2915 	if (retval) {
2916 		netif_info(lio, drv, lio->netdev,
2917 			   "Failed to set VF %d spoofchk %s\n", vfidx,
2918 			enable ? "on" : "off");
2919 		return -1;
2920 	}
2921 
2922 	oct->sriov_info.vf_spoofchk[vfidx] = enable;
2923 	netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
2924 		   enable ? "on" : "off");
2925 
2926 	return 0;
2927 }
2928 
2929 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2930 				u16 vlan, u8 qos, __be16 vlan_proto)
2931 {
2932 	struct lio *lio = GET_LIO(netdev);
2933 	struct octeon_device *oct = lio->oct_dev;
2934 	struct octnic_ctrl_pkt nctrl;
2935 	u16 vlantci;
2936 	int ret = 0;
2937 
2938 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2939 		return -EINVAL;
2940 
2941 	if (vlan_proto != htons(ETH_P_8021Q))
2942 		return -EPROTONOSUPPORT;
2943 
2944 	if (vlan >= VLAN_N_VID || qos > 7)
2945 		return -EINVAL;
2946 
2947 	if (vlan)
2948 		vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2949 	else
2950 		vlantci = 0;
2951 
2952 	if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2953 		return 0;
2954 
2955 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2956 
2957 	if (vlan)
2958 		nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2959 	else
2960 		nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2961 
2962 	nctrl.ncmd.s.param1 = vlantci;
2963 	nctrl.ncmd.s.param2 =
2964 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2965 	nctrl.ncmd.s.more = 0;
2966 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2967 	nctrl.cb_fn = NULL;
2968 
2969 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2970 	if (ret) {
2971 		if (ret > 0)
2972 			ret = -EIO;
2973 		return ret;
2974 	}
2975 
2976 	oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2977 
2978 	return ret;
2979 }
2980 
2981 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2982 				  struct ifla_vf_info *ivi)
2983 {
2984 	struct lio *lio = GET_LIO(netdev);
2985 	struct octeon_device *oct = lio->oct_dev;
2986 	u8 *macaddr;
2987 
2988 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2989 		return -EINVAL;
2990 
2991 	memset(ivi, 0, sizeof(struct ifla_vf_info));
2992 
2993 	ivi->vf = vfidx;
2994 	macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2995 	ether_addr_copy(&ivi->mac[0], macaddr);
2996 	ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2997 	ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2998 	if (oct->sriov_info.trusted_vf.active &&
2999 	    oct->sriov_info.trusted_vf.id == vfidx)
3000 		ivi->trusted = true;
3001 	else
3002 		ivi->trusted = false;
3003 	ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3004 	ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
3005 	ivi->max_tx_rate = lio->linfo.link.s.speed;
3006 	ivi->min_tx_rate = 0;
3007 
3008 	return 0;
3009 }
3010 
3011 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
3012 {
3013 	struct octeon_device *oct = lio->oct_dev;
3014 	struct octeon_soft_command *sc;
3015 	int retval;
3016 
3017 	sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3018 	if (!sc)
3019 		return -ENOMEM;
3020 
3021 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3022 
3023 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
3024 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3025 				    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3026 				    trusted);
3027 
3028 	init_completion(&sc->complete);
3029 	sc->sc_status = OCTEON_REQUEST_PENDING;
3030 
3031 	retval = octeon_send_soft_command(oct, sc);
3032 	if (retval == IQ_SEND_FAILED) {
3033 		octeon_free_soft_command(oct, sc);
3034 		retval = -1;
3035 	} else {
3036 		/* Wait for response or timeout */
3037 		retval = wait_for_sc_completion_timeout(oct, sc, 0);
3038 		if (retval)
3039 			return (retval);
3040 
3041 		WRITE_ONCE(sc->caller_is_done, true);
3042 	}
3043 
3044 	return retval;
3045 }
3046 
3047 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3048 				 bool setting)
3049 {
3050 	struct lio *lio = GET_LIO(netdev);
3051 	struct octeon_device *oct = lio->oct_dev;
3052 
3053 	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3054 		/* trusted vf is not supported by firmware older than 1.7.1 */
3055 		return -EOPNOTSUPP;
3056 	}
3057 
3058 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3059 		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3060 		return -EINVAL;
3061 	}
3062 
3063 	if (setting) {
3064 		/* Set */
3065 
3066 		if (oct->sriov_info.trusted_vf.active &&
3067 		    oct->sriov_info.trusted_vf.id == vfidx)
3068 			return 0;
3069 
3070 		if (oct->sriov_info.trusted_vf.active) {
3071 			netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3072 			return -EPERM;
3073 		}
3074 	} else {
3075 		/* Clear */
3076 
3077 		if (!oct->sriov_info.trusted_vf.active)
3078 			return 0;
3079 	}
3080 
3081 	if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3082 		if (setting) {
3083 			oct->sriov_info.trusted_vf.id = vfidx;
3084 			oct->sriov_info.trusted_vf.active = true;
3085 		} else {
3086 			oct->sriov_info.trusted_vf.active = false;
3087 		}
3088 
3089 		netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3090 			   setting ? "" : "not ");
3091 	} else {
3092 		netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3093 		return -1;
3094 	}
3095 
3096 	return 0;
3097 }
3098 
3099 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3100 				      int linkstate)
3101 {
3102 	struct lio *lio = GET_LIO(netdev);
3103 	struct octeon_device *oct = lio->oct_dev;
3104 	struct octnic_ctrl_pkt nctrl;
3105 	int ret = 0;
3106 
3107 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3108 		return -EINVAL;
3109 
3110 	if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3111 		return 0;
3112 
3113 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3114 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3115 	nctrl.ncmd.s.param1 =
3116 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3117 	nctrl.ncmd.s.param2 = linkstate;
3118 	nctrl.ncmd.s.more = 0;
3119 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3120 	nctrl.cb_fn = NULL;
3121 
3122 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3123 
3124 	if (!ret)
3125 		oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3126 	else if (ret > 0)
3127 		ret = -EIO;
3128 
3129 	return ret;
3130 }
3131 
3132 static int
3133 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3134 {
3135 	struct lio_devlink_priv *priv;
3136 	struct octeon_device *oct;
3137 
3138 	priv = devlink_priv(devlink);
3139 	oct = priv->oct;
3140 
3141 	*mode = oct->eswitch_mode;
3142 
3143 	return 0;
3144 }
3145 
3146 static int
3147 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3148 			  struct netlink_ext_ack *extack)
3149 {
3150 	struct lio_devlink_priv *priv;
3151 	struct octeon_device *oct;
3152 	int ret = 0;
3153 
3154 	priv = devlink_priv(devlink);
3155 	oct = priv->oct;
3156 
3157 	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3158 		return -EINVAL;
3159 
3160 	if (oct->eswitch_mode == mode)
3161 		return 0;
3162 
3163 	switch (mode) {
3164 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3165 		oct->eswitch_mode = mode;
3166 		ret = lio_vf_rep_create(oct);
3167 		break;
3168 
3169 	case DEVLINK_ESWITCH_MODE_LEGACY:
3170 		lio_vf_rep_destroy(oct);
3171 		oct->eswitch_mode = mode;
3172 		break;
3173 
3174 	default:
3175 		ret = -EINVAL;
3176 	}
3177 
3178 	return ret;
3179 }
3180 
3181 static const struct devlink_ops liquidio_devlink_ops = {
3182 	.eswitch_mode_get = liquidio_eswitch_mode_get,
3183 	.eswitch_mode_set = liquidio_eswitch_mode_set,
3184 };
3185 
3186 static int
3187 lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
3188 {
3189 	struct lio *lio = GET_LIO(dev);
3190 	struct octeon_device *oct = lio->oct_dev;
3191 
3192 	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3193 		return -EOPNOTSUPP;
3194 
3195 	switch (attr->id) {
3196 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
3197 		attr->u.ppid.id_len = ETH_ALEN;
3198 		ether_addr_copy(attr->u.ppid.id,
3199 				(void *)&lio->linfo.hw_addr + 2);
3200 		break;
3201 
3202 	default:
3203 		return -EOPNOTSUPP;
3204 	}
3205 
3206 	return 0;
3207 }
3208 
3209 static const struct switchdev_ops lio_pf_switchdev_ops = {
3210 	.switchdev_port_attr_get = lio_pf_switchdev_attr_get,
3211 };
3212 
3213 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3214 				 struct ifla_vf_stats *vf_stats)
3215 {
3216 	struct lio *lio = GET_LIO(netdev);
3217 	struct octeon_device *oct = lio->oct_dev;
3218 	struct oct_vf_stats stats;
3219 	int ret;
3220 
3221 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3222 		return -EINVAL;
3223 
3224 	memset(&stats, 0, sizeof(struct oct_vf_stats));
3225 	ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3226 	if (!ret) {
3227 		vf_stats->rx_packets = stats.rx_packets;
3228 		vf_stats->tx_packets = stats.tx_packets;
3229 		vf_stats->rx_bytes = stats.rx_bytes;
3230 		vf_stats->tx_bytes = stats.tx_bytes;
3231 		vf_stats->broadcast = stats.broadcast;
3232 		vf_stats->multicast = stats.multicast;
3233 	}
3234 
3235 	return ret;
3236 }
3237 
3238 static const struct net_device_ops lionetdevops = {
3239 	.ndo_open		= liquidio_open,
3240 	.ndo_stop		= liquidio_stop,
3241 	.ndo_start_xmit		= liquidio_xmit,
3242 	.ndo_get_stats64	= liquidio_get_stats64,
3243 	.ndo_set_mac_address	= liquidio_set_mac,
3244 	.ndo_set_rx_mode	= liquidio_set_mcast_list,
3245 	.ndo_tx_timeout		= liquidio_tx_timeout,
3246 
3247 	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3248 	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3249 	.ndo_change_mtu		= liquidio_change_mtu,
3250 	.ndo_do_ioctl		= liquidio_ioctl,
3251 	.ndo_fix_features	= liquidio_fix_features,
3252 	.ndo_set_features	= liquidio_set_features,
3253 	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
3254 	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
3255 	.ndo_set_vf_mac		= liquidio_set_vf_mac,
3256 	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
3257 	.ndo_get_vf_config	= liquidio_get_vf_config,
3258 	.ndo_set_vf_spoofchk	= liquidio_set_vf_spoofchk,
3259 	.ndo_set_vf_trust	= liquidio_set_vf_trust,
3260 	.ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3261 	.ndo_get_vf_stats	= liquidio_get_vf_stats,
3262 };
3263 
3264 /** \brief Entry point for the liquidio module
3265  */
3266 static int __init liquidio_init(void)
3267 {
3268 	int i;
3269 	struct handshake *hs;
3270 
3271 	init_completion(&first_stage);
3272 
3273 	octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3274 
3275 	if (liquidio_init_pci())
3276 		return -EINVAL;
3277 
3278 	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3279 
3280 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3281 		hs = &handshake[i];
3282 		if (hs->pci_dev) {
3283 			wait_for_completion(&hs->init);
3284 			if (!hs->init_ok) {
3285 				/* init handshake failed */
3286 				dev_err(&hs->pci_dev->dev,
3287 					"Failed to init device\n");
3288 				liquidio_deinit_pci();
3289 				return -EIO;
3290 			}
3291 		}
3292 	}
3293 
3294 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3295 		hs = &handshake[i];
3296 		if (hs->pci_dev) {
3297 			wait_for_completion_timeout(&hs->started,
3298 						    msecs_to_jiffies(30000));
3299 			if (!hs->started_ok) {
3300 				/* starter handshake failed */
3301 				dev_err(&hs->pci_dev->dev,
3302 					"Firmware failed to start\n");
3303 				liquidio_deinit_pci();
3304 				return -EIO;
3305 			}
3306 		}
3307 	}
3308 
3309 	return 0;
3310 }
3311 
3312 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3313 {
3314 	struct octeon_device *oct = (struct octeon_device *)buf;
3315 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3316 	int gmxport = 0;
3317 	union oct_link_status *ls;
3318 	int i;
3319 
3320 	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3321 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3322 			recv_pkt->buffer_size[0],
3323 			recv_pkt->rh.r_nic_info.gmxport);
3324 		goto nic_info_err;
3325 	}
3326 
3327 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
3328 	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3329 		OCT_DROQ_INFO_SIZE);
3330 
3331 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3332 	for (i = 0; i < oct->ifcount; i++) {
3333 		if (oct->props[i].gmxport == gmxport) {
3334 			update_link_status(oct->props[i].netdev, ls);
3335 			break;
3336 		}
3337 	}
3338 
3339 nic_info_err:
3340 	for (i = 0; i < recv_pkt->buffer_count; i++)
3341 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
3342 	octeon_free_recv_info(recv_info);
3343 	return 0;
3344 }
3345 
3346 /**
3347  * \brief Setup network interfaces
3348  * @param octeon_dev  octeon device
3349  *
3350  * Called during init time for each device. It assumes the NIC
3351  * is already up and running.  The link information for each
3352  * interface is passed in link_info.
3353  */
3354 static int setup_nic_devices(struct octeon_device *octeon_dev)
3355 {
3356 	struct lio *lio = NULL;
3357 	struct net_device *netdev;
3358 	u8 mac[6], i, j, *fw_ver, *micro_ver;
3359 	unsigned long micro;
3360 	u32 cur_ver;
3361 	struct octeon_soft_command *sc;
3362 	struct liquidio_if_cfg_resp *resp;
3363 	struct octdev_props *props;
3364 	int retval, num_iqueues, num_oqueues;
3365 	int max_num_queues = 0;
3366 	union oct_nic_if_cfg if_cfg;
3367 	unsigned int base_queue;
3368 	unsigned int gmx_port_id;
3369 	u32 resp_size, data_size;
3370 	u32 ifidx_or_pfnum;
3371 	struct lio_version *vdata;
3372 	struct devlink *devlink;
3373 	struct lio_devlink_priv *lio_devlink;
3374 
3375 	/* This is to handle link status changes */
3376 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3377 				    OPCODE_NIC_INFO,
3378 				    lio_nic_info, octeon_dev);
3379 
3380 	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3381 	 * They are handled directly.
3382 	 */
3383 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3384 					free_netbuf);
3385 
3386 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3387 					free_netsgbuf);
3388 
3389 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3390 					free_netsgbuf_with_resp);
3391 
3392 	for (i = 0; i < octeon_dev->ifcount; i++) {
3393 		resp_size = sizeof(struct liquidio_if_cfg_resp);
3394 		data_size = sizeof(struct lio_version);
3395 		sc = (struct octeon_soft_command *)
3396 			octeon_alloc_soft_command(octeon_dev, data_size,
3397 						  resp_size, 0);
3398 		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3399 		vdata = (struct lio_version *)sc->virtdptr;
3400 
3401 		*((u64 *)vdata) = 0;
3402 		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3403 		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3404 		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3405 
3406 		if (OCTEON_CN23XX_PF(octeon_dev)) {
3407 			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3408 			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3409 			base_queue = octeon_dev->sriov_info.pf_srn;
3410 
3411 			gmx_port_id = octeon_dev->pf_num;
3412 			ifidx_or_pfnum = octeon_dev->pf_num;
3413 		} else {
3414 			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3415 						octeon_get_conf(octeon_dev), i);
3416 			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3417 						octeon_get_conf(octeon_dev), i);
3418 			base_queue = CFG_GET_BASE_QUE_NIC_IF(
3419 						octeon_get_conf(octeon_dev), i);
3420 			gmx_port_id = CFG_GET_GMXID_NIC_IF(
3421 						octeon_get_conf(octeon_dev), i);
3422 			ifidx_or_pfnum = i;
3423 		}
3424 
3425 		dev_dbg(&octeon_dev->pci_dev->dev,
3426 			"requesting config for interface %d, iqs %d, oqs %d\n",
3427 			ifidx_or_pfnum, num_iqueues, num_oqueues);
3428 
3429 		if_cfg.u64 = 0;
3430 		if_cfg.s.num_iqueues = num_iqueues;
3431 		if_cfg.s.num_oqueues = num_oqueues;
3432 		if_cfg.s.base_queue = base_queue;
3433 		if_cfg.s.gmx_port_id = gmx_port_id;
3434 
3435 		sc->iq_no = 0;
3436 
3437 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3438 					    OPCODE_NIC_IF_CFG, 0,
3439 					    if_cfg.u64, 0);
3440 
3441 		init_completion(&sc->complete);
3442 		sc->sc_status = OCTEON_REQUEST_PENDING;
3443 
3444 		retval = octeon_send_soft_command(octeon_dev, sc);
3445 		if (retval == IQ_SEND_FAILED) {
3446 			dev_err(&octeon_dev->pci_dev->dev,
3447 				"iq/oq config failed status: %x\n",
3448 				retval);
3449 			/* Soft instr is freed by driver in case of failure. */
3450 			octeon_free_soft_command(octeon_dev, sc);
3451 			return(-EIO);
3452 		}
3453 
3454 		/* Sleep on a wait queue till the cond flag indicates that the
3455 		 * response arrived or timed-out.
3456 		 */
3457 		retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3458 		if (retval)
3459 			return retval;
3460 
3461 		retval = resp->status;
3462 		if (retval) {
3463 			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3464 			WRITE_ONCE(sc->caller_is_done, true);
3465 			goto setup_nic_dev_done;
3466 		}
3467 		snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3468 			 32, "%s",
3469 			 resp->cfg_info.liquidio_firmware_version);
3470 
3471 		/* Verify f/w version (in case of 'auto' loading from flash) */
3472 		fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3473 		if (memcmp(LIQUIDIO_BASE_VERSION,
3474 			   fw_ver,
3475 			   strlen(LIQUIDIO_BASE_VERSION))) {
3476 			dev_err(&octeon_dev->pci_dev->dev,
3477 				"Unmatched firmware version. Expected %s.x, got %s.\n",
3478 				LIQUIDIO_BASE_VERSION, fw_ver);
3479 			WRITE_ONCE(sc->caller_is_done, true);
3480 			goto setup_nic_dev_done;
3481 		} else if (atomic_read(octeon_dev->adapter_fw_state) ==
3482 			   FW_IS_PRELOADED) {
3483 			dev_info(&octeon_dev->pci_dev->dev,
3484 				 "Using auto-loaded firmware version %s.\n",
3485 				 fw_ver);
3486 		}
3487 
3488 		/* extract micro version field; point past '<maj>.<min>.' */
3489 		micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3490 		if (kstrtoul(micro_ver, 10, &micro) != 0)
3491 			micro = 0;
3492 		octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3493 		octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3494 		octeon_dev->fw_info.ver.rev = micro;
3495 
3496 		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3497 				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
3498 
3499 		num_iqueues = hweight64(resp->cfg_info.iqmask);
3500 		num_oqueues = hweight64(resp->cfg_info.oqmask);
3501 
3502 		if (!(num_iqueues) || !(num_oqueues)) {
3503 			dev_err(&octeon_dev->pci_dev->dev,
3504 				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3505 				resp->cfg_info.iqmask,
3506 				resp->cfg_info.oqmask);
3507 			WRITE_ONCE(sc->caller_is_done, true);
3508 			goto setup_nic_dev_done;
3509 		}
3510 
3511 		if (OCTEON_CN6XXX(octeon_dev)) {
3512 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3513 								    cn6xxx));
3514 		} else if (OCTEON_CN23XX_PF(octeon_dev)) {
3515 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3516 								    cn23xx_pf));
3517 		}
3518 
3519 		dev_dbg(&octeon_dev->pci_dev->dev,
3520 			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3521 			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3522 			num_iqueues, num_oqueues, max_num_queues);
3523 		netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3524 
3525 		if (!netdev) {
3526 			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3527 			WRITE_ONCE(sc->caller_is_done, true);
3528 			goto setup_nic_dev_done;
3529 		}
3530 
3531 		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3532 
3533 		/* Associate the routines that will handle different
3534 		 * netdev tasks.
3535 		 */
3536 		netdev->netdev_ops = &lionetdevops;
3537 		SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
3538 
3539 		retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3540 		if (retval) {
3541 			dev_err(&octeon_dev->pci_dev->dev,
3542 				"setting real number rx failed\n");
3543 			WRITE_ONCE(sc->caller_is_done, true);
3544 			goto setup_nic_dev_free;
3545 		}
3546 
3547 		retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3548 		if (retval) {
3549 			dev_err(&octeon_dev->pci_dev->dev,
3550 				"setting real number tx failed\n");
3551 			WRITE_ONCE(sc->caller_is_done, true);
3552 			goto setup_nic_dev_free;
3553 		}
3554 
3555 		lio = GET_LIO(netdev);
3556 
3557 		memset(lio, 0, sizeof(struct lio));
3558 
3559 		lio->ifidx = ifidx_or_pfnum;
3560 
3561 		props = &octeon_dev->props[i];
3562 		props->gmxport = resp->cfg_info.linfo.gmxport;
3563 		props->netdev = netdev;
3564 
3565 		lio->linfo.num_rxpciq = num_oqueues;
3566 		lio->linfo.num_txpciq = num_iqueues;
3567 		for (j = 0; j < num_oqueues; j++) {
3568 			lio->linfo.rxpciq[j].u64 =
3569 				resp->cfg_info.linfo.rxpciq[j].u64;
3570 		}
3571 		for (j = 0; j < num_iqueues; j++) {
3572 			lio->linfo.txpciq[j].u64 =
3573 				resp->cfg_info.linfo.txpciq[j].u64;
3574 		}
3575 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3576 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3577 		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3578 
3579 		WRITE_ONCE(sc->caller_is_done, true);
3580 
3581 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3582 
3583 		if (OCTEON_CN23XX_PF(octeon_dev) ||
3584 		    OCTEON_CN6XXX(octeon_dev)) {
3585 			lio->dev_capability = NETIF_F_HIGHDMA
3586 					      | NETIF_F_IP_CSUM
3587 					      | NETIF_F_IPV6_CSUM
3588 					      | NETIF_F_SG | NETIF_F_RXCSUM
3589 					      | NETIF_F_GRO
3590 					      | NETIF_F_TSO | NETIF_F_TSO6
3591 					      | NETIF_F_LRO;
3592 		}
3593 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3594 
3595 		/*  Copy of transmit encapsulation capabilities:
3596 		 *  TSO, TSO6, Checksums for this device
3597 		 */
3598 		lio->enc_dev_capability = NETIF_F_IP_CSUM
3599 					  | NETIF_F_IPV6_CSUM
3600 					  | NETIF_F_GSO_UDP_TUNNEL
3601 					  | NETIF_F_HW_CSUM | NETIF_F_SG
3602 					  | NETIF_F_RXCSUM
3603 					  | NETIF_F_TSO | NETIF_F_TSO6
3604 					  | NETIF_F_LRO;
3605 
3606 		netdev->hw_enc_features = (lio->enc_dev_capability &
3607 					   ~NETIF_F_LRO);
3608 
3609 		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3610 
3611 		netdev->vlan_features = lio->dev_capability;
3612 		/* Add any unchangeable hw features */
3613 		lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3614 					NETIF_F_HW_VLAN_CTAG_RX |
3615 					NETIF_F_HW_VLAN_CTAG_TX;
3616 
3617 		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3618 
3619 		netdev->hw_features = lio->dev_capability;
3620 		/*HW_VLAN_RX and HW_VLAN_FILTER is always on*/
3621 		netdev->hw_features = netdev->hw_features &
3622 			~NETIF_F_HW_VLAN_CTAG_RX;
3623 
3624 		/* MTU range: 68 - 16000 */
3625 		netdev->min_mtu = LIO_MIN_MTU_SIZE;
3626 		netdev->max_mtu = LIO_MAX_MTU_SIZE;
3627 
3628 		/* Point to the  properties for octeon device to which this
3629 		 * interface belongs.
3630 		 */
3631 		lio->oct_dev = octeon_dev;
3632 		lio->octprops = props;
3633 		lio->netdev = netdev;
3634 
3635 		dev_dbg(&octeon_dev->pci_dev->dev,
3636 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
3637 			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3638 
3639 		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3640 			u8 vfmac[ETH_ALEN];
3641 
3642 			eth_random_addr(vfmac);
3643 			if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3644 				dev_err(&octeon_dev->pci_dev->dev,
3645 					"Error setting VF%d MAC address\n",
3646 					j);
3647 				goto setup_nic_dev_free;
3648 			}
3649 		}
3650 
3651 		/* 64-bit swap required on LE machines */
3652 		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3653 		for (j = 0; j < 6; j++)
3654 			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3655 
3656 		/* Copy MAC Address to OS network device structure */
3657 
3658 		ether_addr_copy(netdev->dev_addr, mac);
3659 
3660 		/* By default all interfaces on a single Octeon uses the same
3661 		 * tx and rx queues
3662 		 */
3663 		lio->txq = lio->linfo.txpciq[0].s.q_no;
3664 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3665 		if (liquidio_setup_io_queues(octeon_dev, i,
3666 					     lio->linfo.num_txpciq,
3667 					     lio->linfo.num_rxpciq)) {
3668 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3669 			goto setup_nic_dev_free;
3670 		}
3671 
3672 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3673 
3674 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3675 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3676 
3677 		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3678 			dev_err(&octeon_dev->pci_dev->dev,
3679 				"Gather list allocation failed\n");
3680 			goto setup_nic_dev_free;
3681 		}
3682 
3683 		/* Register ethtool support */
3684 		liquidio_set_ethtool_ops(netdev);
3685 		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3686 			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3687 		else
3688 			octeon_dev->priv_flags = 0x0;
3689 
3690 		if (netdev->features & NETIF_F_LRO)
3691 			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3692 					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3693 
3694 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3695 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
3696 
3697 		if ((debug != -1) && (debug & NETIF_MSG_HW))
3698 			liquidio_set_feature(netdev,
3699 					     OCTNET_CMD_VERBOSE_ENABLE, 0);
3700 
3701 		if (setup_link_status_change_wq(netdev))
3702 			goto setup_nic_dev_free;
3703 
3704 		if ((octeon_dev->fw_info.app_cap_flags &
3705 		     LIQUIDIO_TIME_SYNC_CAP) &&
3706 		    setup_sync_octeon_time_wq(netdev))
3707 			goto setup_nic_dev_free;
3708 
3709 		if (setup_rx_oom_poll_fn(netdev))
3710 			goto setup_nic_dev_free;
3711 
3712 		/* Register the network device with the OS */
3713 		if (register_netdev(netdev)) {
3714 			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3715 			goto setup_nic_dev_free;
3716 		}
3717 
3718 		dev_dbg(&octeon_dev->pci_dev->dev,
3719 			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3720 			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3721 		netif_carrier_off(netdev);
3722 		lio->link_changes++;
3723 
3724 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3725 
3726 		/* Sending command to firmware to enable Rx checksum offload
3727 		 * by default at the time of setup of Liquidio driver for
3728 		 * this device
3729 		 */
3730 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3731 					    OCTNET_CMD_RXCSUM_ENABLE);
3732 		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3733 				     OCTNET_CMD_TXCSUM_ENABLE);
3734 
3735 		dev_dbg(&octeon_dev->pci_dev->dev,
3736 			"NIC ifidx:%d Setup successful\n", i);
3737 
3738 		if (octeon_dev->subsystem_id ==
3739 			OCTEON_CN2350_25GB_SUBSYS_ID ||
3740 		    octeon_dev->subsystem_id ==
3741 			OCTEON_CN2360_25GB_SUBSYS_ID) {
3742 			cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3743 					     octeon_dev->fw_info.ver.min,
3744 					     octeon_dev->fw_info.ver.rev);
3745 
3746 			/* speed control unsupported in f/w older than 1.7.2 */
3747 			if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3748 				dev_info(&octeon_dev->pci_dev->dev,
3749 					 "speed setting not supported by f/w.");
3750 				octeon_dev->speed_setting = 25;
3751 				octeon_dev->no_speed_setting = 1;
3752 			} else {
3753 				liquidio_get_speed(lio);
3754 			}
3755 
3756 			if (octeon_dev->speed_setting == 0) {
3757 				octeon_dev->speed_setting = 25;
3758 				octeon_dev->no_speed_setting = 1;
3759 			}
3760 		} else {
3761 			octeon_dev->no_speed_setting = 1;
3762 			octeon_dev->speed_setting = 10;
3763 		}
3764 		octeon_dev->speed_boot = octeon_dev->speed_setting;
3765 
3766 		/* don't read FEC setting if unsupported by f/w (see above) */
3767 		if (octeon_dev->speed_boot == 25 &&
3768 		    !octeon_dev->no_speed_setting) {
3769 			liquidio_get_fec(lio);
3770 			octeon_dev->props[lio->ifidx].fec_boot =
3771 				octeon_dev->props[lio->ifidx].fec;
3772 		}
3773 	}
3774 
3775 	devlink = devlink_alloc(&liquidio_devlink_ops,
3776 				sizeof(struct lio_devlink_priv));
3777 	if (!devlink) {
3778 		dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3779 		goto setup_nic_dev_free;
3780 	}
3781 
3782 	lio_devlink = devlink_priv(devlink);
3783 	lio_devlink->oct = octeon_dev;
3784 
3785 	if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3786 		devlink_free(devlink);
3787 		dev_err(&octeon_dev->pci_dev->dev,
3788 			"devlink registration failed\n");
3789 		goto setup_nic_dev_free;
3790 	}
3791 
3792 	octeon_dev->devlink = devlink;
3793 	octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3794 
3795 	return 0;
3796 
3797 setup_nic_dev_free:
3798 
3799 	while (i--) {
3800 		dev_err(&octeon_dev->pci_dev->dev,
3801 			"NIC ifidx:%d Setup failed\n", i);
3802 		liquidio_destroy_nic_device(octeon_dev, i);
3803 	}
3804 
3805 setup_nic_dev_done:
3806 
3807 	return -ENODEV;
3808 }
3809 
3810 #ifdef CONFIG_PCI_IOV
3811 static int octeon_enable_sriov(struct octeon_device *oct)
3812 {
3813 	unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3814 	struct pci_dev *vfdev;
3815 	int err;
3816 	u32 u;
3817 
3818 	if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3819 		err = pci_enable_sriov(oct->pci_dev,
3820 				       oct->sriov_info.num_vfs_alloced);
3821 		if (err) {
3822 			dev_err(&oct->pci_dev->dev,
3823 				"OCTEON: Failed to enable PCI sriov: %d\n",
3824 				err);
3825 			oct->sriov_info.num_vfs_alloced = 0;
3826 			return err;
3827 		}
3828 		oct->sriov_info.sriov_enabled = 1;
3829 
3830 		/* init lookup table that maps DPI ring number to VF pci_dev
3831 		 * struct pointer
3832 		 */
3833 		u = 0;
3834 		vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3835 				       OCTEON_CN23XX_VF_VID, NULL);
3836 		while (vfdev) {
3837 			if (vfdev->is_virtfn &&
3838 			    (vfdev->physfn == oct->pci_dev)) {
3839 				oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3840 					vfdev;
3841 				u += oct->sriov_info.rings_per_vf;
3842 			}
3843 			vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3844 					       OCTEON_CN23XX_VF_VID, vfdev);
3845 		}
3846 	}
3847 
3848 	return num_vfs_alloced;
3849 }
3850 
3851 static int lio_pci_sriov_disable(struct octeon_device *oct)
3852 {
3853 	int u;
3854 
3855 	if (pci_vfs_assigned(oct->pci_dev)) {
3856 		dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3857 		return -EPERM;
3858 	}
3859 
3860 	pci_disable_sriov(oct->pci_dev);
3861 
3862 	u = 0;
3863 	while (u < MAX_POSSIBLE_VFS) {
3864 		oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3865 		u += oct->sriov_info.rings_per_vf;
3866 	}
3867 
3868 	oct->sriov_info.num_vfs_alloced = 0;
3869 	dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3870 		 oct->pf_num);
3871 
3872 	return 0;
3873 }
3874 
3875 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3876 {
3877 	struct octeon_device *oct = pci_get_drvdata(dev);
3878 	int ret = 0;
3879 
3880 	if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3881 	    (oct->sriov_info.sriov_enabled)) {
3882 		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3883 			 oct->pf_num, num_vfs);
3884 		return 0;
3885 	}
3886 
3887 	if (!num_vfs) {
3888 		lio_vf_rep_destroy(oct);
3889 		ret = lio_pci_sriov_disable(oct);
3890 	} else if (num_vfs > oct->sriov_info.max_vfs) {
3891 		dev_err(&oct->pci_dev->dev,
3892 			"OCTEON: Max allowed VFs:%d user requested:%d",
3893 			oct->sriov_info.max_vfs, num_vfs);
3894 		ret = -EPERM;
3895 	} else {
3896 		oct->sriov_info.num_vfs_alloced = num_vfs;
3897 		ret = octeon_enable_sriov(oct);
3898 		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3899 			 oct->pf_num, num_vfs);
3900 		ret = lio_vf_rep_create(oct);
3901 		if (ret)
3902 			dev_info(&oct->pci_dev->dev,
3903 				 "vf representor create failed");
3904 	}
3905 
3906 	return ret;
3907 }
3908 #endif
3909 
3910 /**
3911  * \brief initialize the NIC
3912  * @param oct octeon device
3913  *
3914  * This initialization routine is called once the Octeon device application is
3915  * up and running
3916  */
3917 static int liquidio_init_nic_module(struct octeon_device *oct)
3918 {
3919 	int i, retval = 0;
3920 	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3921 
3922 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3923 
3924 	/* only default iq and oq were initialized
3925 	 * initialize the rest as well
3926 	 */
3927 	/* run port_config command for each port */
3928 	oct->ifcount = num_nic_ports;
3929 
3930 	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3931 
3932 	for (i = 0; i < MAX_OCTEON_LINKS; i++)
3933 		oct->props[i].gmxport = -1;
3934 
3935 	retval = setup_nic_devices(oct);
3936 	if (retval) {
3937 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3938 		goto octnet_init_failure;
3939 	}
3940 
3941 	/* Call vf_rep_modinit if the firmware is switchdev capable
3942 	 * and do it from the first liquidio function probed.
3943 	 */
3944 	if (!oct->octeon_id &&
3945 	    oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3946 		retval = lio_vf_rep_modinit();
3947 		if (retval) {
3948 			liquidio_stop_nic_module(oct);
3949 			goto octnet_init_failure;
3950 		}
3951 	}
3952 
3953 	liquidio_ptp_init(oct);
3954 
3955 	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3956 
3957 	return retval;
3958 
3959 octnet_init_failure:
3960 
3961 	oct->ifcount = 0;
3962 
3963 	return retval;
3964 }
3965 
3966 /**
3967  * \brief starter callback that invokes the remaining initialization work after
3968  * the NIC is up and running.
3969  * @param octptr  work struct work_struct
3970  */
3971 static void nic_starter(struct work_struct *work)
3972 {
3973 	struct octeon_device *oct;
3974 	struct cavium_wk *wk = (struct cavium_wk *)work;
3975 
3976 	oct = (struct octeon_device *)wk->ctxptr;
3977 
3978 	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3979 		return;
3980 
3981 	/* If the status of the device is CORE_OK, the core
3982 	 * application has reported its application type. Call
3983 	 * any registered handlers now and move to the RUNNING
3984 	 * state.
3985 	 */
3986 	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3987 		schedule_delayed_work(&oct->nic_poll_work.work,
3988 				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3989 		return;
3990 	}
3991 
3992 	atomic_set(&oct->status, OCT_DEV_RUNNING);
3993 
3994 	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3995 		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3996 
3997 		if (liquidio_init_nic_module(oct))
3998 			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3999 		else
4000 			handshake[oct->octeon_id].started_ok = 1;
4001 	} else {
4002 		dev_err(&oct->pci_dev->dev,
4003 			"Unexpected application running on NIC (%d). Check firmware.\n",
4004 			oct->app_mode);
4005 	}
4006 
4007 	complete(&handshake[oct->octeon_id].started);
4008 }
4009 
4010 static int
4011 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
4012 {
4013 	struct octeon_device *oct = (struct octeon_device *)buf;
4014 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
4015 	int i, notice, vf_idx;
4016 	bool cores_crashed;
4017 	u64 *data, vf_num;
4018 
4019 	notice = recv_pkt->rh.r.ossp;
4020 	data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
4021 
4022 	/* the first 64-bit word of data is the vf_num */
4023 	vf_num = data[0];
4024 	octeon_swap_8B_data(&vf_num, 1);
4025 	vf_idx = (int)vf_num - 1;
4026 
4027 	cores_crashed = READ_ONCE(oct->cores_crashed);
4028 
4029 	if (notice == VF_DRV_LOADED) {
4030 		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4031 			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4032 			dev_info(&oct->pci_dev->dev,
4033 				 "driver for VF%d was loaded\n", vf_idx);
4034 			if (!cores_crashed)
4035 				try_module_get(THIS_MODULE);
4036 		}
4037 	} else if (notice == VF_DRV_REMOVED) {
4038 		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4039 			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4040 			dev_info(&oct->pci_dev->dev,
4041 				 "driver for VF%d was removed\n", vf_idx);
4042 			if (!cores_crashed)
4043 				module_put(THIS_MODULE);
4044 		}
4045 	} else if (notice == VF_DRV_MACADDR_CHANGED) {
4046 		u8 *b = (u8 *)&data[1];
4047 
4048 		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4049 		dev_info(&oct->pci_dev->dev,
4050 			 "VF driver changed VF%d's MAC address to %pM\n",
4051 			 vf_idx, b + 2);
4052 	}
4053 
4054 	for (i = 0; i < recv_pkt->buffer_count; i++)
4055 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
4056 	octeon_free_recv_info(recv_info);
4057 
4058 	return 0;
4059 }
4060 
4061 /**
4062  * \brief Device initialization for each Octeon device that is probed
4063  * @param octeon_dev  octeon device
4064  */
4065 static int octeon_device_init(struct octeon_device *octeon_dev)
4066 {
4067 	int j, ret;
4068 	char bootcmd[] = "\n";
4069 	char *dbg_enb = NULL;
4070 	enum lio_fw_state fw_state;
4071 	struct octeon_device_priv *oct_priv =
4072 		(struct octeon_device_priv *)octeon_dev->priv;
4073 	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4074 
4075 	/* Enable access to the octeon device and make its DMA capability
4076 	 * known to the OS.
4077 	 */
4078 	if (octeon_pci_os_setup(octeon_dev))
4079 		return 1;
4080 
4081 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4082 
4083 	/* Identify the Octeon type and map the BAR address space. */
4084 	if (octeon_chip_specific_setup(octeon_dev)) {
4085 		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4086 		return 1;
4087 	}
4088 
4089 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4090 
4091 	/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4092 	 * since that is what is required for the reference to be removed
4093 	 * during de-initialization (see 'octeon_destroy_resources').
4094 	 */
4095 	octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4096 			       PCI_SLOT(octeon_dev->pci_dev->devfn),
4097 			       PCI_FUNC(octeon_dev->pci_dev->devfn),
4098 			       true);
4099 
4100 	octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4101 
4102 	/* CN23XX supports preloaded firmware if the following is true:
4103 	 *
4104 	 * The adapter indicates that firmware is currently running AND
4105 	 * 'fw_type' is 'auto'.
4106 	 *
4107 	 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4108 	 */
4109 	if (OCTEON_CN23XX_PF(octeon_dev) &&
4110 	    cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4111 		atomic_cmpxchg(octeon_dev->adapter_fw_state,
4112 			       FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4113 	}
4114 
4115 	/* If loading firmware, only first device of adapter needs to do so. */
4116 	fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4117 				  FW_NEEDS_TO_BE_LOADED,
4118 				  FW_IS_BEING_LOADED);
4119 
4120 	/* Here, [local variable] 'fw_state' is set to one of:
4121 	 *
4122 	 *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4123 	 *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4124 	 *                          firmware to the adapter.
4125 	 *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4126 	 *                          firmware to the adapter.
4127 	 */
4128 
4129 	/* Prior to f/w load, perform a soft reset of the Octeon device;
4130 	 * if error resetting, return w/error.
4131 	 */
4132 	if (fw_state == FW_NEEDS_TO_BE_LOADED)
4133 		if (octeon_dev->fn_list.soft_reset(octeon_dev))
4134 			return 1;
4135 
4136 	/* Initialize the dispatch mechanism used to push packets arriving on
4137 	 * Octeon Output queues.
4138 	 */
4139 	if (octeon_init_dispatch_list(octeon_dev))
4140 		return 1;
4141 
4142 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4143 				    OPCODE_NIC_CORE_DRV_ACTIVE,
4144 				    octeon_core_drv_init,
4145 				    octeon_dev);
4146 
4147 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4148 				    OPCODE_NIC_VF_DRV_NOTICE,
4149 				    octeon_recv_vf_drv_notice, octeon_dev);
4150 	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4151 	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4152 	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4153 			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4154 
4155 	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4156 
4157 	if (octeon_set_io_queues_off(octeon_dev)) {
4158 		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4159 		return 1;
4160 	}
4161 
4162 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4163 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4164 		if (ret) {
4165 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4166 			return ret;
4167 		}
4168 	}
4169 
4170 	/* Initialize soft command buffer pool
4171 	 */
4172 	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4173 		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4174 		return 1;
4175 	}
4176 	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4177 
4178 	/*  Setup the data structures that manage this Octeon's Input queues. */
4179 	if (octeon_setup_instr_queues(octeon_dev)) {
4180 		dev_err(&octeon_dev->pci_dev->dev,
4181 			"instruction queue initialization failed\n");
4182 		return 1;
4183 	}
4184 	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4185 
4186 	/* Initialize lists to manage the requests of different types that
4187 	 * arrive from user & kernel applications for this octeon device.
4188 	 */
4189 	if (octeon_setup_response_list(octeon_dev)) {
4190 		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4191 		return 1;
4192 	}
4193 	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4194 
4195 	if (octeon_setup_output_queues(octeon_dev)) {
4196 		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4197 		return 1;
4198 	}
4199 
4200 	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4201 
4202 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4203 		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4204 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4205 			return 1;
4206 		}
4207 		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4208 
4209 		if (octeon_allocate_ioq_vector
4210 				(octeon_dev,
4211 				 octeon_dev->sriov_info.num_pf_rings)) {
4212 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4213 			return 1;
4214 		}
4215 		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4216 
4217 	} else {
4218 		/* The input and output queue registers were setup earlier (the
4219 		 * queues were not enabled). Any additional registers
4220 		 * that need to be programmed should be done now.
4221 		 */
4222 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4223 		if (ret) {
4224 			dev_err(&octeon_dev->pci_dev->dev,
4225 				"Failed to configure device registers\n");
4226 			return ret;
4227 		}
4228 	}
4229 
4230 	/* Initialize the tasklet that handles output queue packet processing.*/
4231 	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4232 	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4233 		     (unsigned long)octeon_dev);
4234 
4235 	/* Setup the interrupt handler and record the INT SUM register address
4236 	 */
4237 	if (octeon_setup_interrupt(octeon_dev,
4238 				   octeon_dev->sriov_info.num_pf_rings))
4239 		return 1;
4240 
4241 	/* Enable Octeon device interrupts */
4242 	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4243 
4244 	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4245 
4246 	/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4247 	 * the output queue is enabled.
4248 	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4249 	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4250 	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4251 	 * before any credits have been issued, causing the ring to be reset
4252 	 * (and the f/w appear to never have started).
4253 	 */
4254 	for (j = 0; j < octeon_dev->num_oqs; j++)
4255 		writel(octeon_dev->droq[j]->max_count,
4256 		       octeon_dev->droq[j]->pkts_credit_reg);
4257 
4258 	/* Enable the input and output queues for this Octeon device */
4259 	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4260 	if (ret) {
4261 		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4262 		return ret;
4263 	}
4264 
4265 	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4266 
4267 	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4268 		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4269 		if (!ddr_timeout) {
4270 			dev_info(&octeon_dev->pci_dev->dev,
4271 				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4272 		}
4273 
4274 		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4275 
4276 		/* Wait for the octeon to initialize DDR after the soft-reset.*/
4277 		while (!ddr_timeout) {
4278 			set_current_state(TASK_INTERRUPTIBLE);
4279 			if (schedule_timeout(HZ / 10)) {
4280 				/* user probably pressed Control-C */
4281 				return 1;
4282 			}
4283 		}
4284 		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4285 		if (ret) {
4286 			dev_err(&octeon_dev->pci_dev->dev,
4287 				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4288 				ret);
4289 			return 1;
4290 		}
4291 
4292 		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4293 			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4294 			return 1;
4295 		}
4296 
4297 		/* Divert uboot to take commands from host instead. */
4298 		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4299 
4300 		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4301 		ret = octeon_init_consoles(octeon_dev);
4302 		if (ret) {
4303 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4304 			return 1;
4305 		}
4306 		/* If console debug enabled, specify empty string to use default
4307 		 * enablement ELSE specify NULL string for 'disabled'.
4308 		 */
4309 		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4310 		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4311 		if (ret) {
4312 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4313 			return 1;
4314 		} else if (octeon_console_debug_enabled(0)) {
4315 			/* If console was added AND we're logging console output
4316 			 * then set our console print function.
4317 			 */
4318 			octeon_dev->console[0].print = octeon_dbg_console_print;
4319 		}
4320 
4321 		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4322 
4323 		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4324 		ret = load_firmware(octeon_dev);
4325 		if (ret) {
4326 			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4327 			return 1;
4328 		}
4329 
4330 		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4331 	}
4332 
4333 	handshake[octeon_dev->octeon_id].init_ok = 1;
4334 	complete(&handshake[octeon_dev->octeon_id].init);
4335 
4336 	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4337 
4338 	return 0;
4339 }
4340 
4341 /**
4342  * \brief Debug console print function
4343  * @param octeon_dev  octeon device
4344  * @param console_num console number
4345  * @param prefix      first portion of line to display
4346  * @param suffix      second portion of line to display
4347  *
4348  * The OCTEON debug console outputs entire lines (excluding '\n').
4349  * Normally, the line will be passed in the 'prefix' parameter.
4350  * However, due to buffering, it is possible for a line to be split into two
4351  * parts, in which case they will be passed as the 'prefix' parameter and
4352  * 'suffix' parameter.
4353  */
4354 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4355 				    char *prefix, char *suffix)
4356 {
4357 	if (prefix && suffix)
4358 		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4359 			 suffix);
4360 	else if (prefix)
4361 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4362 	else if (suffix)
4363 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4364 
4365 	return 0;
4366 }
4367 
4368 /**
4369  * \brief Exits the module
4370  */
4371 static void __exit liquidio_exit(void)
4372 {
4373 	liquidio_deinit_pci();
4374 
4375 	pr_info("LiquidIO network module is now unloaded\n");
4376 }
4377 
4378 module_init(liquidio_init);
4379 module_exit(liquidio_exit);
4380