1 /**********************************************************************
2 * Author: Cavium, Inc.
3 *
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
6 *
7 * Copyright (c) 2003-2016 Cavium, Inc.
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more details.
17 ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <linux/firmware.h>
22 #include <net/vxlan.h>
23 #include <linux/kthread.h>
24 #include "liquidio_common.h"
25 #include "octeon_droq.h"
26 #include "octeon_iq.h"
27 #include "response_manager.h"
28 #include "octeon_device.h"
29 #include "octeon_nic.h"
30 #include "octeon_main.h"
31 #include "octeon_network.h"
32 #include "cn66xx_regs.h"
33 #include "cn66xx_device.h"
34 #include "cn68xx_device.h"
35 #include "cn23xx_pf_device.h"
36 #include "liquidio_image.h"
37 #include "lio_vf_rep.h"
38
39 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
40 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
41 MODULE_LICENSE("GPL");
42 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
43 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
45 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
47 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
49 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
50
51 static int ddr_timeout = 10000;
52 module_param(ddr_timeout, int, 0644);
53 MODULE_PARM_DESC(ddr_timeout,
54 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
55
56 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
57
58 static int debug = -1;
59 module_param(debug, int, 0644);
60 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
61
62 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
63 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
64 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");
65
66 static u32 console_bitmask;
67 module_param(console_bitmask, int, 0644);
68 MODULE_PARM_DESC(console_bitmask,
69 "Bitmask indicating which consoles have debug output redirected to syslog.");
70
71 /**
72 * octeon_console_debug_enabled - determines if a given console has debug enabled.
73 * @console: console to check
74 * Return: 1 = enabled. 0 otherwise
75 */
octeon_console_debug_enabled(u32 console)76 static int octeon_console_debug_enabled(u32 console)
77 {
78 return (console_bitmask >> (console)) & 0x1;
79 }
80
81 /* Polling interval for determining when NIC application is alive */
82 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
83
84 /* runtime link query interval */
85 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
86 /* update localtime to octeon firmware every 60 seconds.
87 * make firmware to use same time reference, so that it will be easy to
88 * correlate firmware logged events/errors with host events, for debugging.
89 */
90 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
91
92 /* time to wait for possible in-flight requests in milliseconds */
93 #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
94
95 struct oct_timestamp_resp {
96 u64 rh;
97 u64 timestamp;
98 u64 status;
99 };
100
101 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
102
103 union tx_info {
104 u64 u64;
105 struct {
106 #ifdef __BIG_ENDIAN_BITFIELD
107 u16 gso_size;
108 u16 gso_segs;
109 u32 reserved;
110 #else
111 u32 reserved;
112 u16 gso_segs;
113 u16 gso_size;
114 #endif
115 } s;
116 };
117
118 /* Octeon device properties to be used by the NIC module.
119 * Each octeon device in the system will be represented
120 * by this structure in the NIC module.
121 */
122
123 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
124 #define OCTNIC_GSO_MAX_SIZE \
125 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
126
127 struct handshake {
128 struct completion init;
129 struct completion started;
130 struct pci_dev *pci_dev;
131 int init_ok;
132 int started_ok;
133 };
134
135 #ifdef CONFIG_PCI_IOV
136 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
137 #endif
138
139 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
140 char *prefix, char *suffix);
141
142 static int octeon_device_init(struct octeon_device *);
143 static int liquidio_stop(struct net_device *netdev);
144 static void liquidio_remove(struct pci_dev *pdev);
145 static int liquidio_probe(struct pci_dev *pdev,
146 const struct pci_device_id *ent);
147 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
148 int linkstate);
149
150 static struct handshake handshake[MAX_OCTEON_DEVICES];
151 static struct completion first_stage;
152
octeon_droq_bh(struct tasklet_struct * t)153 static void octeon_droq_bh(struct tasklet_struct *t)
154 {
155 int q_no;
156 int reschedule = 0;
157 struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
158 droq_tasklet);
159 struct octeon_device *oct = oct_priv->dev;
160
161 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
162 if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
163 continue;
164 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
165 MAX_PACKET_BUDGET);
166 lio_enable_irq(oct->droq[q_no], NULL);
167
168 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
169 /* set time and cnt interrupt thresholds for this DROQ
170 * for NAPI
171 */
172 int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
173
174 octeon_write_csr64(
175 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
176 0x5700000040ULL);
177 octeon_write_csr64(
178 oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
179 }
180 }
181
182 if (reschedule)
183 tasklet_schedule(&oct_priv->droq_tasklet);
184 }
185
lio_wait_for_oq_pkts(struct octeon_device * oct)186 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
187 {
188 struct octeon_device_priv *oct_priv = oct->priv;
189 int retry = 100, pkt_cnt = 0, pending_pkts = 0;
190 int i;
191
192 do {
193 pending_pkts = 0;
194
195 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
196 if (!(oct->io_qmask.oq & BIT_ULL(i)))
197 continue;
198 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
199 }
200 if (pkt_cnt > 0) {
201 pending_pkts += pkt_cnt;
202 tasklet_schedule(&oct_priv->droq_tasklet);
203 }
204 pkt_cnt = 0;
205 schedule_timeout_uninterruptible(1);
206
207 } while (retry-- && pending_pkts);
208
209 return pkt_cnt;
210 }
211
212 /**
213 * force_io_queues_off - Forces all IO queues off on a given device
214 * @oct: Pointer to Octeon device
215 */
force_io_queues_off(struct octeon_device * oct)216 static void force_io_queues_off(struct octeon_device *oct)
217 {
218 if ((oct->chip_id == OCTEON_CN66XX) ||
219 (oct->chip_id == OCTEON_CN68XX)) {
220 /* Reset the Enable bits for Input Queues. */
221 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
222
223 /* Reset the Enable bits for Output Queues. */
224 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
225 }
226 }
227
228 /**
229 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
230 * @oct: Pointer to Octeon device
231 */
pcierror_quiesce_device(struct octeon_device * oct)232 static inline void pcierror_quiesce_device(struct octeon_device *oct)
233 {
234 int i;
235
236 /* Disable the input and output queues now. No more packets will
237 * arrive from Octeon, but we should wait for all packet processing
238 * to finish.
239 */
240 force_io_queues_off(oct);
241
242 /* To allow for in-flight requests */
243 schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
244
245 if (wait_for_pending_requests(oct))
246 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
247
248 /* Force all requests waiting to be fetched by OCTEON to complete. */
249 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
250 struct octeon_instr_queue *iq;
251
252 if (!(oct->io_qmask.iq & BIT_ULL(i)))
253 continue;
254 iq = oct->instr_queue[i];
255
256 if (atomic_read(&iq->instr_pending)) {
257 spin_lock_bh(&iq->lock);
258 iq->fill_cnt = 0;
259 iq->octeon_read_index = iq->host_write_index;
260 iq->stats.instr_processed +=
261 atomic_read(&iq->instr_pending);
262 lio_process_iq_request_list(oct, iq, 0);
263 spin_unlock_bh(&iq->lock);
264 }
265 }
266
267 /* Force all pending ordered list requests to time out. */
268 lio_process_ordered_list(oct, 1);
269
270 /* We do not need to wait for output queue packets to be processed. */
271 }
272
273 /**
274 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
275 * @dev: Pointer to PCI device
276 */
cleanup_aer_uncorrect_error_status(struct pci_dev * dev)277 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
278 {
279 int pos = 0x100;
280 u32 status, mask;
281
282 pr_info("%s :\n", __func__);
283
284 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
285 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
286 if (dev->error_state == pci_channel_io_normal)
287 status &= ~mask; /* Clear corresponding nonfatal bits */
288 else
289 status &= mask; /* Clear corresponding fatal bits */
290 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
291 }
292
293 /**
294 * stop_pci_io - Stop all PCI IO to a given device
295 * @oct: Pointer to Octeon device
296 */
stop_pci_io(struct octeon_device * oct)297 static void stop_pci_io(struct octeon_device *oct)
298 {
299 /* No more instructions will be forwarded. */
300 atomic_set(&oct->status, OCT_DEV_IN_RESET);
301
302 pci_disable_device(oct->pci_dev);
303
304 /* Disable interrupts */
305 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
306
307 pcierror_quiesce_device(oct);
308
309 /* Release the interrupt line */
310 free_irq(oct->pci_dev->irq, oct);
311
312 if (oct->flags & LIO_FLAG_MSI_ENABLED)
313 pci_disable_msi(oct->pci_dev);
314
315 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
316 lio_get_state_string(&oct->status));
317
318 /* making it a common function for all OCTEON models */
319 cleanup_aer_uncorrect_error_status(oct->pci_dev);
320 }
321
322 /**
323 * liquidio_pcie_error_detected - called when PCI error is detected
324 * @pdev: Pointer to PCI device
325 * @state: The current pci connection state
326 *
327 * This function is called after a PCI bus error affecting
328 * this device has been detected.
329 */
liquidio_pcie_error_detected(struct pci_dev * pdev,pci_channel_state_t state)330 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
331 pci_channel_state_t state)
332 {
333 struct octeon_device *oct = pci_get_drvdata(pdev);
334
335 /* Non-correctable Non-fatal errors */
336 if (state == pci_channel_io_normal) {
337 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
338 cleanup_aer_uncorrect_error_status(oct->pci_dev);
339 return PCI_ERS_RESULT_CAN_RECOVER;
340 }
341
342 /* Non-correctable Fatal errors */
343 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
344 stop_pci_io(oct);
345
346 /* Always return a DISCONNECT. There is no support for recovery but only
347 * for a clean shutdown.
348 */
349 return PCI_ERS_RESULT_DISCONNECT;
350 }
351
352 /**
353 * liquidio_pcie_mmio_enabled - mmio handler
354 * @pdev: Pointer to PCI device
355 */
liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused * pdev)356 static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
357 {
358 /* We should never hit this since we never ask for a reset for a Fatal
359 * Error. We always return DISCONNECT in io_error above.
360 * But play safe and return RECOVERED for now.
361 */
362 return PCI_ERS_RESULT_RECOVERED;
363 }
364
365 /**
366 * liquidio_pcie_slot_reset - called after the pci bus has been reset.
367 * @pdev: Pointer to PCI device
368 *
369 * Restart the card from scratch, as if from a cold-boot. Implementation
370 * resembles the first-half of the octeon_resume routine.
371 */
liquidio_pcie_slot_reset(struct pci_dev __maybe_unused * pdev)372 static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
373 {
374 /* We should never hit this since we never ask for a reset for a Fatal
375 * Error. We always return DISCONNECT in io_error above.
376 * But play safe and return RECOVERED for now.
377 */
378 return PCI_ERS_RESULT_RECOVERED;
379 }
380
381 /**
382 * liquidio_pcie_resume - called when traffic can start flowing again.
383 * @pdev: Pointer to PCI device
384 *
385 * This callback is called when the error recovery driver tells us that
386 * its OK to resume normal operation. Implementation resembles the
387 * second-half of the octeon_resume routine.
388 */
liquidio_pcie_resume(struct pci_dev __maybe_unused * pdev)389 static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
390 {
391 /* Nothing to be done here. */
392 }
393
394 #define liquidio_suspend NULL
395 #define liquidio_resume NULL
396
397 /* For PCI-E Advanced Error Recovery (AER) Interface */
398 static const struct pci_error_handlers liquidio_err_handler = {
399 .error_detected = liquidio_pcie_error_detected,
400 .mmio_enabled = liquidio_pcie_mmio_enabled,
401 .slot_reset = liquidio_pcie_slot_reset,
402 .resume = liquidio_pcie_resume,
403 };
404
405 static const struct pci_device_id liquidio_pci_tbl[] = {
406 { /* 68xx */
407 PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
408 },
409 { /* 66xx */
410 PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
411 },
412 { /* 23xx pf */
413 PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
414 },
415 {
416 0, 0, 0, 0, 0, 0, 0
417 }
418 };
419 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
420
421 static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);
422
423 static struct pci_driver liquidio_pci_driver = {
424 .name = "LiquidIO",
425 .id_table = liquidio_pci_tbl,
426 .probe = liquidio_probe,
427 .remove = liquidio_remove,
428 .err_handler = &liquidio_err_handler, /* For AER */
429 .driver.pm = &liquidio_pm_ops,
430 #ifdef CONFIG_PCI_IOV
431 .sriov_configure = liquidio_enable_sriov,
432 #endif
433 };
434
435 /**
436 * liquidio_init_pci - register PCI driver
437 */
liquidio_init_pci(void)438 static int liquidio_init_pci(void)
439 {
440 return pci_register_driver(&liquidio_pci_driver);
441 }
442
443 /**
444 * liquidio_deinit_pci - unregister PCI driver
445 */
liquidio_deinit_pci(void)446 static void liquidio_deinit_pci(void)
447 {
448 pci_unregister_driver(&liquidio_pci_driver);
449 }
450
451 /**
452 * check_txq_status - Check Tx queue status, and take appropriate action
453 * @lio: per-network private data
454 * Return: 0 if full, number of queues woken up otherwise
455 */
check_txq_status(struct lio * lio)456 static inline int check_txq_status(struct lio *lio)
457 {
458 int numqs = lio->netdev->real_num_tx_queues;
459 int ret_val = 0;
460 int q, iq;
461
462 /* check each sub-queue state */
463 for (q = 0; q < numqs; q++) {
464 iq = lio->linfo.txpciq[q %
465 lio->oct_dev->num_iqs].s.q_no;
466 if (octnet_iq_is_full(lio->oct_dev, iq))
467 continue;
468 if (__netif_subqueue_stopped(lio->netdev, q)) {
469 netif_wake_subqueue(lio->netdev, q);
470 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
471 tx_restart, 1);
472 ret_val++;
473 }
474 }
475
476 return ret_val;
477 }
478
479 /**
480 * print_link_info - Print link information
481 * @netdev: network device
482 */
print_link_info(struct net_device * netdev)483 static void print_link_info(struct net_device *netdev)
484 {
485 struct lio *lio = GET_LIO(netdev);
486
487 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
488 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
489 struct oct_link_info *linfo = &lio->linfo;
490
491 if (linfo->link.s.link_up) {
492 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
493 linfo->link.s.speed,
494 (linfo->link.s.duplex) ? "Full" : "Half");
495 } else {
496 netif_info(lio, link, lio->netdev, "Link Down\n");
497 }
498 }
499 }
500
501 /**
502 * octnet_link_status_change - Routine to notify MTU change
503 * @work: work_struct data structure
504 */
octnet_link_status_change(struct work_struct * work)505 static void octnet_link_status_change(struct work_struct *work)
506 {
507 struct cavium_wk *wk = (struct cavium_wk *)work;
508 struct lio *lio = (struct lio *)wk->ctxptr;
509
510 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
511 * this API is invoked only when new max-MTU of the interface is
512 * less than current MTU.
513 */
514 rtnl_lock();
515 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
516 rtnl_unlock();
517 }
518
519 /**
520 * setup_link_status_change_wq - Sets up the mtu status change work
521 * @netdev: network device
522 */
setup_link_status_change_wq(struct net_device * netdev)523 static inline int setup_link_status_change_wq(struct net_device *netdev)
524 {
525 struct lio *lio = GET_LIO(netdev);
526 struct octeon_device *oct = lio->oct_dev;
527
528 lio->link_status_wq.wq = alloc_workqueue("link-status",
529 WQ_MEM_RECLAIM, 0);
530 if (!lio->link_status_wq.wq) {
531 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
532 return -1;
533 }
534 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
535 octnet_link_status_change);
536 lio->link_status_wq.wk.ctxptr = lio;
537
538 return 0;
539 }
540
cleanup_link_status_change_wq(struct net_device * netdev)541 static inline void cleanup_link_status_change_wq(struct net_device *netdev)
542 {
543 struct lio *lio = GET_LIO(netdev);
544
545 if (lio->link_status_wq.wq) {
546 cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
547 destroy_workqueue(lio->link_status_wq.wq);
548 }
549 }
550
551 /**
552 * update_link_status - Update link status
553 * @netdev: network device
554 * @ls: link status structure
555 *
556 * Called on receipt of a link status response from the core application to
557 * update each interface's link status.
558 */
update_link_status(struct net_device * netdev,union oct_link_status * ls)559 static inline void update_link_status(struct net_device *netdev,
560 union oct_link_status *ls)
561 {
562 struct lio *lio = GET_LIO(netdev);
563 int changed = (lio->linfo.link.u64 != ls->u64);
564 int current_max_mtu = lio->linfo.link.s.mtu;
565 struct octeon_device *oct = lio->oct_dev;
566
567 dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
568 __func__, lio->linfo.link.u64, ls->u64);
569 lio->linfo.link.u64 = ls->u64;
570
571 if ((lio->intf_open) && (changed)) {
572 print_link_info(netdev);
573 lio->link_changes++;
574
575 if (lio->linfo.link.s.link_up) {
576 dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
577 netif_carrier_on(netdev);
578 wake_txqs(netdev);
579 } else {
580 dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
581 netif_carrier_off(netdev);
582 stop_txqs(netdev);
583 }
584 if (lio->linfo.link.s.mtu != current_max_mtu) {
585 netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
586 current_max_mtu, lio->linfo.link.s.mtu);
587 netdev->max_mtu = lio->linfo.link.s.mtu;
588 }
589 if (lio->linfo.link.s.mtu < netdev->mtu) {
590 dev_warn(&oct->pci_dev->dev,
591 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
592 netdev->mtu, lio->linfo.link.s.mtu);
593 queue_delayed_work(lio->link_status_wq.wq,
594 &lio->link_status_wq.wk.work, 0);
595 }
596 }
597 }
598
599 /**
600 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
601 * firmware will correct it's time, in case there is a time skew
602 *
603 * @work: work scheduled to send time update to octeon firmware
604 **/
lio_sync_octeon_time(struct work_struct * work)605 static void lio_sync_octeon_time(struct work_struct *work)
606 {
607 struct cavium_wk *wk = (struct cavium_wk *)work;
608 struct lio *lio = (struct lio *)wk->ctxptr;
609 struct octeon_device *oct = lio->oct_dev;
610 struct octeon_soft_command *sc;
611 struct timespec64 ts;
612 struct lio_time *lt;
613 int ret;
614
615 sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
616 if (!sc) {
617 dev_err(&oct->pci_dev->dev,
618 "Failed to sync time to octeon: soft command allocation failed\n");
619 return;
620 }
621
622 lt = (struct lio_time *)sc->virtdptr;
623
624 /* Get time of the day */
625 ktime_get_real_ts64(&ts);
626 lt->sec = ts.tv_sec;
627 lt->nsec = ts.tv_nsec;
628 octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
629
630 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
631 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
632 OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
633
634 init_completion(&sc->complete);
635 sc->sc_status = OCTEON_REQUEST_PENDING;
636
637 ret = octeon_send_soft_command(oct, sc);
638 if (ret == IQ_SEND_FAILED) {
639 dev_err(&oct->pci_dev->dev,
640 "Failed to sync time to octeon: failed to send soft command\n");
641 octeon_free_soft_command(oct, sc);
642 } else {
643 WRITE_ONCE(sc->caller_is_done, true);
644 }
645
646 queue_delayed_work(lio->sync_octeon_time_wq.wq,
647 &lio->sync_octeon_time_wq.wk.work,
648 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
649 }
650
651 /**
652 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
653 *
654 * @netdev: network device which should send time update to firmware
655 **/
setup_sync_octeon_time_wq(struct net_device * netdev)656 static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
657 {
658 struct lio *lio = GET_LIO(netdev);
659 struct octeon_device *oct = lio->oct_dev;
660
661 lio->sync_octeon_time_wq.wq =
662 alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
663 if (!lio->sync_octeon_time_wq.wq) {
664 dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
665 return -1;
666 }
667 INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
668 lio_sync_octeon_time);
669 lio->sync_octeon_time_wq.wk.ctxptr = lio;
670 queue_delayed_work(lio->sync_octeon_time_wq.wq,
671 &lio->sync_octeon_time_wq.wk.work,
672 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
673
674 return 0;
675 }
676
677 /**
678 * cleanup_sync_octeon_time_wq - destroy wq
679 *
680 * @netdev: network device which should send time update to firmware
681 *
682 * Stop scheduling and destroy the work created to periodically update local
683 * time to octeon firmware.
684 **/
cleanup_sync_octeon_time_wq(struct net_device * netdev)685 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
686 {
687 struct lio *lio = GET_LIO(netdev);
688 struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;
689
690 if (time_wq->wq) {
691 cancel_delayed_work_sync(&time_wq->wk.work);
692 destroy_workqueue(time_wq->wq);
693 }
694 }
695
get_other_octeon_device(struct octeon_device * oct)696 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
697 {
698 struct octeon_device *other_oct;
699
700 other_oct = lio_get_device(oct->octeon_id + 1);
701
702 if (other_oct && other_oct->pci_dev) {
703 int oct_busnum, other_oct_busnum;
704
705 oct_busnum = oct->pci_dev->bus->number;
706 other_oct_busnum = other_oct->pci_dev->bus->number;
707
708 if (oct_busnum == other_oct_busnum) {
709 int oct_slot, other_oct_slot;
710
711 oct_slot = PCI_SLOT(oct->pci_dev->devfn);
712 other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
713
714 if (oct_slot == other_oct_slot)
715 return other_oct;
716 }
717 }
718
719 return NULL;
720 }
721
disable_all_vf_links(struct octeon_device * oct)722 static void disable_all_vf_links(struct octeon_device *oct)
723 {
724 struct net_device *netdev;
725 int max_vfs, vf, i;
726
727 if (!oct)
728 return;
729
730 max_vfs = oct->sriov_info.max_vfs;
731
732 for (i = 0; i < oct->ifcount; i++) {
733 netdev = oct->props[i].netdev;
734 if (!netdev)
735 continue;
736
737 for (vf = 0; vf < max_vfs; vf++)
738 liquidio_set_vf_link_state(netdev, vf,
739 IFLA_VF_LINK_STATE_DISABLE);
740 }
741 }
742
liquidio_watchdog(void * param)743 static int liquidio_watchdog(void *param)
744 {
745 bool err_msg_was_printed[LIO_MAX_CORES];
746 u16 mask_of_crashed_or_stuck_cores = 0;
747 bool all_vf_links_are_disabled = false;
748 struct octeon_device *oct = param;
749 struct octeon_device *other_oct;
750 #ifdef CONFIG_MODULE_UNLOAD
751 long refcount, vfs_referencing_pf;
752 u64 vfs_mask1, vfs_mask2;
753 #endif
754 int core;
755
756 memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
757
758 while (!kthread_should_stop()) {
759 /* sleep for a couple of seconds so that we don't hog the CPU */
760 set_current_state(TASK_INTERRUPTIBLE);
761 schedule_timeout(msecs_to_jiffies(2000));
762
763 mask_of_crashed_or_stuck_cores =
764 (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
765
766 if (!mask_of_crashed_or_stuck_cores)
767 continue;
768
769 WRITE_ONCE(oct->cores_crashed, true);
770 other_oct = get_other_octeon_device(oct);
771 if (other_oct)
772 WRITE_ONCE(other_oct->cores_crashed, true);
773
774 for (core = 0; core < LIO_MAX_CORES; core++) {
775 bool core_crashed_or_got_stuck;
776
777 core_crashed_or_got_stuck =
778 (mask_of_crashed_or_stuck_cores
779 >> core) & 1;
780
781 if (core_crashed_or_got_stuck &&
782 !err_msg_was_printed[core]) {
783 dev_err(&oct->pci_dev->dev,
784 "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
785 core);
786 err_msg_was_printed[core] = true;
787 }
788 }
789
790 if (all_vf_links_are_disabled)
791 continue;
792
793 disable_all_vf_links(oct);
794 disable_all_vf_links(other_oct);
795 all_vf_links_are_disabled = true;
796
797 #ifdef CONFIG_MODULE_UNLOAD
798 vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
799 vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
800
801 vfs_referencing_pf = hweight64(vfs_mask1);
802 vfs_referencing_pf += hweight64(vfs_mask2);
803
804 refcount = module_refcount(THIS_MODULE);
805 if (refcount >= vfs_referencing_pf) {
806 while (vfs_referencing_pf) {
807 module_put(THIS_MODULE);
808 vfs_referencing_pf--;
809 }
810 }
811 #endif
812 }
813
814 return 0;
815 }
816
817 /**
818 * liquidio_probe - PCI probe handler
819 * @pdev: PCI device structure
820 * @ent: unused
821 */
822 static int
liquidio_probe(struct pci_dev * pdev,const struct pci_device_id __maybe_unused * ent)823 liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
824 {
825 struct octeon_device *oct_dev = NULL;
826 struct handshake *hs;
827
828 oct_dev = octeon_allocate_device(pdev->device,
829 sizeof(struct octeon_device_priv));
830 if (!oct_dev) {
831 dev_err(&pdev->dev, "Unable to allocate device\n");
832 return -ENOMEM;
833 }
834
835 if (pdev->device == OCTEON_CN23XX_PF_VID)
836 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
837
838 /* Enable PTP for 6XXX Device */
839 if (((pdev->device == OCTEON_CN66XX) ||
840 (pdev->device == OCTEON_CN68XX)))
841 oct_dev->ptp_enable = true;
842 else
843 oct_dev->ptp_enable = false;
844
845 dev_info(&pdev->dev, "Initializing device %x:%x.\n",
846 (u32)pdev->vendor, (u32)pdev->device);
847
848 /* Assign octeon_device for this device to the private data area. */
849 pci_set_drvdata(pdev, oct_dev);
850
851 /* set linux specific device pointer */
852 oct_dev->pci_dev = (void *)pdev;
853
854 oct_dev->subsystem_id = pdev->subsystem_vendor |
855 (pdev->subsystem_device << 16);
856
857 hs = &handshake[oct_dev->octeon_id];
858 init_completion(&hs->init);
859 init_completion(&hs->started);
860 hs->pci_dev = pdev;
861
862 if (oct_dev->octeon_id == 0)
863 /* first LiquidIO NIC is detected */
864 complete(&first_stage);
865
866 if (octeon_device_init(oct_dev)) {
867 complete(&hs->init);
868 liquidio_remove(pdev);
869 return -ENOMEM;
870 }
871
872 if (OCTEON_CN23XX_PF(oct_dev)) {
873 u8 bus, device, function;
874
875 if (atomic_read(oct_dev->adapter_refcount) == 1) {
876 /* Each NIC gets one watchdog kernel thread. The first
877 * PF (of each NIC) that gets pci_driver->probe()'d
878 * creates that thread.
879 */
880 bus = pdev->bus->number;
881 device = PCI_SLOT(pdev->devfn);
882 function = PCI_FUNC(pdev->devfn);
883 oct_dev->watchdog_task = kthread_run(liquidio_watchdog,
884 oct_dev,
885 "liowd/%02hhx:%02hhx.%hhx",
886 bus, device, function);
887 if (IS_ERR(oct_dev->watchdog_task)) {
888 oct_dev->watchdog_task = NULL;
889 dev_err(&oct_dev->pci_dev->dev,
890 "failed to create kernel_thread\n");
891 liquidio_remove(pdev);
892 return -1;
893 }
894 }
895 }
896
897 oct_dev->rx_pause = 1;
898 oct_dev->tx_pause = 1;
899
900 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
901
902 return 0;
903 }
904
fw_type_is_auto(void)905 static bool fw_type_is_auto(void)
906 {
907 return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
908 sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
909 }
910
911 /**
912 * octeon_pci_flr - PCI FLR for each Octeon device.
913 * @oct: octeon device
914 */
octeon_pci_flr(struct octeon_device * oct)915 static void octeon_pci_flr(struct octeon_device *oct)
916 {
917 int rc;
918
919 pci_save_state(oct->pci_dev);
920
921 pci_cfg_access_lock(oct->pci_dev);
922
923 /* Quiesce the device completely */
924 pci_write_config_word(oct->pci_dev, PCI_COMMAND,
925 PCI_COMMAND_INTX_DISABLE);
926
927 rc = __pci_reset_function_locked(oct->pci_dev);
928
929 if (rc != 0)
930 dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
931 rc, oct->pf_num);
932
933 pci_cfg_access_unlock(oct->pci_dev);
934
935 pci_restore_state(oct->pci_dev);
936 }
937
938 /**
939 * octeon_destroy_resources - Destroy resources associated with octeon device
940 * @oct: octeon device
941 */
octeon_destroy_resources(struct octeon_device * oct)942 static void octeon_destroy_resources(struct octeon_device *oct)
943 {
944 int i, refcount;
945 struct msix_entry *msix_entries;
946 struct octeon_device_priv *oct_priv = oct->priv;
947
948 struct handshake *hs;
949
950 switch (atomic_read(&oct->status)) {
951 case OCT_DEV_RUNNING:
952 case OCT_DEV_CORE_OK:
953
954 /* No more instructions will be forwarded. */
955 atomic_set(&oct->status, OCT_DEV_IN_RESET);
956
957 oct->app_mode = CVM_DRV_INVALID_APP;
958 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
959 lio_get_state_string(&oct->status));
960
961 schedule_timeout_uninterruptible(HZ / 10);
962
963 fallthrough;
964 case OCT_DEV_HOST_OK:
965
966 case OCT_DEV_CONSOLE_INIT_DONE:
967 /* Remove any consoles */
968 octeon_remove_consoles(oct);
969
970 fallthrough;
971 case OCT_DEV_IO_QUEUES_DONE:
972 if (lio_wait_for_instr_fetch(oct))
973 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
974
975 if (wait_for_pending_requests(oct))
976 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
977
978 /* Disable the input and output queues now. No more packets will
979 * arrive from Octeon, but we should wait for all packet
980 * processing to finish.
981 */
982 oct->fn_list.disable_io_queues(oct);
983
984 if (lio_wait_for_oq_pkts(oct))
985 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
986
987 /* Force all requests waiting to be fetched by OCTEON to
988 * complete.
989 */
990 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
991 struct octeon_instr_queue *iq;
992
993 if (!(oct->io_qmask.iq & BIT_ULL(i)))
994 continue;
995 iq = oct->instr_queue[i];
996
997 if (atomic_read(&iq->instr_pending)) {
998 spin_lock_bh(&iq->lock);
999 iq->fill_cnt = 0;
1000 iq->octeon_read_index = iq->host_write_index;
1001 iq->stats.instr_processed +=
1002 atomic_read(&iq->instr_pending);
1003 lio_process_iq_request_list(oct, iq, 0);
1004 spin_unlock_bh(&iq->lock);
1005 }
1006 }
1007
1008 lio_process_ordered_list(oct, 1);
1009 octeon_free_sc_done_list(oct);
1010 octeon_free_sc_zombie_list(oct);
1011
1012 fallthrough;
1013 case OCT_DEV_INTR_SET_DONE:
1014 /* Disable interrupts */
1015 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1016
1017 if (oct->msix_on) {
1018 msix_entries = (struct msix_entry *)oct->msix_entries;
1019 for (i = 0; i < oct->num_msix_irqs - 1; i++) {
1020 if (oct->ioq_vector[i].vector) {
1021 /* clear the affinity_cpumask */
1022 irq_set_affinity_hint(
1023 msix_entries[i].vector,
1024 NULL);
1025 free_irq(msix_entries[i].vector,
1026 &oct->ioq_vector[i]);
1027 oct->ioq_vector[i].vector = 0;
1028 }
1029 }
1030 /* non-iov vector's argument is oct struct */
1031 free_irq(msix_entries[i].vector, oct);
1032
1033 pci_disable_msix(oct->pci_dev);
1034 kfree(oct->msix_entries);
1035 oct->msix_entries = NULL;
1036 } else {
1037 /* Release the interrupt line */
1038 free_irq(oct->pci_dev->irq, oct);
1039
1040 if (oct->flags & LIO_FLAG_MSI_ENABLED)
1041 pci_disable_msi(oct->pci_dev);
1042 }
1043
1044 kfree(oct->irq_name_storage);
1045 oct->irq_name_storage = NULL;
1046
1047 fallthrough;
1048 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
1049 if (OCTEON_CN23XX_PF(oct))
1050 octeon_free_ioq_vector(oct);
1051
1052 fallthrough;
1053 case OCT_DEV_MBOX_SETUP_DONE:
1054 if (OCTEON_CN23XX_PF(oct))
1055 oct->fn_list.free_mbox(oct);
1056
1057 fallthrough;
1058 case OCT_DEV_IN_RESET:
1059 case OCT_DEV_DROQ_INIT_DONE:
1060 /* Wait for any pending operations */
1061 mdelay(100);
1062 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1063 if (!(oct->io_qmask.oq & BIT_ULL(i)))
1064 continue;
1065 octeon_delete_droq(oct, i);
1066 }
1067
1068 /* Force any pending handshakes to complete */
1069 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
1070 hs = &handshake[i];
1071
1072 if (hs->pci_dev) {
1073 handshake[oct->octeon_id].init_ok = 0;
1074 complete(&handshake[oct->octeon_id].init);
1075 handshake[oct->octeon_id].started_ok = 0;
1076 complete(&handshake[oct->octeon_id].started);
1077 }
1078 }
1079
1080 fallthrough;
1081 case OCT_DEV_RESP_LIST_INIT_DONE:
1082 octeon_delete_response_list(oct);
1083
1084 fallthrough;
1085 case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1086 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1087 if (!(oct->io_qmask.iq & BIT_ULL(i)))
1088 continue;
1089 octeon_delete_instr_queue(oct, i);
1090 }
1091 #ifdef CONFIG_PCI_IOV
1092 if (oct->sriov_info.sriov_enabled)
1093 pci_disable_sriov(oct->pci_dev);
1094 #endif
1095 fallthrough;
1096 case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
1097 octeon_free_sc_buffer_pool(oct);
1098
1099 fallthrough;
1100 case OCT_DEV_DISPATCH_INIT_DONE:
1101 octeon_delete_dispatch_list(oct);
1102 cancel_delayed_work_sync(&oct->nic_poll_work.work);
1103
1104 fallthrough;
1105 case OCT_DEV_PCI_MAP_DONE:
1106 refcount = octeon_deregister_device(oct);
1107
1108 /* Soft reset the octeon device before exiting.
1109 * However, if fw was loaded from card (i.e. autoboot),
1110 * perform an FLR instead.
1111 * Implementation note: only soft-reset the device
1112 * if it is a CN6XXX OR the LAST CN23XX device.
1113 */
1114 if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
1115 octeon_pci_flr(oct);
1116 else if (OCTEON_CN6XXX(oct) || !refcount)
1117 oct->fn_list.soft_reset(oct);
1118
1119 octeon_unmap_pci_barx(oct, 0);
1120 octeon_unmap_pci_barx(oct, 1);
1121
1122 fallthrough;
1123 case OCT_DEV_PCI_ENABLE_DONE:
1124 /* Disable the device, releasing the PCI INT */
1125 pci_disable_device(oct->pci_dev);
1126
1127 fallthrough;
1128 case OCT_DEV_BEGIN_STATE:
1129 /* Nothing to be done here either */
1130 break;
1131 } /* end switch (oct->status) */
1132
1133 tasklet_kill(&oct_priv->droq_tasklet);
1134 }
1135
1136 /**
1137 * send_rx_ctrl_cmd - Send Rx control command
1138 * @lio: per-network private data
1139 * @start_stop: whether to start or stop
1140 */
send_rx_ctrl_cmd(struct lio * lio,int start_stop)1141 static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1142 {
1143 struct octeon_soft_command *sc;
1144 union octnet_cmd *ncmd;
1145 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1146 int retval;
1147
1148 if (oct->props[lio->ifidx].rx_on == start_stop)
1149 return 0;
1150
1151 sc = (struct octeon_soft_command *)
1152 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1153 16, 0);
1154 if (!sc) {
1155 netif_info(lio, rx_err, lio->netdev,
1156 "Failed to allocate octeon_soft_command struct\n");
1157 return -ENOMEM;
1158 }
1159
1160 ncmd = (union octnet_cmd *)sc->virtdptr;
1161
1162 ncmd->u64 = 0;
1163 ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1164 ncmd->s.param1 = start_stop;
1165
1166 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1167
1168 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1169
1170 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1171 OPCODE_NIC_CMD, 0, 0, 0);
1172
1173 init_completion(&sc->complete);
1174 sc->sc_status = OCTEON_REQUEST_PENDING;
1175
1176 retval = octeon_send_soft_command(oct, sc);
1177 if (retval == IQ_SEND_FAILED) {
1178 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1179 octeon_free_soft_command(oct, sc);
1180 } else {
1181 /* Sleep on a wait queue till the cond flag indicates that the
1182 * response arrived or timed-out.
1183 */
1184 retval = wait_for_sc_completion_timeout(oct, sc, 0);
1185 if (retval)
1186 return retval;
1187
1188 oct->props[lio->ifidx].rx_on = start_stop;
1189 WRITE_ONCE(sc->caller_is_done, true);
1190 }
1191
1192 return retval;
1193 }
1194
1195 /**
1196 * liquidio_destroy_nic_device - Destroy NIC device interface
1197 * @oct: octeon device
1198 * @ifidx: which interface to destroy
1199 *
1200 * Cleanup associated with each interface for an Octeon device when NIC
1201 * module is being unloaded or if initialization fails during load.
1202 */
liquidio_destroy_nic_device(struct octeon_device * oct,int ifidx)1203 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1204 {
1205 struct net_device *netdev = oct->props[ifidx].netdev;
1206 struct octeon_device_priv *oct_priv = oct->priv;
1207 struct napi_struct *napi, *n;
1208 struct lio *lio;
1209
1210 if (!netdev) {
1211 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1212 __func__, ifidx);
1213 return;
1214 }
1215
1216 lio = GET_LIO(netdev);
1217
1218 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1219
1220 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1221 liquidio_stop(netdev);
1222
1223 if (oct->props[lio->ifidx].napi_enabled == 1) {
1224 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1225 napi_disable(napi);
1226
1227 oct->props[lio->ifidx].napi_enabled = 0;
1228
1229 if (OCTEON_CN23XX_PF(oct))
1230 oct->droq[0]->ops.poll_mode = 0;
1231 }
1232
1233 /* Delete NAPI */
1234 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1235 netif_napi_del(napi);
1236
1237 tasklet_enable(&oct_priv->droq_tasklet);
1238
1239 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1240 unregister_netdev(netdev);
1241
1242 cleanup_sync_octeon_time_wq(netdev);
1243 cleanup_link_status_change_wq(netdev);
1244
1245 cleanup_rx_oom_poll_fn(netdev);
1246
1247 lio_delete_glists(lio);
1248
1249 free_netdev(netdev);
1250
1251 oct->props[ifidx].gmxport = -1;
1252
1253 oct->props[ifidx].netdev = NULL;
1254 }
1255
1256 /**
1257 * liquidio_stop_nic_module - Stop complete NIC functionality
1258 * @oct: octeon device
1259 */
liquidio_stop_nic_module(struct octeon_device * oct)1260 static int liquidio_stop_nic_module(struct octeon_device *oct)
1261 {
1262 int i, j;
1263 struct lio *lio;
1264
1265 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1266 device_lock(&oct->pci_dev->dev);
1267 if (oct->devlink) {
1268 devlink_unregister(oct->devlink);
1269 devlink_free(oct->devlink);
1270 oct->devlink = NULL;
1271 }
1272 device_unlock(&oct->pci_dev->dev);
1273
1274 if (!oct->ifcount) {
1275 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1276 return 1;
1277 }
1278
1279 spin_lock_bh(&oct->cmd_resp_wqlock);
1280 oct->cmd_resp_state = OCT_DRV_OFFLINE;
1281 spin_unlock_bh(&oct->cmd_resp_wqlock);
1282
1283 lio_vf_rep_destroy(oct);
1284
1285 for (i = 0; i < oct->ifcount; i++) {
1286 lio = GET_LIO(oct->props[i].netdev);
1287 for (j = 0; j < oct->num_oqs; j++)
1288 octeon_unregister_droq_ops(oct,
1289 lio->linfo.rxpciq[j].s.q_no);
1290 }
1291
1292 for (i = 0; i < oct->ifcount; i++)
1293 liquidio_destroy_nic_device(oct, i);
1294
1295 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1296 return 0;
1297 }
1298
1299 /**
1300 * liquidio_remove - Cleans up resources at unload time
1301 * @pdev: PCI device structure
1302 */
liquidio_remove(struct pci_dev * pdev)1303 static void liquidio_remove(struct pci_dev *pdev)
1304 {
1305 struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1306
1307 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1308
1309 if (oct_dev->watchdog_task)
1310 kthread_stop(oct_dev->watchdog_task);
1311
1312 if (!oct_dev->octeon_id &&
1313 oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
1314 lio_vf_rep_modexit();
1315
1316 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1317 liquidio_stop_nic_module(oct_dev);
1318
1319 /* Reset the octeon device and cleanup all memory allocated for
1320 * the octeon device by driver.
1321 */
1322 octeon_destroy_resources(oct_dev);
1323
1324 dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1325
1326 /* This octeon device has been removed. Update the global
1327 * data structure to reflect this. Free the device structure.
1328 */
1329 octeon_free_device_mem(oct_dev);
1330 }
1331
1332 /**
1333 * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space
1334 * @oct: octeon device
1335 */
octeon_chip_specific_setup(struct octeon_device * oct)1336 static int octeon_chip_specific_setup(struct octeon_device *oct)
1337 {
1338 u32 dev_id, rev_id;
1339 int ret = 1;
1340
1341 pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1342 pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1343 oct->rev_id = rev_id & 0xff;
1344
1345 switch (dev_id) {
1346 case OCTEON_CN68XX_PCIID:
1347 oct->chip_id = OCTEON_CN68XX;
1348 ret = lio_setup_cn68xx_octeon_device(oct);
1349 break;
1350
1351 case OCTEON_CN66XX_PCIID:
1352 oct->chip_id = OCTEON_CN66XX;
1353 ret = lio_setup_cn66xx_octeon_device(oct);
1354 break;
1355
1356 case OCTEON_CN23XX_PCIID_PF:
1357 oct->chip_id = OCTEON_CN23XX_PF_VID;
1358 ret = setup_cn23xx_octeon_pf_device(oct);
1359 if (ret)
1360 break;
1361 #ifdef CONFIG_PCI_IOV
1362 if (!ret)
1363 pci_sriov_set_totalvfs(oct->pci_dev,
1364 oct->sriov_info.max_vfs);
1365 #endif
1366 break;
1367
1368 default:
1369 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1370 dev_id);
1371 }
1372
1373 return ret;
1374 }
1375
1376 /**
1377 * octeon_pci_os_setup - PCI initialization for each Octeon device.
1378 * @oct: octeon device
1379 */
octeon_pci_os_setup(struct octeon_device * oct)1380 static int octeon_pci_os_setup(struct octeon_device *oct)
1381 {
1382 /* setup PCI stuff first */
1383 if (pci_enable_device(oct->pci_dev)) {
1384 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1385 return 1;
1386 }
1387
1388 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1389 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1390 pci_disable_device(oct->pci_dev);
1391 return 1;
1392 }
1393
1394 /* Enable PCI DMA Master. */
1395 pci_set_master(oct->pci_dev);
1396
1397 return 0;
1398 }
1399
1400 /**
1401 * free_netbuf - Unmap and free network buffer
1402 * @buf: buffer
1403 */
free_netbuf(void * buf)1404 static void free_netbuf(void *buf)
1405 {
1406 struct sk_buff *skb;
1407 struct octnet_buf_free_info *finfo;
1408 struct lio *lio;
1409
1410 finfo = (struct octnet_buf_free_info *)buf;
1411 skb = finfo->skb;
1412 lio = finfo->lio;
1413
1414 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1415 DMA_TO_DEVICE);
1416
1417 tx_buffer_free(skb);
1418 }
1419
1420 /**
1421 * free_netsgbuf - Unmap and free gather buffer
1422 * @buf: buffer
1423 */
free_netsgbuf(void * buf)1424 static void free_netsgbuf(void *buf)
1425 {
1426 struct octnet_buf_free_info *finfo;
1427 struct sk_buff *skb;
1428 struct lio *lio;
1429 struct octnic_gather *g;
1430 int i, frags, iq;
1431
1432 finfo = (struct octnet_buf_free_info *)buf;
1433 skb = finfo->skb;
1434 lio = finfo->lio;
1435 g = finfo->g;
1436 frags = skb_shinfo(skb)->nr_frags;
1437
1438 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1439 g->sg[0].ptr[0], (skb->len - skb->data_len),
1440 DMA_TO_DEVICE);
1441
1442 i = 1;
1443 while (frags--) {
1444 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
1445
1446 dma_unmap_page(&lio->oct_dev->pci_dev->dev,
1447 g->sg[(i >> 2)].ptr[(i & 3)],
1448 skb_frag_size(frag), DMA_TO_DEVICE);
1449 i++;
1450 }
1451
1452 iq = skb_iq(lio->oct_dev, skb);
1453 spin_lock(&lio->glist_lock[iq]);
1454 list_add_tail(&g->list, &lio->glist[iq]);
1455 spin_unlock(&lio->glist_lock[iq]);
1456
1457 tx_buffer_free(skb);
1458 }
1459
1460 /**
1461 * free_netsgbuf_with_resp - Unmap and free gather buffer with response
1462 * @buf: buffer
1463 */
free_netsgbuf_with_resp(void * buf)1464 static void free_netsgbuf_with_resp(void *buf)
1465 {
1466 struct octeon_soft_command *sc;
1467 struct octnet_buf_free_info *finfo;
1468 struct sk_buff *skb;
1469 struct lio *lio;
1470 struct octnic_gather *g;
1471 int i, frags, iq;
1472
1473 sc = (struct octeon_soft_command *)buf;
1474 skb = (struct sk_buff *)sc->callback_arg;
1475 finfo = (struct octnet_buf_free_info *)&skb->cb;
1476
1477 lio = finfo->lio;
1478 g = finfo->g;
1479 frags = skb_shinfo(skb)->nr_frags;
1480
1481 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1482 g->sg[0].ptr[0], (skb->len - skb->data_len),
1483 DMA_TO_DEVICE);
1484
1485 i = 1;
1486 while (frags--) {
1487 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
1488
1489 dma_unmap_page(&lio->oct_dev->pci_dev->dev,
1490 g->sg[(i >> 2)].ptr[(i & 3)],
1491 skb_frag_size(frag), DMA_TO_DEVICE);
1492 i++;
1493 }
1494
1495 iq = skb_iq(lio->oct_dev, skb);
1496
1497 spin_lock(&lio->glist_lock[iq]);
1498 list_add_tail(&g->list, &lio->glist[iq]);
1499 spin_unlock(&lio->glist_lock[iq]);
1500
1501 /* Don't free the skb yet */
1502 }
1503
1504 /**
1505 * liquidio_ptp_adjfine - Adjust ptp frequency
1506 * @ptp: PTP clock info
1507 * @scaled_ppm: how much to adjust by, in scaled parts-per-million
1508 *
1509 * Scaled parts per million is ppm with a 16-bit binary fractional field.
1510 */
liquidio_ptp_adjfine(struct ptp_clock_info * ptp,long scaled_ppm)1511 static int liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
1512 {
1513 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1514 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1515 s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
1516 u64 comp, delta;
1517 unsigned long flags;
1518 bool neg_adj = false;
1519
1520 if (ppb < 0) {
1521 neg_adj = true;
1522 ppb = -ppb;
1523 }
1524
1525 /* The hardware adds the clock compensation value to the
1526 * PTP clock on every coprocessor clock cycle, so we
1527 * compute the delta in terms of coprocessor clocks.
1528 */
1529 delta = (u64)ppb << 32;
1530 do_div(delta, oct->coproc_clock_rate);
1531
1532 spin_lock_irqsave(&lio->ptp_lock, flags);
1533 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1534 if (neg_adj)
1535 comp -= delta;
1536 else
1537 comp += delta;
1538 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1539 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1540
1541 return 0;
1542 }
1543
1544 /**
1545 * liquidio_ptp_adjtime - Adjust ptp time
1546 * @ptp: PTP clock info
1547 * @delta: how much to adjust by, in nanosecs
1548 */
liquidio_ptp_adjtime(struct ptp_clock_info * ptp,s64 delta)1549 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1550 {
1551 unsigned long flags;
1552 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1553
1554 spin_lock_irqsave(&lio->ptp_lock, flags);
1555 lio->ptp_adjust += delta;
1556 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1557
1558 return 0;
1559 }
1560
1561 /**
1562 * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
1563 * @ptp: PTP clock info
1564 * @ts: timespec
1565 */
liquidio_ptp_gettime(struct ptp_clock_info * ptp,struct timespec64 * ts)1566 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1567 struct timespec64 *ts)
1568 {
1569 u64 ns;
1570 unsigned long flags;
1571 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1572 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1573
1574 spin_lock_irqsave(&lio->ptp_lock, flags);
1575 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1576 ns += lio->ptp_adjust;
1577 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1578
1579 *ts = ns_to_timespec64(ns);
1580
1581 return 0;
1582 }
1583
1584 /**
1585 * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
1586 * @ptp: PTP clock info
1587 * @ts: timespec
1588 */
liquidio_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)1589 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1590 const struct timespec64 *ts)
1591 {
1592 u64 ns;
1593 unsigned long flags;
1594 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1595 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1596
1597 ns = timespec64_to_ns(ts);
1598
1599 spin_lock_irqsave(&lio->ptp_lock, flags);
1600 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1601 lio->ptp_adjust = 0;
1602 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1603
1604 return 0;
1605 }
1606
1607 /**
1608 * liquidio_ptp_enable - Check if PTP is enabled
1609 * @ptp: PTP clock info
1610 * @rq: request
1611 * @on: is it on
1612 */
1613 static int
liquidio_ptp_enable(struct ptp_clock_info __maybe_unused * ptp,struct ptp_clock_request __maybe_unused * rq,int __maybe_unused on)1614 liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
1615 struct ptp_clock_request __maybe_unused *rq,
1616 int __maybe_unused on)
1617 {
1618 return -EOPNOTSUPP;
1619 }
1620
1621 /**
1622 * oct_ptp_open - Open PTP clock source
1623 * @netdev: network device
1624 */
oct_ptp_open(struct net_device * netdev)1625 static void oct_ptp_open(struct net_device *netdev)
1626 {
1627 struct lio *lio = GET_LIO(netdev);
1628 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1629
1630 spin_lock_init(&lio->ptp_lock);
1631
1632 snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1633 lio->ptp_info.owner = THIS_MODULE;
1634 lio->ptp_info.max_adj = 250000000;
1635 lio->ptp_info.n_alarm = 0;
1636 lio->ptp_info.n_ext_ts = 0;
1637 lio->ptp_info.n_per_out = 0;
1638 lio->ptp_info.pps = 0;
1639 lio->ptp_info.adjfine = liquidio_ptp_adjfine;
1640 lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1641 lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1642 lio->ptp_info.settime64 = liquidio_ptp_settime;
1643 lio->ptp_info.enable = liquidio_ptp_enable;
1644
1645 lio->ptp_adjust = 0;
1646
1647 lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1648 &oct->pci_dev->dev);
1649
1650 if (IS_ERR(lio->ptp_clock))
1651 lio->ptp_clock = NULL;
1652 }
1653
1654 /**
1655 * liquidio_ptp_init - Init PTP clock
1656 * @oct: octeon device
1657 */
liquidio_ptp_init(struct octeon_device * oct)1658 static void liquidio_ptp_init(struct octeon_device *oct)
1659 {
1660 u64 clock_comp, cfg;
1661
1662 clock_comp = (u64)NSEC_PER_SEC << 32;
1663 do_div(clock_comp, oct->coproc_clock_rate);
1664 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1665
1666 /* Enable */
1667 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1668 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1669 }
1670
1671 /**
1672 * load_firmware - Load firmware to device
1673 * @oct: octeon device
1674 *
1675 * Maps device to firmware filename, requests firmware, and downloads it
1676 */
load_firmware(struct octeon_device * oct)1677 static int load_firmware(struct octeon_device *oct)
1678 {
1679 int ret = 0;
1680 const struct firmware *fw;
1681 char fw_name[LIO_MAX_FW_FILENAME_LEN];
1682 char *tmp_fw_type;
1683
1684 if (fw_type_is_auto()) {
1685 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1686 strscpy_pad(fw_type, tmp_fw_type, sizeof(fw_type));
1687 } else {
1688 tmp_fw_type = fw_type;
1689 }
1690
1691 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1692 octeon_get_conf(oct)->card_name, tmp_fw_type,
1693 LIO_FW_NAME_SUFFIX);
1694
1695 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1696 if (ret) {
1697 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1698 fw_name);
1699 release_firmware(fw);
1700 return ret;
1701 }
1702
1703 ret = octeon_download_firmware(oct, fw->data, fw->size);
1704
1705 release_firmware(fw);
1706
1707 return ret;
1708 }
1709
1710 /**
1711 * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
1712 * @work: work_struct data structure
1713 */
octnet_poll_check_txq_status(struct work_struct * work)1714 static void octnet_poll_check_txq_status(struct work_struct *work)
1715 {
1716 struct cavium_wk *wk = (struct cavium_wk *)work;
1717 struct lio *lio = (struct lio *)wk->ctxptr;
1718
1719 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1720 return;
1721
1722 check_txq_status(lio);
1723 queue_delayed_work(lio->txq_status_wq.wq,
1724 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1725 }
1726
1727 /**
1728 * setup_tx_poll_fn - Sets up the txq poll check
1729 * @netdev: network device
1730 */
setup_tx_poll_fn(struct net_device * netdev)1731 static inline int setup_tx_poll_fn(struct net_device *netdev)
1732 {
1733 struct lio *lio = GET_LIO(netdev);
1734 struct octeon_device *oct = lio->oct_dev;
1735
1736 lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1737 WQ_MEM_RECLAIM, 0);
1738 if (!lio->txq_status_wq.wq) {
1739 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1740 return -1;
1741 }
1742 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1743 octnet_poll_check_txq_status);
1744 lio->txq_status_wq.wk.ctxptr = lio;
1745 queue_delayed_work(lio->txq_status_wq.wq,
1746 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1747 return 0;
1748 }
1749
cleanup_tx_poll_fn(struct net_device * netdev)1750 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1751 {
1752 struct lio *lio = GET_LIO(netdev);
1753
1754 if (lio->txq_status_wq.wq) {
1755 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1756 destroy_workqueue(lio->txq_status_wq.wq);
1757 }
1758 }
1759
1760 /**
1761 * liquidio_open - Net device open for LiquidIO
1762 * @netdev: network device
1763 */
liquidio_open(struct net_device * netdev)1764 static int liquidio_open(struct net_device *netdev)
1765 {
1766 struct lio *lio = GET_LIO(netdev);
1767 struct octeon_device *oct = lio->oct_dev;
1768 struct octeon_device_priv *oct_priv = oct->priv;
1769 struct napi_struct *napi, *n;
1770 int ret = 0;
1771
1772 if (oct->props[lio->ifidx].napi_enabled == 0) {
1773 tasklet_disable(&oct_priv->droq_tasklet);
1774
1775 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1776 napi_enable(napi);
1777
1778 oct->props[lio->ifidx].napi_enabled = 1;
1779
1780 if (OCTEON_CN23XX_PF(oct))
1781 oct->droq[0]->ops.poll_mode = 1;
1782 }
1783
1784 if (oct->ptp_enable)
1785 oct_ptp_open(netdev);
1786
1787 ifstate_set(lio, LIO_IFSTATE_RUNNING);
1788
1789 if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
1790 ret = setup_tx_poll_fn(netdev);
1791 if (ret)
1792 goto err_poll;
1793 }
1794
1795 netif_tx_start_all_queues(netdev);
1796
1797 /* Ready for link status updates */
1798 lio->intf_open = 1;
1799
1800 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1801
1802 /* tell Octeon to start forwarding packets to host */
1803 ret = send_rx_ctrl_cmd(lio, 1);
1804 if (ret)
1805 goto err_rx_ctrl;
1806
1807 /* start periodical statistics fetch */
1808 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
1809 lio->stats_wk.ctxptr = lio;
1810 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
1811 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
1812
1813 dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
1814 netdev->name);
1815
1816 return 0;
1817
1818 err_rx_ctrl:
1819 if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
1820 cleanup_tx_poll_fn(netdev);
1821 err_poll:
1822 if (lio->ptp_clock) {
1823 ptp_clock_unregister(lio->ptp_clock);
1824 lio->ptp_clock = NULL;
1825 }
1826
1827 if (oct->props[lio->ifidx].napi_enabled == 1) {
1828 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1829 napi_disable(napi);
1830
1831 oct->props[lio->ifidx].napi_enabled = 0;
1832
1833 if (OCTEON_CN23XX_PF(oct))
1834 oct->droq[0]->ops.poll_mode = 0;
1835 }
1836
1837 return ret;
1838 }
1839
1840 /**
1841 * liquidio_stop - Net device stop for LiquidIO
1842 * @netdev: network device
1843 */
liquidio_stop(struct net_device * netdev)1844 static int liquidio_stop(struct net_device *netdev)
1845 {
1846 struct lio *lio = GET_LIO(netdev);
1847 struct octeon_device *oct = lio->oct_dev;
1848 struct octeon_device_priv *oct_priv = oct->priv;
1849 struct napi_struct *napi, *n;
1850 int ret = 0;
1851
1852 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1853
1854 /* Stop any link updates */
1855 lio->intf_open = 0;
1856
1857 stop_txqs(netdev);
1858
1859 /* Inform that netif carrier is down */
1860 netif_carrier_off(netdev);
1861 netif_tx_disable(netdev);
1862
1863 lio->linfo.link.s.link_up = 0;
1864 lio->link_changes++;
1865
1866 /* Tell Octeon that nic interface is down. */
1867 ret = send_rx_ctrl_cmd(lio, 0);
1868 if (ret)
1869 return ret;
1870
1871 if (OCTEON_CN23XX_PF(oct)) {
1872 if (!oct->msix_on)
1873 cleanup_tx_poll_fn(netdev);
1874 } else {
1875 cleanup_tx_poll_fn(netdev);
1876 }
1877
1878 cancel_delayed_work_sync(&lio->stats_wk.work);
1879
1880 if (lio->ptp_clock) {
1881 ptp_clock_unregister(lio->ptp_clock);
1882 lio->ptp_clock = NULL;
1883 }
1884
1885 /* Wait for any pending Rx descriptors */
1886 if (lio_wait_for_clean_oq(oct))
1887 netif_info(lio, rx_err, lio->netdev,
1888 "Proceeding with stop interface after partial RX desc processing\n");
1889
1890 if (oct->props[lio->ifidx].napi_enabled == 1) {
1891 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1892 napi_disable(napi);
1893
1894 oct->props[lio->ifidx].napi_enabled = 0;
1895
1896 if (OCTEON_CN23XX_PF(oct))
1897 oct->droq[0]->ops.poll_mode = 0;
1898
1899 tasklet_enable(&oct_priv->droq_tasklet);
1900 }
1901
1902 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1903
1904 return ret;
1905 }
1906
1907 /**
1908 * get_new_flags - Converts a mask based on net device flags
1909 * @netdev: network device
1910 *
1911 * This routine generates a octnet_ifflags mask from the net device flags
1912 * received from the OS.
1913 */
get_new_flags(struct net_device * netdev)1914 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1915 {
1916 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1917
1918 if (netdev->flags & IFF_PROMISC)
1919 f |= OCTNET_IFFLAG_PROMISC;
1920
1921 if (netdev->flags & IFF_ALLMULTI)
1922 f |= OCTNET_IFFLAG_ALLMULTI;
1923
1924 if (netdev->flags & IFF_MULTICAST) {
1925 f |= OCTNET_IFFLAG_MULTICAST;
1926
1927 /* Accept all multicast addresses if there are more than we
1928 * can handle
1929 */
1930 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1931 f |= OCTNET_IFFLAG_ALLMULTI;
1932 }
1933
1934 if (netdev->flags & IFF_BROADCAST)
1935 f |= OCTNET_IFFLAG_BROADCAST;
1936
1937 return f;
1938 }
1939
1940 /**
1941 * liquidio_set_mcast_list - Net device set_multicast_list
1942 * @netdev: network device
1943 */
liquidio_set_mcast_list(struct net_device * netdev)1944 static void liquidio_set_mcast_list(struct net_device *netdev)
1945 {
1946 struct lio *lio = GET_LIO(netdev);
1947 struct octeon_device *oct = lio->oct_dev;
1948 struct octnic_ctrl_pkt nctrl;
1949 struct netdev_hw_addr *ha;
1950 u64 *mc;
1951 int ret;
1952 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1953
1954 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1955
1956 /* Create a ctrl pkt command to be sent to core app. */
1957 nctrl.ncmd.u64 = 0;
1958 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1959 nctrl.ncmd.s.param1 = get_new_flags(netdev);
1960 nctrl.ncmd.s.param2 = mc_count;
1961 nctrl.ncmd.s.more = mc_count;
1962 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1963 nctrl.netpndev = (u64)netdev;
1964 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1965
1966 /* copy all the addresses into the udd */
1967 mc = &nctrl.udd[0];
1968 netdev_for_each_mc_addr(ha, netdev) {
1969 *mc = 0;
1970 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
1971 /* no need to swap bytes */
1972
1973 if (++mc > &nctrl.udd[mc_count])
1974 break;
1975 }
1976
1977 /* Apparently, any activity in this call from the kernel has to
1978 * be atomic. So we won't wait for response.
1979 */
1980
1981 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1982 if (ret) {
1983 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1984 ret);
1985 }
1986 }
1987
1988 /**
1989 * liquidio_set_mac - Net device set_mac_address
1990 * @netdev: network device
1991 * @p: pointer to sockaddr
1992 */
liquidio_set_mac(struct net_device * netdev,void * p)1993 static int liquidio_set_mac(struct net_device *netdev, void *p)
1994 {
1995 int ret = 0;
1996 struct lio *lio = GET_LIO(netdev);
1997 struct octeon_device *oct = lio->oct_dev;
1998 struct sockaddr *addr = (struct sockaddr *)p;
1999 struct octnic_ctrl_pkt nctrl;
2000
2001 if (!is_valid_ether_addr(addr->sa_data))
2002 return -EADDRNOTAVAIL;
2003
2004 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2005
2006 nctrl.ncmd.u64 = 0;
2007 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2008 nctrl.ncmd.s.param1 = 0;
2009 nctrl.ncmd.s.more = 1;
2010 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2011 nctrl.netpndev = (u64)netdev;
2012
2013 nctrl.udd[0] = 0;
2014 /* The MAC Address is presented in network byte order. */
2015 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2016
2017 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2018 if (ret < 0) {
2019 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2020 return -ENOMEM;
2021 }
2022
2023 if (nctrl.sc_status) {
2024 dev_err(&oct->pci_dev->dev,
2025 "%s: MAC Address change failed. sc return=%x\n",
2026 __func__, nctrl.sc_status);
2027 return -EIO;
2028 }
2029
2030 eth_hw_addr_set(netdev, addr->sa_data);
2031 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2032
2033 return 0;
2034 }
2035
2036 static void
liquidio_get_stats64(struct net_device * netdev,struct rtnl_link_stats64 * lstats)2037 liquidio_get_stats64(struct net_device *netdev,
2038 struct rtnl_link_stats64 *lstats)
2039 {
2040 struct lio *lio = GET_LIO(netdev);
2041 struct octeon_device *oct;
2042 u64 pkts = 0, drop = 0, bytes = 0;
2043 struct oct_droq_stats *oq_stats;
2044 struct oct_iq_stats *iq_stats;
2045 int i, iq_no, oq_no;
2046
2047 oct = lio->oct_dev;
2048
2049 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2050 return;
2051
2052 for (i = 0; i < oct->num_iqs; i++) {
2053 iq_no = lio->linfo.txpciq[i].s.q_no;
2054 iq_stats = &oct->instr_queue[iq_no]->stats;
2055 pkts += iq_stats->tx_done;
2056 drop += iq_stats->tx_dropped;
2057 bytes += iq_stats->tx_tot_bytes;
2058 }
2059
2060 lstats->tx_packets = pkts;
2061 lstats->tx_bytes = bytes;
2062 lstats->tx_dropped = drop;
2063
2064 pkts = 0;
2065 drop = 0;
2066 bytes = 0;
2067
2068 for (i = 0; i < oct->num_oqs; i++) {
2069 oq_no = lio->linfo.rxpciq[i].s.q_no;
2070 oq_stats = &oct->droq[oq_no]->stats;
2071 pkts += oq_stats->rx_pkts_received;
2072 drop += (oq_stats->rx_dropped +
2073 oq_stats->dropped_nodispatch +
2074 oq_stats->dropped_toomany +
2075 oq_stats->dropped_nomem);
2076 bytes += oq_stats->rx_bytes_received;
2077 }
2078
2079 lstats->rx_bytes = bytes;
2080 lstats->rx_packets = pkts;
2081 lstats->rx_dropped = drop;
2082
2083 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2084 lstats->collisions = oct->link_stats.fromhost.total_collisions;
2085
2086 /* detailed rx_errors: */
2087 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2088 /* recved pkt with crc error */
2089 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2090 /* recv'd frame alignment error */
2091 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2092 /* recv'r fifo overrun */
2093 lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2094
2095 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2096 lstats->rx_frame_errors + lstats->rx_fifo_errors;
2097
2098 /* detailed tx_errors */
2099 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2100 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2101 lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2102
2103 lstats->tx_errors = lstats->tx_aborted_errors +
2104 lstats->tx_carrier_errors +
2105 lstats->tx_fifo_errors;
2106 }
2107
2108 /**
2109 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
2110 * @netdev: network device
2111 * @ifr: interface request
2112 */
hwtstamp_ioctl(struct net_device * netdev,struct ifreq * ifr)2113 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2114 {
2115 struct hwtstamp_config conf;
2116 struct lio *lio = GET_LIO(netdev);
2117
2118 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2119 return -EFAULT;
2120
2121 switch (conf.tx_type) {
2122 case HWTSTAMP_TX_ON:
2123 case HWTSTAMP_TX_OFF:
2124 break;
2125 default:
2126 return -ERANGE;
2127 }
2128
2129 switch (conf.rx_filter) {
2130 case HWTSTAMP_FILTER_NONE:
2131 break;
2132 case HWTSTAMP_FILTER_ALL:
2133 case HWTSTAMP_FILTER_SOME:
2134 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2135 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2136 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2137 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2138 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2139 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2140 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2141 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2142 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2143 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2144 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2145 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2146 case HWTSTAMP_FILTER_NTP_ALL:
2147 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2148 break;
2149 default:
2150 return -ERANGE;
2151 }
2152
2153 if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2154 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2155
2156 else
2157 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2158
2159 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2160 }
2161
2162 /**
2163 * liquidio_ioctl - ioctl handler
2164 * @netdev: network device
2165 * @ifr: interface request
2166 * @cmd: command
2167 */
liquidio_ioctl(struct net_device * netdev,struct ifreq * ifr,int cmd)2168 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2169 {
2170 struct lio *lio = GET_LIO(netdev);
2171
2172 switch (cmd) {
2173 case SIOCSHWTSTAMP:
2174 if (lio->oct_dev->ptp_enable)
2175 return hwtstamp_ioctl(netdev, ifr);
2176 fallthrough;
2177 default:
2178 return -EOPNOTSUPP;
2179 }
2180 }
2181
2182 /**
2183 * handle_timestamp - handle a Tx timestamp response
2184 * @oct: octeon device
2185 * @status: response status
2186 * @buf: pointer to skb
2187 */
handle_timestamp(struct octeon_device * oct,u32 status,void * buf)2188 static void handle_timestamp(struct octeon_device *oct,
2189 u32 status,
2190 void *buf)
2191 {
2192 struct octnet_buf_free_info *finfo;
2193 struct octeon_soft_command *sc;
2194 struct oct_timestamp_resp *resp;
2195 struct lio *lio;
2196 struct sk_buff *skb = (struct sk_buff *)buf;
2197
2198 finfo = (struct octnet_buf_free_info *)skb->cb;
2199 lio = finfo->lio;
2200 sc = finfo->sc;
2201 oct = lio->oct_dev;
2202 resp = (struct oct_timestamp_resp *)sc->virtrptr;
2203
2204 if (status != OCTEON_REQUEST_DONE) {
2205 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2206 CVM_CAST64(status));
2207 resp->timestamp = 0;
2208 }
2209
2210 octeon_swap_8B_data(&resp->timestamp, 1);
2211
2212 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2213 struct skb_shared_hwtstamps ts;
2214 u64 ns = resp->timestamp;
2215
2216 netif_info(lio, tx_done, lio->netdev,
2217 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2218 skb, (unsigned long long)ns);
2219 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2220 skb_tstamp_tx(skb, &ts);
2221 }
2222
2223 octeon_free_soft_command(oct, sc);
2224 tx_buffer_free(skb);
2225 }
2226
2227 /**
2228 * send_nic_timestamp_pkt - Send a data packet that will be timestamped
2229 * @oct: octeon device
2230 * @ndata: pointer to network data
2231 * @finfo: pointer to private network data
2232 * @xmit_more: more is coming
2233 */
send_nic_timestamp_pkt(struct octeon_device * oct,struct octnic_data_pkt * ndata,struct octnet_buf_free_info * finfo,int xmit_more)2234 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2235 struct octnic_data_pkt *ndata,
2236 struct octnet_buf_free_info *finfo,
2237 int xmit_more)
2238 {
2239 int retval;
2240 struct octeon_soft_command *sc;
2241 struct lio *lio;
2242 int ring_doorbell;
2243 u32 len;
2244
2245 lio = finfo->lio;
2246
2247 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2248 sizeof(struct oct_timestamp_resp));
2249 finfo->sc = sc;
2250
2251 if (!sc) {
2252 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2253 return IQ_SEND_FAILED;
2254 }
2255
2256 if (ndata->reqtype == REQTYPE_NORESP_NET)
2257 ndata->reqtype = REQTYPE_RESP_NET;
2258 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2259 ndata->reqtype = REQTYPE_RESP_NET_SG;
2260
2261 sc->callback = handle_timestamp;
2262 sc->callback_arg = finfo->skb;
2263 sc->iq_no = ndata->q_no;
2264
2265 if (OCTEON_CN23XX_PF(oct))
2266 len = (u32)((struct octeon_instr_ih3 *)
2267 (&sc->cmd.cmd3.ih3))->dlengsz;
2268 else
2269 len = (u32)((struct octeon_instr_ih2 *)
2270 (&sc->cmd.cmd2.ih2))->dlengsz;
2271
2272 ring_doorbell = !xmit_more;
2273
2274 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2275 sc, len, ndata->reqtype);
2276
2277 if (retval == IQ_SEND_FAILED) {
2278 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2279 retval);
2280 octeon_free_soft_command(oct, sc);
2281 } else {
2282 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2283 }
2284
2285 return retval;
2286 }
2287
2288 /**
2289 * liquidio_xmit - Transmit networks packets to the Octeon interface
2290 * @skb: skbuff struct to be passed to network layer.
2291 * @netdev: pointer to network device
2292 *
2293 * Return: whether the packet was transmitted to the device okay or not
2294 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
2295 */
liquidio_xmit(struct sk_buff * skb,struct net_device * netdev)2296 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2297 {
2298 struct lio *lio;
2299 struct octnet_buf_free_info *finfo;
2300 union octnic_cmd_setup cmdsetup;
2301 struct octnic_data_pkt ndata;
2302 struct octeon_device *oct;
2303 struct oct_iq_stats *stats;
2304 struct octeon_instr_irh *irh;
2305 union tx_info *tx_info;
2306 int status = 0;
2307 int q_idx = 0, iq_no = 0;
2308 int j, xmit_more = 0;
2309 u64 dptr = 0;
2310 u32 tag = 0;
2311
2312 lio = GET_LIO(netdev);
2313 oct = lio->oct_dev;
2314
2315 q_idx = skb_iq(oct, skb);
2316 tag = q_idx;
2317 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2318
2319 stats = &oct->instr_queue[iq_no]->stats;
2320
2321 /* Check for all conditions in which the current packet cannot be
2322 * transmitted.
2323 */
2324 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2325 (!lio->linfo.link.s.link_up) ||
2326 (skb->len <= 0)) {
2327 netif_info(lio, tx_err, lio->netdev,
2328 "Transmit failed link_status : %d\n",
2329 lio->linfo.link.s.link_up);
2330 goto lio_xmit_failed;
2331 }
2332
2333 /* Use space in skb->cb to store info used to unmap and
2334 * free the buffers.
2335 */
2336 finfo = (struct octnet_buf_free_info *)skb->cb;
2337 finfo->lio = lio;
2338 finfo->skb = skb;
2339 finfo->sc = NULL;
2340
2341 /* Prepare the attributes for the data to be passed to OSI. */
2342 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2343
2344 ndata.buf = (void *)finfo;
2345
2346 ndata.q_no = iq_no;
2347
2348 if (octnet_iq_is_full(oct, ndata.q_no)) {
2349 /* defer sending if queue is full */
2350 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2351 ndata.q_no);
2352 stats->tx_iq_busy++;
2353 return NETDEV_TX_BUSY;
2354 }
2355
2356 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2357 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2358 */
2359
2360 ndata.datasize = skb->len;
2361
2362 cmdsetup.u64 = 0;
2363 cmdsetup.s.iq_no = iq_no;
2364
2365 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2366 if (skb->encapsulation) {
2367 cmdsetup.s.tnl_csum = 1;
2368 stats->tx_vxlan++;
2369 } else {
2370 cmdsetup.s.transport_csum = 1;
2371 }
2372 }
2373 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2374 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2375 cmdsetup.s.timestamp = 1;
2376 }
2377
2378 if (skb_shinfo(skb)->nr_frags == 0) {
2379 cmdsetup.s.u.datasize = skb->len;
2380 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2381
2382 /* Offload checksum calculation for TCP/UDP packets */
2383 dptr = dma_map_single(&oct->pci_dev->dev,
2384 skb->data,
2385 skb->len,
2386 DMA_TO_DEVICE);
2387 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2388 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2389 __func__);
2390 stats->tx_dmamap_fail++;
2391 return NETDEV_TX_BUSY;
2392 }
2393
2394 if (OCTEON_CN23XX_PF(oct))
2395 ndata.cmd.cmd3.dptr = dptr;
2396 else
2397 ndata.cmd.cmd2.dptr = dptr;
2398 finfo->dptr = dptr;
2399 ndata.reqtype = REQTYPE_NORESP_NET;
2400
2401 } else {
2402 int i, frags;
2403 skb_frag_t *frag;
2404 struct octnic_gather *g;
2405
2406 spin_lock(&lio->glist_lock[q_idx]);
2407 g = (struct octnic_gather *)
2408 lio_list_delete_head(&lio->glist[q_idx]);
2409 spin_unlock(&lio->glist_lock[q_idx]);
2410
2411 if (!g) {
2412 netif_info(lio, tx_err, lio->netdev,
2413 "Transmit scatter gather: glist null!\n");
2414 goto lio_xmit_failed;
2415 }
2416
2417 cmdsetup.s.gather = 1;
2418 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2419 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2420
2421 memset(g->sg, 0, g->sg_size);
2422
2423 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2424 skb->data,
2425 (skb->len - skb->data_len),
2426 DMA_TO_DEVICE);
2427 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2428 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2429 __func__);
2430 stats->tx_dmamap_fail++;
2431 return NETDEV_TX_BUSY;
2432 }
2433 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2434
2435 frags = skb_shinfo(skb)->nr_frags;
2436 i = 1;
2437 while (frags--) {
2438 frag = &skb_shinfo(skb)->frags[i - 1];
2439
2440 g->sg[(i >> 2)].ptr[(i & 3)] =
2441 skb_frag_dma_map(&oct->pci_dev->dev,
2442 frag, 0, skb_frag_size(frag),
2443 DMA_TO_DEVICE);
2444
2445 if (dma_mapping_error(&oct->pci_dev->dev,
2446 g->sg[i >> 2].ptr[i & 3])) {
2447 dma_unmap_single(&oct->pci_dev->dev,
2448 g->sg[0].ptr[0],
2449 skb->len - skb->data_len,
2450 DMA_TO_DEVICE);
2451 for (j = 1; j < i; j++) {
2452 frag = &skb_shinfo(skb)->frags[j - 1];
2453 dma_unmap_page(&oct->pci_dev->dev,
2454 g->sg[j >> 2].ptr[j & 3],
2455 skb_frag_size(frag),
2456 DMA_TO_DEVICE);
2457 }
2458 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2459 __func__);
2460 return NETDEV_TX_BUSY;
2461 }
2462
2463 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
2464 (i & 3));
2465 i++;
2466 }
2467
2468 dptr = g->sg_dma_ptr;
2469
2470 if (OCTEON_CN23XX_PF(oct))
2471 ndata.cmd.cmd3.dptr = dptr;
2472 else
2473 ndata.cmd.cmd2.dptr = dptr;
2474 finfo->dptr = dptr;
2475 finfo->g = g;
2476
2477 ndata.reqtype = REQTYPE_NORESP_NET_SG;
2478 }
2479
2480 if (OCTEON_CN23XX_PF(oct)) {
2481 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2482 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2483 } else {
2484 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2485 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2486 }
2487
2488 if (skb_shinfo(skb)->gso_size) {
2489 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2490 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2491 stats->tx_gso++;
2492 }
2493
2494 /* HW insert VLAN tag */
2495 if (skb_vlan_tag_present(skb)) {
2496 irh->priority = skb_vlan_tag_get(skb) >> 13;
2497 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2498 }
2499
2500 xmit_more = netdev_xmit_more();
2501
2502 if (unlikely(cmdsetup.s.timestamp))
2503 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2504 else
2505 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2506 if (status == IQ_SEND_FAILED)
2507 goto lio_xmit_failed;
2508
2509 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2510
2511 if (status == IQ_SEND_STOP)
2512 netif_stop_subqueue(netdev, q_idx);
2513
2514 netif_trans_update(netdev);
2515
2516 if (tx_info->s.gso_segs)
2517 stats->tx_done += tx_info->s.gso_segs;
2518 else
2519 stats->tx_done++;
2520 stats->tx_tot_bytes += ndata.datasize;
2521
2522 return NETDEV_TX_OK;
2523
2524 lio_xmit_failed:
2525 stats->tx_dropped++;
2526 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2527 iq_no, stats->tx_dropped);
2528 if (dptr)
2529 dma_unmap_single(&oct->pci_dev->dev, dptr,
2530 ndata.datasize, DMA_TO_DEVICE);
2531
2532 octeon_ring_doorbell_locked(oct, iq_no);
2533
2534 tx_buffer_free(skb);
2535 return NETDEV_TX_OK;
2536 }
2537
2538 /**
2539 * liquidio_tx_timeout - Network device Tx timeout
2540 * @netdev: pointer to network device
2541 * @txqueue: index of the hung transmit queue
2542 */
liquidio_tx_timeout(struct net_device * netdev,unsigned int txqueue)2543 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2544 {
2545 struct lio *lio;
2546
2547 lio = GET_LIO(netdev);
2548
2549 netif_info(lio, tx_err, lio->netdev,
2550 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2551 netdev->stats.tx_dropped);
2552 netif_trans_update(netdev);
2553 wake_txqs(netdev);
2554 }
2555
liquidio_vlan_rx_add_vid(struct net_device * netdev,__be16 proto,u16 vid)2556 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2557 __be16 proto __attribute__((unused)),
2558 u16 vid)
2559 {
2560 struct lio *lio = GET_LIO(netdev);
2561 struct octeon_device *oct = lio->oct_dev;
2562 struct octnic_ctrl_pkt nctrl;
2563 int ret = 0;
2564
2565 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2566
2567 nctrl.ncmd.u64 = 0;
2568 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2569 nctrl.ncmd.s.param1 = vid;
2570 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2571 nctrl.netpndev = (u64)netdev;
2572 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2573
2574 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2575 if (ret) {
2576 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2577 ret);
2578 if (ret > 0)
2579 ret = -EIO;
2580 }
2581
2582 return ret;
2583 }
2584
liquidio_vlan_rx_kill_vid(struct net_device * netdev,__be16 proto,u16 vid)2585 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2586 __be16 proto __attribute__((unused)),
2587 u16 vid)
2588 {
2589 struct lio *lio = GET_LIO(netdev);
2590 struct octeon_device *oct = lio->oct_dev;
2591 struct octnic_ctrl_pkt nctrl;
2592 int ret = 0;
2593
2594 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2595
2596 nctrl.ncmd.u64 = 0;
2597 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2598 nctrl.ncmd.s.param1 = vid;
2599 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2600 nctrl.netpndev = (u64)netdev;
2601 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2602
2603 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2604 if (ret) {
2605 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2606 ret);
2607 if (ret > 0)
2608 ret = -EIO;
2609 }
2610 return ret;
2611 }
2612
2613 /**
2614 * liquidio_set_rxcsum_command - Sending command to enable/disable RX checksum offload
2615 * @netdev: pointer to network device
2616 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL
2617 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
2618 * Returns: SUCCESS or FAILURE
2619 */
liquidio_set_rxcsum_command(struct net_device * netdev,int command,u8 rx_cmd)2620 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2621 u8 rx_cmd)
2622 {
2623 struct lio *lio = GET_LIO(netdev);
2624 struct octeon_device *oct = lio->oct_dev;
2625 struct octnic_ctrl_pkt nctrl;
2626 int ret = 0;
2627
2628 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2629
2630 nctrl.ncmd.u64 = 0;
2631 nctrl.ncmd.s.cmd = command;
2632 nctrl.ncmd.s.param1 = rx_cmd;
2633 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2634 nctrl.netpndev = (u64)netdev;
2635 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2636
2637 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2638 if (ret) {
2639 dev_err(&oct->pci_dev->dev,
2640 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2641 ret);
2642 if (ret > 0)
2643 ret = -EIO;
2644 }
2645 return ret;
2646 }
2647
2648 /**
2649 * liquidio_vxlan_port_command - Sending command to add/delete VxLAN UDP port to firmware
2650 * @netdev: pointer to network device
2651 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG
2652 * @vxlan_port: VxLAN port to be added or deleted
2653 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD,
2654 * OCTNET_CMD_VXLAN_PORT_DEL
2655 * Return: SUCCESS or FAILURE
2656 */
liquidio_vxlan_port_command(struct net_device * netdev,int command,u16 vxlan_port,u8 vxlan_cmd_bit)2657 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2658 u16 vxlan_port, u8 vxlan_cmd_bit)
2659 {
2660 struct lio *lio = GET_LIO(netdev);
2661 struct octeon_device *oct = lio->oct_dev;
2662 struct octnic_ctrl_pkt nctrl;
2663 int ret = 0;
2664
2665 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2666
2667 nctrl.ncmd.u64 = 0;
2668 nctrl.ncmd.s.cmd = command;
2669 nctrl.ncmd.s.more = vxlan_cmd_bit;
2670 nctrl.ncmd.s.param1 = vxlan_port;
2671 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2672 nctrl.netpndev = (u64)netdev;
2673 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2674
2675 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2676 if (ret) {
2677 dev_err(&oct->pci_dev->dev,
2678 "VxLAN port add/delete failed in core (ret:0x%x)\n",
2679 ret);
2680 if (ret > 0)
2681 ret = -EIO;
2682 }
2683 return ret;
2684 }
2685
liquidio_udp_tunnel_set_port(struct net_device * netdev,unsigned int table,unsigned int entry,struct udp_tunnel_info * ti)2686 static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
2687 unsigned int table, unsigned int entry,
2688 struct udp_tunnel_info *ti)
2689 {
2690 return liquidio_vxlan_port_command(netdev,
2691 OCTNET_CMD_VXLAN_PORT_CONFIG,
2692 htons(ti->port),
2693 OCTNET_CMD_VXLAN_PORT_ADD);
2694 }
2695
liquidio_udp_tunnel_unset_port(struct net_device * netdev,unsigned int table,unsigned int entry,struct udp_tunnel_info * ti)2696 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
2697 unsigned int table,
2698 unsigned int entry,
2699 struct udp_tunnel_info *ti)
2700 {
2701 return liquidio_vxlan_port_command(netdev,
2702 OCTNET_CMD_VXLAN_PORT_CONFIG,
2703 htons(ti->port),
2704 OCTNET_CMD_VXLAN_PORT_DEL);
2705 }
2706
2707 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
2708 .set_port = liquidio_udp_tunnel_set_port,
2709 .unset_port = liquidio_udp_tunnel_unset_port,
2710 .tables = {
2711 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
2712 },
2713 };
2714
2715 /**
2716 * liquidio_fix_features - Net device fix features
2717 * @netdev: pointer to network device
2718 * @request: features requested
2719 * Return: updated features list
2720 */
liquidio_fix_features(struct net_device * netdev,netdev_features_t request)2721 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2722 netdev_features_t request)
2723 {
2724 struct lio *lio = netdev_priv(netdev);
2725
2726 if ((request & NETIF_F_RXCSUM) &&
2727 !(lio->dev_capability & NETIF_F_RXCSUM))
2728 request &= ~NETIF_F_RXCSUM;
2729
2730 if ((request & NETIF_F_HW_CSUM) &&
2731 !(lio->dev_capability & NETIF_F_HW_CSUM))
2732 request &= ~NETIF_F_HW_CSUM;
2733
2734 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2735 request &= ~NETIF_F_TSO;
2736
2737 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2738 request &= ~NETIF_F_TSO6;
2739
2740 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2741 request &= ~NETIF_F_LRO;
2742
2743 /*Disable LRO if RXCSUM is off */
2744 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2745 (lio->dev_capability & NETIF_F_LRO))
2746 request &= ~NETIF_F_LRO;
2747
2748 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2749 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2750 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2751
2752 return request;
2753 }
2754
2755 /**
2756 * liquidio_set_features - Net device set features
2757 * @netdev: pointer to network device
2758 * @features: features to enable/disable
2759 */
liquidio_set_features(struct net_device * netdev,netdev_features_t features)2760 static int liquidio_set_features(struct net_device *netdev,
2761 netdev_features_t features)
2762 {
2763 struct lio *lio = netdev_priv(netdev);
2764
2765 if ((features & NETIF_F_LRO) &&
2766 (lio->dev_capability & NETIF_F_LRO) &&
2767 !(netdev->features & NETIF_F_LRO))
2768 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2769 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2770 else if (!(features & NETIF_F_LRO) &&
2771 (lio->dev_capability & NETIF_F_LRO) &&
2772 (netdev->features & NETIF_F_LRO))
2773 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2774 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2775
2776 /* Sending command to firmware to enable/disable RX checksum
2777 * offload settings using ethtool
2778 */
2779 if (!(netdev->features & NETIF_F_RXCSUM) &&
2780 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2781 (features & NETIF_F_RXCSUM))
2782 liquidio_set_rxcsum_command(netdev,
2783 OCTNET_CMD_TNL_RX_CSUM_CTL,
2784 OCTNET_CMD_RXCSUM_ENABLE);
2785 else if ((netdev->features & NETIF_F_RXCSUM) &&
2786 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2787 !(features & NETIF_F_RXCSUM))
2788 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2789 OCTNET_CMD_RXCSUM_DISABLE);
2790
2791 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2792 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2793 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2794 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2795 OCTNET_CMD_VLAN_FILTER_ENABLE);
2796 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2797 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2798 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2799 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2800 OCTNET_CMD_VLAN_FILTER_DISABLE);
2801
2802 return 0;
2803 }
2804
__liquidio_set_vf_mac(struct net_device * netdev,int vfidx,u8 * mac,bool is_admin_assigned)2805 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2806 u8 *mac, bool is_admin_assigned)
2807 {
2808 struct lio *lio = GET_LIO(netdev);
2809 struct octeon_device *oct = lio->oct_dev;
2810 struct octnic_ctrl_pkt nctrl;
2811 int ret = 0;
2812
2813 if (!is_valid_ether_addr(mac))
2814 return -EINVAL;
2815
2816 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2817 return -EINVAL;
2818
2819 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2820
2821 nctrl.ncmd.u64 = 0;
2822 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2823 /* vfidx is 0 based, but vf_num (param1) is 1 based */
2824 nctrl.ncmd.s.param1 = vfidx + 1;
2825 nctrl.ncmd.s.more = 1;
2826 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2827 nctrl.netpndev = (u64)netdev;
2828 if (is_admin_assigned) {
2829 nctrl.ncmd.s.param2 = true;
2830 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2831 }
2832
2833 nctrl.udd[0] = 0;
2834 /* The MAC Address is presented in network byte order. */
2835 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2836
2837 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2838
2839 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2840 if (ret > 0)
2841 ret = -EIO;
2842
2843 return ret;
2844 }
2845
liquidio_set_vf_mac(struct net_device * netdev,int vfidx,u8 * mac)2846 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2847 {
2848 struct lio *lio = GET_LIO(netdev);
2849 struct octeon_device *oct = lio->oct_dev;
2850 int retval;
2851
2852 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2853 return -EINVAL;
2854
2855 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2856 if (!retval)
2857 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2858
2859 return retval;
2860 }
2861
liquidio_set_vf_spoofchk(struct net_device * netdev,int vfidx,bool enable)2862 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2863 bool enable)
2864 {
2865 struct lio *lio = GET_LIO(netdev);
2866 struct octeon_device *oct = lio->oct_dev;
2867 struct octnic_ctrl_pkt nctrl;
2868 int retval;
2869
2870 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2871 netif_info(lio, drv, lio->netdev,
2872 "firmware does not support spoofchk\n");
2873 return -EOPNOTSUPP;
2874 }
2875
2876 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2877 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2878 return -EINVAL;
2879 }
2880
2881 if (enable) {
2882 if (oct->sriov_info.vf_spoofchk[vfidx])
2883 return 0;
2884 } else {
2885 /* Clear */
2886 if (!oct->sriov_info.vf_spoofchk[vfidx])
2887 return 0;
2888 }
2889
2890 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2891 nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2892 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
2893 nctrl.ncmd.s.param1 =
2894 vfidx + 1; /* vfidx is 0 based,
2895 * but vf_num (param1) is 1 based
2896 */
2897 nctrl.ncmd.s.param2 = enable;
2898 nctrl.ncmd.s.more = 0;
2899 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2900 nctrl.cb_fn = NULL;
2901
2902 retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2903
2904 if (retval) {
2905 netif_info(lio, drv, lio->netdev,
2906 "Failed to set VF %d spoofchk %s\n", vfidx,
2907 enable ? "on" : "off");
2908 return -1;
2909 }
2910
2911 oct->sriov_info.vf_spoofchk[vfidx] = enable;
2912 netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
2913 enable ? "on" : "off");
2914
2915 return 0;
2916 }
2917
liquidio_set_vf_vlan(struct net_device * netdev,int vfidx,u16 vlan,u8 qos,__be16 vlan_proto)2918 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2919 u16 vlan, u8 qos, __be16 vlan_proto)
2920 {
2921 struct lio *lio = GET_LIO(netdev);
2922 struct octeon_device *oct = lio->oct_dev;
2923 struct octnic_ctrl_pkt nctrl;
2924 u16 vlantci;
2925 int ret = 0;
2926
2927 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2928 return -EINVAL;
2929
2930 if (vlan_proto != htons(ETH_P_8021Q))
2931 return -EPROTONOSUPPORT;
2932
2933 if (vlan >= VLAN_N_VID || qos > 7)
2934 return -EINVAL;
2935
2936 if (vlan)
2937 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2938 else
2939 vlantci = 0;
2940
2941 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2942 return 0;
2943
2944 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2945
2946 if (vlan)
2947 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2948 else
2949 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2950
2951 nctrl.ncmd.s.param1 = vlantci;
2952 nctrl.ncmd.s.param2 =
2953 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2954 nctrl.ncmd.s.more = 0;
2955 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2956 nctrl.cb_fn = NULL;
2957
2958 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2959 if (ret) {
2960 if (ret > 0)
2961 ret = -EIO;
2962 return ret;
2963 }
2964
2965 oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2966
2967 return ret;
2968 }
2969
liquidio_get_vf_config(struct net_device * netdev,int vfidx,struct ifla_vf_info * ivi)2970 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2971 struct ifla_vf_info *ivi)
2972 {
2973 struct lio *lio = GET_LIO(netdev);
2974 struct octeon_device *oct = lio->oct_dev;
2975 u8 *macaddr;
2976
2977 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2978 return -EINVAL;
2979
2980 memset(ivi, 0, sizeof(struct ifla_vf_info));
2981
2982 ivi->vf = vfidx;
2983 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2984 ether_addr_copy(&ivi->mac[0], macaddr);
2985 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2986 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2987 if (oct->sriov_info.trusted_vf.active &&
2988 oct->sriov_info.trusted_vf.id == vfidx)
2989 ivi->trusted = true;
2990 else
2991 ivi->trusted = false;
2992 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
2993 ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
2994 ivi->max_tx_rate = lio->linfo.link.s.speed;
2995 ivi->min_tx_rate = 0;
2996
2997 return 0;
2998 }
2999
liquidio_send_vf_trust_cmd(struct lio * lio,int vfidx,bool trusted)3000 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
3001 {
3002 struct octeon_device *oct = lio->oct_dev;
3003 struct octeon_soft_command *sc;
3004 int retval;
3005
3006 sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3007 if (!sc)
3008 return -ENOMEM;
3009
3010 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3011
3012 /* vfidx is 0 based, but vf_num (param1) is 1 based */
3013 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3014 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3015 trusted);
3016
3017 init_completion(&sc->complete);
3018 sc->sc_status = OCTEON_REQUEST_PENDING;
3019
3020 retval = octeon_send_soft_command(oct, sc);
3021 if (retval == IQ_SEND_FAILED) {
3022 octeon_free_soft_command(oct, sc);
3023 retval = -1;
3024 } else {
3025 /* Wait for response or timeout */
3026 retval = wait_for_sc_completion_timeout(oct, sc, 0);
3027 if (retval)
3028 return (retval);
3029
3030 WRITE_ONCE(sc->caller_is_done, true);
3031 }
3032
3033 return retval;
3034 }
3035
liquidio_set_vf_trust(struct net_device * netdev,int vfidx,bool setting)3036 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3037 bool setting)
3038 {
3039 struct lio *lio = GET_LIO(netdev);
3040 struct octeon_device *oct = lio->oct_dev;
3041
3042 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3043 /* trusted vf is not supported by firmware older than 1.7.1 */
3044 return -EOPNOTSUPP;
3045 }
3046
3047 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3048 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3049 return -EINVAL;
3050 }
3051
3052 if (setting) {
3053 /* Set */
3054
3055 if (oct->sriov_info.trusted_vf.active &&
3056 oct->sriov_info.trusted_vf.id == vfidx)
3057 return 0;
3058
3059 if (oct->sriov_info.trusted_vf.active) {
3060 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3061 return -EPERM;
3062 }
3063 } else {
3064 /* Clear */
3065
3066 if (!oct->sriov_info.trusted_vf.active)
3067 return 0;
3068 }
3069
3070 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3071 if (setting) {
3072 oct->sriov_info.trusted_vf.id = vfidx;
3073 oct->sriov_info.trusted_vf.active = true;
3074 } else {
3075 oct->sriov_info.trusted_vf.active = false;
3076 }
3077
3078 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3079 setting ? "" : "not ");
3080 } else {
3081 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3082 return -1;
3083 }
3084
3085 return 0;
3086 }
3087
liquidio_set_vf_link_state(struct net_device * netdev,int vfidx,int linkstate)3088 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3089 int linkstate)
3090 {
3091 struct lio *lio = GET_LIO(netdev);
3092 struct octeon_device *oct = lio->oct_dev;
3093 struct octnic_ctrl_pkt nctrl;
3094 int ret = 0;
3095
3096 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3097 return -EINVAL;
3098
3099 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3100 return 0;
3101
3102 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3103 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3104 nctrl.ncmd.s.param1 =
3105 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3106 nctrl.ncmd.s.param2 = linkstate;
3107 nctrl.ncmd.s.more = 0;
3108 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3109 nctrl.cb_fn = NULL;
3110
3111 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3112
3113 if (!ret)
3114 oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3115 else if (ret > 0)
3116 ret = -EIO;
3117
3118 return ret;
3119 }
3120
3121 static int
liquidio_eswitch_mode_get(struct devlink * devlink,u16 * mode)3122 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3123 {
3124 struct lio_devlink_priv *priv;
3125 struct octeon_device *oct;
3126
3127 priv = devlink_priv(devlink);
3128 oct = priv->oct;
3129
3130 *mode = oct->eswitch_mode;
3131
3132 return 0;
3133 }
3134
3135 static int
liquidio_eswitch_mode_set(struct devlink * devlink,u16 mode,struct netlink_ext_ack * extack)3136 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3137 struct netlink_ext_ack *extack)
3138 {
3139 struct lio_devlink_priv *priv;
3140 struct octeon_device *oct;
3141 int ret = 0;
3142
3143 priv = devlink_priv(devlink);
3144 oct = priv->oct;
3145
3146 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3147 return -EINVAL;
3148
3149 if (oct->eswitch_mode == mode)
3150 return 0;
3151
3152 switch (mode) {
3153 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3154 oct->eswitch_mode = mode;
3155 ret = lio_vf_rep_create(oct);
3156 break;
3157
3158 case DEVLINK_ESWITCH_MODE_LEGACY:
3159 lio_vf_rep_destroy(oct);
3160 oct->eswitch_mode = mode;
3161 break;
3162
3163 default:
3164 ret = -EINVAL;
3165 }
3166
3167 return ret;
3168 }
3169
3170 static const struct devlink_ops liquidio_devlink_ops = {
3171 .eswitch_mode_get = liquidio_eswitch_mode_get,
3172 .eswitch_mode_set = liquidio_eswitch_mode_set,
3173 };
3174
3175 static int
liquidio_get_port_parent_id(struct net_device * dev,struct netdev_phys_item_id * ppid)3176 liquidio_get_port_parent_id(struct net_device *dev,
3177 struct netdev_phys_item_id *ppid)
3178 {
3179 struct lio *lio = GET_LIO(dev);
3180 struct octeon_device *oct = lio->oct_dev;
3181
3182 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3183 return -EOPNOTSUPP;
3184
3185 ppid->id_len = ETH_ALEN;
3186 ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3187
3188 return 0;
3189 }
3190
liquidio_get_vf_stats(struct net_device * netdev,int vfidx,struct ifla_vf_stats * vf_stats)3191 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3192 struct ifla_vf_stats *vf_stats)
3193 {
3194 struct lio *lio = GET_LIO(netdev);
3195 struct octeon_device *oct = lio->oct_dev;
3196 struct oct_vf_stats stats;
3197 int ret;
3198
3199 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3200 return -EINVAL;
3201
3202 memset(&stats, 0, sizeof(struct oct_vf_stats));
3203 ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3204 if (!ret) {
3205 vf_stats->rx_packets = stats.rx_packets;
3206 vf_stats->tx_packets = stats.tx_packets;
3207 vf_stats->rx_bytes = stats.rx_bytes;
3208 vf_stats->tx_bytes = stats.tx_bytes;
3209 vf_stats->broadcast = stats.broadcast;
3210 vf_stats->multicast = stats.multicast;
3211 }
3212
3213 return ret;
3214 }
3215
3216 static const struct net_device_ops lionetdevops = {
3217 .ndo_open = liquidio_open,
3218 .ndo_stop = liquidio_stop,
3219 .ndo_start_xmit = liquidio_xmit,
3220 .ndo_get_stats64 = liquidio_get_stats64,
3221 .ndo_set_mac_address = liquidio_set_mac,
3222 .ndo_set_rx_mode = liquidio_set_mcast_list,
3223 .ndo_tx_timeout = liquidio_tx_timeout,
3224
3225 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
3226 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
3227 .ndo_change_mtu = liquidio_change_mtu,
3228 .ndo_eth_ioctl = liquidio_ioctl,
3229 .ndo_fix_features = liquidio_fix_features,
3230 .ndo_set_features = liquidio_set_features,
3231 .ndo_set_vf_mac = liquidio_set_vf_mac,
3232 .ndo_set_vf_vlan = liquidio_set_vf_vlan,
3233 .ndo_get_vf_config = liquidio_get_vf_config,
3234 .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk,
3235 .ndo_set_vf_trust = liquidio_set_vf_trust,
3236 .ndo_set_vf_link_state = liquidio_set_vf_link_state,
3237 .ndo_get_vf_stats = liquidio_get_vf_stats,
3238 .ndo_get_port_parent_id = liquidio_get_port_parent_id,
3239 };
3240
3241 /**
3242 * liquidio_init - Entry point for the liquidio module
3243 */
liquidio_init(void)3244 static int __init liquidio_init(void)
3245 {
3246 int i;
3247 struct handshake *hs;
3248
3249 init_completion(&first_stage);
3250
3251 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3252
3253 if (liquidio_init_pci())
3254 return -EINVAL;
3255
3256 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3257
3258 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3259 hs = &handshake[i];
3260 if (hs->pci_dev) {
3261 wait_for_completion(&hs->init);
3262 if (!hs->init_ok) {
3263 /* init handshake failed */
3264 dev_err(&hs->pci_dev->dev,
3265 "Failed to init device\n");
3266 liquidio_deinit_pci();
3267 return -EIO;
3268 }
3269 }
3270 }
3271
3272 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3273 hs = &handshake[i];
3274 if (hs->pci_dev) {
3275 wait_for_completion_timeout(&hs->started,
3276 msecs_to_jiffies(30000));
3277 if (!hs->started_ok) {
3278 /* starter handshake failed */
3279 dev_err(&hs->pci_dev->dev,
3280 "Firmware failed to start\n");
3281 liquidio_deinit_pci();
3282 return -EIO;
3283 }
3284 }
3285 }
3286
3287 return 0;
3288 }
3289
lio_nic_info(struct octeon_recv_info * recv_info,void * buf)3290 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3291 {
3292 struct octeon_device *oct = (struct octeon_device *)buf;
3293 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3294 int gmxport = 0;
3295 union oct_link_status *ls;
3296 int i;
3297
3298 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3299 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3300 recv_pkt->buffer_size[0],
3301 recv_pkt->rh.r_nic_info.gmxport);
3302 goto nic_info_err;
3303 }
3304
3305 gmxport = recv_pkt->rh.r_nic_info.gmxport;
3306 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3307 OCT_DROQ_INFO_SIZE);
3308
3309 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3310 for (i = 0; i < oct->ifcount; i++) {
3311 if (oct->props[i].gmxport == gmxport) {
3312 update_link_status(oct->props[i].netdev, ls);
3313 break;
3314 }
3315 }
3316
3317 nic_info_err:
3318 for (i = 0; i < recv_pkt->buffer_count; i++)
3319 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3320 octeon_free_recv_info(recv_info);
3321 return 0;
3322 }
3323
3324 /**
3325 * setup_nic_devices - Setup network interfaces
3326 * @octeon_dev: octeon device
3327 *
3328 * Called during init time for each device. It assumes the NIC
3329 * is already up and running. The link information for each
3330 * interface is passed in link_info.
3331 */
setup_nic_devices(struct octeon_device * octeon_dev)3332 static int setup_nic_devices(struct octeon_device *octeon_dev)
3333 {
3334 struct lio *lio = NULL;
3335 struct net_device *netdev;
3336 u8 mac[6], i, j, *fw_ver, *micro_ver;
3337 unsigned long micro;
3338 u32 cur_ver;
3339 struct octeon_soft_command *sc;
3340 struct liquidio_if_cfg_resp *resp;
3341 struct octdev_props *props;
3342 int retval, num_iqueues, num_oqueues;
3343 int max_num_queues = 0;
3344 union oct_nic_if_cfg if_cfg;
3345 unsigned int base_queue;
3346 unsigned int gmx_port_id;
3347 u32 resp_size, data_size;
3348 u32 ifidx_or_pfnum;
3349 struct lio_version *vdata;
3350 struct devlink *devlink;
3351 struct lio_devlink_priv *lio_devlink;
3352
3353 /* This is to handle link status changes */
3354 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3355 OPCODE_NIC_INFO,
3356 lio_nic_info, octeon_dev);
3357
3358 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3359 * They are handled directly.
3360 */
3361 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3362 free_netbuf);
3363
3364 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3365 free_netsgbuf);
3366
3367 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3368 free_netsgbuf_with_resp);
3369
3370 for (i = 0; i < octeon_dev->ifcount; i++) {
3371 resp_size = sizeof(struct liquidio_if_cfg_resp);
3372 data_size = sizeof(struct lio_version);
3373 sc = (struct octeon_soft_command *)
3374 octeon_alloc_soft_command(octeon_dev, data_size,
3375 resp_size, 0);
3376 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3377 vdata = (struct lio_version *)sc->virtdptr;
3378
3379 *((u64 *)vdata) = 0;
3380 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3381 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3382 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3383
3384 if (OCTEON_CN23XX_PF(octeon_dev)) {
3385 num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3386 num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3387 base_queue = octeon_dev->sriov_info.pf_srn;
3388
3389 gmx_port_id = octeon_dev->pf_num;
3390 ifidx_or_pfnum = octeon_dev->pf_num;
3391 } else {
3392 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3393 octeon_get_conf(octeon_dev), i);
3394 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3395 octeon_get_conf(octeon_dev), i);
3396 base_queue = CFG_GET_BASE_QUE_NIC_IF(
3397 octeon_get_conf(octeon_dev), i);
3398 gmx_port_id = CFG_GET_GMXID_NIC_IF(
3399 octeon_get_conf(octeon_dev), i);
3400 ifidx_or_pfnum = i;
3401 }
3402
3403 dev_dbg(&octeon_dev->pci_dev->dev,
3404 "requesting config for interface %d, iqs %d, oqs %d\n",
3405 ifidx_or_pfnum, num_iqueues, num_oqueues);
3406
3407 if_cfg.u64 = 0;
3408 if_cfg.s.num_iqueues = num_iqueues;
3409 if_cfg.s.num_oqueues = num_oqueues;
3410 if_cfg.s.base_queue = base_queue;
3411 if_cfg.s.gmx_port_id = gmx_port_id;
3412
3413 sc->iq_no = 0;
3414
3415 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3416 OPCODE_NIC_IF_CFG, 0,
3417 if_cfg.u64, 0);
3418
3419 init_completion(&sc->complete);
3420 sc->sc_status = OCTEON_REQUEST_PENDING;
3421
3422 retval = octeon_send_soft_command(octeon_dev, sc);
3423 if (retval == IQ_SEND_FAILED) {
3424 dev_err(&octeon_dev->pci_dev->dev,
3425 "iq/oq config failed status: %x\n",
3426 retval);
3427 /* Soft instr is freed by driver in case of failure. */
3428 octeon_free_soft_command(octeon_dev, sc);
3429 return(-EIO);
3430 }
3431
3432 /* Sleep on a wait queue till the cond flag indicates that the
3433 * response arrived or timed-out.
3434 */
3435 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3436 if (retval)
3437 return retval;
3438
3439 retval = resp->status;
3440 if (retval) {
3441 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3442 WRITE_ONCE(sc->caller_is_done, true);
3443 goto setup_nic_dev_done;
3444 }
3445 snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3446 32, "%s",
3447 resp->cfg_info.liquidio_firmware_version);
3448
3449 /* Verify f/w version (in case of 'auto' loading from flash) */
3450 fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3451 if (memcmp(LIQUIDIO_BASE_VERSION,
3452 fw_ver,
3453 strlen(LIQUIDIO_BASE_VERSION))) {
3454 dev_err(&octeon_dev->pci_dev->dev,
3455 "Unmatched firmware version. Expected %s.x, got %s.\n",
3456 LIQUIDIO_BASE_VERSION, fw_ver);
3457 WRITE_ONCE(sc->caller_is_done, true);
3458 goto setup_nic_dev_done;
3459 } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3460 FW_IS_PRELOADED) {
3461 dev_info(&octeon_dev->pci_dev->dev,
3462 "Using auto-loaded firmware version %s.\n",
3463 fw_ver);
3464 }
3465
3466 /* extract micro version field; point past '<maj>.<min>.' */
3467 micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3468 if (kstrtoul(micro_ver, 10, µ) != 0)
3469 micro = 0;
3470 octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3471 octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3472 octeon_dev->fw_info.ver.rev = micro;
3473
3474 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3475 (sizeof(struct liquidio_if_cfg_info)) >> 3);
3476
3477 num_iqueues = hweight64(resp->cfg_info.iqmask);
3478 num_oqueues = hweight64(resp->cfg_info.oqmask);
3479
3480 if (!(num_iqueues) || !(num_oqueues)) {
3481 dev_err(&octeon_dev->pci_dev->dev,
3482 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3483 resp->cfg_info.iqmask,
3484 resp->cfg_info.oqmask);
3485 WRITE_ONCE(sc->caller_is_done, true);
3486 goto setup_nic_dev_done;
3487 }
3488
3489 if (OCTEON_CN6XXX(octeon_dev)) {
3490 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3491 cn6xxx));
3492 } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3493 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3494 cn23xx_pf));
3495 }
3496
3497 dev_dbg(&octeon_dev->pci_dev->dev,
3498 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3499 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3500 num_iqueues, num_oqueues, max_num_queues);
3501 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3502
3503 if (!netdev) {
3504 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3505 WRITE_ONCE(sc->caller_is_done, true);
3506 goto setup_nic_dev_done;
3507 }
3508
3509 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3510
3511 /* Associate the routines that will handle different
3512 * netdev tasks.
3513 */
3514 netdev->netdev_ops = &lionetdevops;
3515
3516 retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3517 if (retval) {
3518 dev_err(&octeon_dev->pci_dev->dev,
3519 "setting real number rx failed\n");
3520 WRITE_ONCE(sc->caller_is_done, true);
3521 goto setup_nic_dev_free;
3522 }
3523
3524 retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3525 if (retval) {
3526 dev_err(&octeon_dev->pci_dev->dev,
3527 "setting real number tx failed\n");
3528 WRITE_ONCE(sc->caller_is_done, true);
3529 goto setup_nic_dev_free;
3530 }
3531
3532 lio = GET_LIO(netdev);
3533
3534 memset(lio, 0, sizeof(struct lio));
3535
3536 lio->ifidx = ifidx_or_pfnum;
3537
3538 props = &octeon_dev->props[i];
3539 props->gmxport = resp->cfg_info.linfo.gmxport;
3540 props->netdev = netdev;
3541
3542 lio->linfo.num_rxpciq = num_oqueues;
3543 lio->linfo.num_txpciq = num_iqueues;
3544 for (j = 0; j < num_oqueues; j++) {
3545 lio->linfo.rxpciq[j].u64 =
3546 resp->cfg_info.linfo.rxpciq[j].u64;
3547 }
3548 for (j = 0; j < num_iqueues; j++) {
3549 lio->linfo.txpciq[j].u64 =
3550 resp->cfg_info.linfo.txpciq[j].u64;
3551 }
3552 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3553 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3554 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3555
3556 WRITE_ONCE(sc->caller_is_done, true);
3557
3558 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3559
3560 if (OCTEON_CN23XX_PF(octeon_dev) ||
3561 OCTEON_CN6XXX(octeon_dev)) {
3562 lio->dev_capability = NETIF_F_HIGHDMA
3563 | NETIF_F_IP_CSUM
3564 | NETIF_F_IPV6_CSUM
3565 | NETIF_F_SG | NETIF_F_RXCSUM
3566 | NETIF_F_GRO
3567 | NETIF_F_TSO | NETIF_F_TSO6
3568 | NETIF_F_LRO;
3569 }
3570 netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3571
3572 /* Copy of transmit encapsulation capabilities:
3573 * TSO, TSO6, Checksums for this device
3574 */
3575 lio->enc_dev_capability = NETIF_F_IP_CSUM
3576 | NETIF_F_IPV6_CSUM
3577 | NETIF_F_GSO_UDP_TUNNEL
3578 | NETIF_F_HW_CSUM | NETIF_F_SG
3579 | NETIF_F_RXCSUM
3580 | NETIF_F_TSO | NETIF_F_TSO6
3581 | NETIF_F_LRO;
3582
3583 netdev->hw_enc_features = (lio->enc_dev_capability &
3584 ~NETIF_F_LRO);
3585
3586 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
3587
3588 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3589
3590 netdev->vlan_features = lio->dev_capability;
3591 /* Add any unchangeable hw features */
3592 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
3593 NETIF_F_HW_VLAN_CTAG_RX |
3594 NETIF_F_HW_VLAN_CTAG_TX;
3595
3596 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3597
3598 netdev->hw_features = lio->dev_capability;
3599 /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/
3600 netdev->hw_features = netdev->hw_features &
3601 ~NETIF_F_HW_VLAN_CTAG_RX;
3602
3603 /* MTU range: 68 - 16000 */
3604 netdev->min_mtu = LIO_MIN_MTU_SIZE;
3605 netdev->max_mtu = LIO_MAX_MTU_SIZE;
3606
3607 /* Point to the properties for octeon device to which this
3608 * interface belongs.
3609 */
3610 lio->oct_dev = octeon_dev;
3611 lio->octprops = props;
3612 lio->netdev = netdev;
3613
3614 dev_dbg(&octeon_dev->pci_dev->dev,
3615 "if%d gmx: %d hw_addr: 0x%llx\n", i,
3616 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3617
3618 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3619 u8 vfmac[ETH_ALEN];
3620
3621 eth_random_addr(vfmac);
3622 if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3623 dev_err(&octeon_dev->pci_dev->dev,
3624 "Error setting VF%d MAC address\n",
3625 j);
3626 goto setup_nic_dev_free;
3627 }
3628 }
3629
3630 /* 64-bit swap required on LE machines */
3631 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3632 for (j = 0; j < 6; j++)
3633 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3634
3635 /* Copy MAC Address to OS network device structure */
3636
3637 eth_hw_addr_set(netdev, mac);
3638
3639 /* By default all interfaces on a single Octeon uses the same
3640 * tx and rx queues
3641 */
3642 lio->txq = lio->linfo.txpciq[0].s.q_no;
3643 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3644 if (liquidio_setup_io_queues(octeon_dev, i,
3645 lio->linfo.num_txpciq,
3646 lio->linfo.num_rxpciq)) {
3647 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3648 goto setup_nic_dev_free;
3649 }
3650
3651 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3652
3653 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3654 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3655
3656 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3657 dev_err(&octeon_dev->pci_dev->dev,
3658 "Gather list allocation failed\n");
3659 goto setup_nic_dev_free;
3660 }
3661
3662 /* Register ethtool support */
3663 liquidio_set_ethtool_ops(netdev);
3664 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3665 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3666 else
3667 octeon_dev->priv_flags = 0x0;
3668
3669 if (netdev->features & NETIF_F_LRO)
3670 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3671 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3672
3673 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3674 OCTNET_CMD_VLAN_FILTER_ENABLE);
3675
3676 if ((debug != -1) && (debug & NETIF_MSG_HW))
3677 liquidio_set_feature(netdev,
3678 OCTNET_CMD_VERBOSE_ENABLE, 0);
3679
3680 if (setup_link_status_change_wq(netdev))
3681 goto setup_nic_dev_free;
3682
3683 if ((octeon_dev->fw_info.app_cap_flags &
3684 LIQUIDIO_TIME_SYNC_CAP) &&
3685 setup_sync_octeon_time_wq(netdev))
3686 goto setup_nic_dev_free;
3687
3688 if (setup_rx_oom_poll_fn(netdev))
3689 goto setup_nic_dev_free;
3690
3691 /* Register the network device with the OS */
3692 if (register_netdev(netdev)) {
3693 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3694 goto setup_nic_dev_free;
3695 }
3696
3697 dev_dbg(&octeon_dev->pci_dev->dev,
3698 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3699 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3700 netif_carrier_off(netdev);
3701 lio->link_changes++;
3702
3703 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3704
3705 /* Sending command to firmware to enable Rx checksum offload
3706 * by default at the time of setup of Liquidio driver for
3707 * this device
3708 */
3709 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3710 OCTNET_CMD_RXCSUM_ENABLE);
3711 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3712 OCTNET_CMD_TXCSUM_ENABLE);
3713
3714 dev_dbg(&octeon_dev->pci_dev->dev,
3715 "NIC ifidx:%d Setup successful\n", i);
3716
3717 if (octeon_dev->subsystem_id ==
3718 OCTEON_CN2350_25GB_SUBSYS_ID ||
3719 octeon_dev->subsystem_id ==
3720 OCTEON_CN2360_25GB_SUBSYS_ID) {
3721 cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3722 octeon_dev->fw_info.ver.min,
3723 octeon_dev->fw_info.ver.rev);
3724
3725 /* speed control unsupported in f/w older than 1.7.2 */
3726 if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3727 dev_info(&octeon_dev->pci_dev->dev,
3728 "speed setting not supported by f/w.");
3729 octeon_dev->speed_setting = 25;
3730 octeon_dev->no_speed_setting = 1;
3731 } else {
3732 liquidio_get_speed(lio);
3733 }
3734
3735 if (octeon_dev->speed_setting == 0) {
3736 octeon_dev->speed_setting = 25;
3737 octeon_dev->no_speed_setting = 1;
3738 }
3739 } else {
3740 octeon_dev->no_speed_setting = 1;
3741 octeon_dev->speed_setting = 10;
3742 }
3743 octeon_dev->speed_boot = octeon_dev->speed_setting;
3744
3745 /* don't read FEC setting if unsupported by f/w (see above) */
3746 if (octeon_dev->speed_boot == 25 &&
3747 !octeon_dev->no_speed_setting) {
3748 liquidio_get_fec(lio);
3749 octeon_dev->props[lio->ifidx].fec_boot =
3750 octeon_dev->props[lio->ifidx].fec;
3751 }
3752 }
3753
3754 device_lock(&octeon_dev->pci_dev->dev);
3755 devlink = devlink_alloc(&liquidio_devlink_ops,
3756 sizeof(struct lio_devlink_priv),
3757 &octeon_dev->pci_dev->dev);
3758 if (!devlink) {
3759 device_unlock(&octeon_dev->pci_dev->dev);
3760 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3761 goto setup_nic_dev_free;
3762 }
3763
3764 lio_devlink = devlink_priv(devlink);
3765 lio_devlink->oct = octeon_dev;
3766
3767 octeon_dev->devlink = devlink;
3768 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3769 devlink_register(devlink);
3770 device_unlock(&octeon_dev->pci_dev->dev);
3771
3772 return 0;
3773
3774 setup_nic_dev_free:
3775
3776 while (i--) {
3777 dev_err(&octeon_dev->pci_dev->dev,
3778 "NIC ifidx:%d Setup failed\n", i);
3779 liquidio_destroy_nic_device(octeon_dev, i);
3780 }
3781
3782 setup_nic_dev_done:
3783
3784 return -ENODEV;
3785 }
3786
3787 #ifdef CONFIG_PCI_IOV
octeon_enable_sriov(struct octeon_device * oct)3788 static int octeon_enable_sriov(struct octeon_device *oct)
3789 {
3790 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3791 struct pci_dev *vfdev;
3792 int err;
3793 u32 u;
3794
3795 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3796 err = pci_enable_sriov(oct->pci_dev,
3797 oct->sriov_info.num_vfs_alloced);
3798 if (err) {
3799 dev_err(&oct->pci_dev->dev,
3800 "OCTEON: Failed to enable PCI sriov: %d\n",
3801 err);
3802 oct->sriov_info.num_vfs_alloced = 0;
3803 return err;
3804 }
3805 oct->sriov_info.sriov_enabled = 1;
3806
3807 /* init lookup table that maps DPI ring number to VF pci_dev
3808 * struct pointer
3809 */
3810 u = 0;
3811 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3812 OCTEON_CN23XX_VF_VID, NULL);
3813 while (vfdev) {
3814 if (vfdev->is_virtfn &&
3815 (vfdev->physfn == oct->pci_dev)) {
3816 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3817 vfdev;
3818 u += oct->sriov_info.rings_per_vf;
3819 }
3820 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3821 OCTEON_CN23XX_VF_VID, vfdev);
3822 }
3823 }
3824
3825 return num_vfs_alloced;
3826 }
3827
lio_pci_sriov_disable(struct octeon_device * oct)3828 static int lio_pci_sriov_disable(struct octeon_device *oct)
3829 {
3830 int u;
3831
3832 if (pci_vfs_assigned(oct->pci_dev)) {
3833 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3834 return -EPERM;
3835 }
3836
3837 pci_disable_sriov(oct->pci_dev);
3838
3839 u = 0;
3840 while (u < MAX_POSSIBLE_VFS) {
3841 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3842 u += oct->sriov_info.rings_per_vf;
3843 }
3844
3845 oct->sriov_info.num_vfs_alloced = 0;
3846 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3847 oct->pf_num);
3848
3849 return 0;
3850 }
3851
liquidio_enable_sriov(struct pci_dev * dev,int num_vfs)3852 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3853 {
3854 struct octeon_device *oct = pci_get_drvdata(dev);
3855 int ret = 0;
3856
3857 if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3858 (oct->sriov_info.sriov_enabled)) {
3859 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3860 oct->pf_num, num_vfs);
3861 return 0;
3862 }
3863
3864 if (!num_vfs) {
3865 lio_vf_rep_destroy(oct);
3866 ret = lio_pci_sriov_disable(oct);
3867 } else if (num_vfs > oct->sriov_info.max_vfs) {
3868 dev_err(&oct->pci_dev->dev,
3869 "OCTEON: Max allowed VFs:%d user requested:%d",
3870 oct->sriov_info.max_vfs, num_vfs);
3871 ret = -EPERM;
3872 } else {
3873 oct->sriov_info.num_vfs_alloced = num_vfs;
3874 ret = octeon_enable_sriov(oct);
3875 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3876 oct->pf_num, num_vfs);
3877 ret = lio_vf_rep_create(oct);
3878 if (ret)
3879 dev_info(&oct->pci_dev->dev,
3880 "vf representor create failed");
3881 }
3882
3883 return ret;
3884 }
3885 #endif
3886
3887 /**
3888 * liquidio_init_nic_module - initialize the NIC
3889 * @oct: octeon device
3890 *
3891 * This initialization routine is called once the Octeon device application is
3892 * up and running
3893 */
liquidio_init_nic_module(struct octeon_device * oct)3894 static int liquidio_init_nic_module(struct octeon_device *oct)
3895 {
3896 int i, retval = 0;
3897 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3898
3899 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3900
3901 /* only default iq and oq were initialized
3902 * initialize the rest as well
3903 */
3904 /* run port_config command for each port */
3905 oct->ifcount = num_nic_ports;
3906
3907 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3908
3909 for (i = 0; i < MAX_OCTEON_LINKS; i++)
3910 oct->props[i].gmxport = -1;
3911
3912 retval = setup_nic_devices(oct);
3913 if (retval) {
3914 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3915 goto octnet_init_failure;
3916 }
3917
3918 /* Call vf_rep_modinit if the firmware is switchdev capable
3919 * and do it from the first liquidio function probed.
3920 */
3921 if (!oct->octeon_id &&
3922 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3923 retval = lio_vf_rep_modinit();
3924 if (retval) {
3925 liquidio_stop_nic_module(oct);
3926 goto octnet_init_failure;
3927 }
3928 }
3929
3930 liquidio_ptp_init(oct);
3931
3932 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3933
3934 return retval;
3935
3936 octnet_init_failure:
3937
3938 oct->ifcount = 0;
3939
3940 return retval;
3941 }
3942
3943 /**
3944 * nic_starter - finish init
3945 * @work: work struct work_struct
3946 *
3947 * starter callback that invokes the remaining initialization work after the NIC is up and running.
3948 */
nic_starter(struct work_struct * work)3949 static void nic_starter(struct work_struct *work)
3950 {
3951 struct octeon_device *oct;
3952 struct cavium_wk *wk = (struct cavium_wk *)work;
3953
3954 oct = (struct octeon_device *)wk->ctxptr;
3955
3956 if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3957 return;
3958
3959 /* If the status of the device is CORE_OK, the core
3960 * application has reported its application type. Call
3961 * any registered handlers now and move to the RUNNING
3962 * state.
3963 */
3964 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3965 schedule_delayed_work(&oct->nic_poll_work.work,
3966 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3967 return;
3968 }
3969
3970 atomic_set(&oct->status, OCT_DEV_RUNNING);
3971
3972 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3973 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3974
3975 if (liquidio_init_nic_module(oct))
3976 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3977 else
3978 handshake[oct->octeon_id].started_ok = 1;
3979 } else {
3980 dev_err(&oct->pci_dev->dev,
3981 "Unexpected application running on NIC (%d). Check firmware.\n",
3982 oct->app_mode);
3983 }
3984
3985 complete(&handshake[oct->octeon_id].started);
3986 }
3987
3988 static int
octeon_recv_vf_drv_notice(struct octeon_recv_info * recv_info,void * buf)3989 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3990 {
3991 struct octeon_device *oct = (struct octeon_device *)buf;
3992 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3993 int i, notice, vf_idx;
3994 bool cores_crashed;
3995 u64 *data, vf_num;
3996
3997 notice = recv_pkt->rh.r.ossp;
3998 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
3999
4000 /* the first 64-bit word of data is the vf_num */
4001 vf_num = data[0];
4002 octeon_swap_8B_data(&vf_num, 1);
4003 vf_idx = (int)vf_num - 1;
4004
4005 cores_crashed = READ_ONCE(oct->cores_crashed);
4006
4007 if (notice == VF_DRV_LOADED) {
4008 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4009 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4010 dev_info(&oct->pci_dev->dev,
4011 "driver for VF%d was loaded\n", vf_idx);
4012 if (!cores_crashed)
4013 try_module_get(THIS_MODULE);
4014 }
4015 } else if (notice == VF_DRV_REMOVED) {
4016 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4017 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4018 dev_info(&oct->pci_dev->dev,
4019 "driver for VF%d was removed\n", vf_idx);
4020 if (!cores_crashed)
4021 module_put(THIS_MODULE);
4022 }
4023 } else if (notice == VF_DRV_MACADDR_CHANGED) {
4024 u8 *b = (u8 *)&data[1];
4025
4026 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4027 dev_info(&oct->pci_dev->dev,
4028 "VF driver changed VF%d's MAC address to %pM\n",
4029 vf_idx, b + 2);
4030 }
4031
4032 for (i = 0; i < recv_pkt->buffer_count; i++)
4033 recv_buffer_free(recv_pkt->buffer_ptr[i]);
4034 octeon_free_recv_info(recv_info);
4035
4036 return 0;
4037 }
4038
4039 /**
4040 * octeon_device_init - Device initialization for each Octeon device that is probed
4041 * @octeon_dev: octeon device
4042 */
octeon_device_init(struct octeon_device * octeon_dev)4043 static int octeon_device_init(struct octeon_device *octeon_dev)
4044 {
4045 int j, ret;
4046 char bootcmd[] = "\n";
4047 char *dbg_enb = NULL;
4048 enum lio_fw_state fw_state;
4049 struct octeon_device_priv *oct_priv = octeon_dev->priv;
4050 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4051
4052 /* Enable access to the octeon device and make its DMA capability
4053 * known to the OS.
4054 */
4055 if (octeon_pci_os_setup(octeon_dev))
4056 return 1;
4057
4058 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4059
4060 /* Identify the Octeon type and map the BAR address space. */
4061 if (octeon_chip_specific_setup(octeon_dev)) {
4062 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4063 return 1;
4064 }
4065
4066 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4067
4068 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4069 * since that is what is required for the reference to be removed
4070 * during de-initialization (see 'octeon_destroy_resources').
4071 */
4072 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4073 PCI_SLOT(octeon_dev->pci_dev->devfn),
4074 PCI_FUNC(octeon_dev->pci_dev->devfn),
4075 true);
4076
4077 octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4078
4079 /* CN23XX supports preloaded firmware if the following is true:
4080 *
4081 * The adapter indicates that firmware is currently running AND
4082 * 'fw_type' is 'auto'.
4083 *
4084 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4085 */
4086 if (OCTEON_CN23XX_PF(octeon_dev) &&
4087 cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4088 atomic_cmpxchg(octeon_dev->adapter_fw_state,
4089 FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4090 }
4091
4092 /* If loading firmware, only first device of adapter needs to do so. */
4093 fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4094 FW_NEEDS_TO_BE_LOADED,
4095 FW_IS_BEING_LOADED);
4096
4097 /* Here, [local variable] 'fw_state' is set to one of:
4098 *
4099 * FW_IS_PRELOADED: No firmware is to be loaded (see above)
4100 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4101 * firmware to the adapter.
4102 * FW_IS_BEING_LOADED: The driver's second instance will not load
4103 * firmware to the adapter.
4104 */
4105
4106 /* Prior to f/w load, perform a soft reset of the Octeon device;
4107 * if error resetting, return w/error.
4108 */
4109 if (fw_state == FW_NEEDS_TO_BE_LOADED)
4110 if (octeon_dev->fn_list.soft_reset(octeon_dev))
4111 return 1;
4112
4113 /* Initialize the dispatch mechanism used to push packets arriving on
4114 * Octeon Output queues.
4115 */
4116 if (octeon_init_dispatch_list(octeon_dev))
4117 return 1;
4118
4119 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4120 OPCODE_NIC_CORE_DRV_ACTIVE,
4121 octeon_core_drv_init,
4122 octeon_dev);
4123
4124 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4125 OPCODE_NIC_VF_DRV_NOTICE,
4126 octeon_recv_vf_drv_notice, octeon_dev);
4127 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4128 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4129 schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4130 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4131
4132 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4133
4134 if (octeon_set_io_queues_off(octeon_dev)) {
4135 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4136 return 1;
4137 }
4138
4139 if (OCTEON_CN23XX_PF(octeon_dev)) {
4140 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4141 if (ret) {
4142 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4143 return ret;
4144 }
4145 }
4146
4147 /* Initialize soft command buffer pool
4148 */
4149 if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4150 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4151 return 1;
4152 }
4153 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4154
4155 /* Setup the data structures that manage this Octeon's Input queues. */
4156 if (octeon_setup_instr_queues(octeon_dev)) {
4157 dev_err(&octeon_dev->pci_dev->dev,
4158 "instruction queue initialization failed\n");
4159 return 1;
4160 }
4161 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4162
4163 /* Initialize lists to manage the requests of different types that
4164 * arrive from user & kernel applications for this octeon device.
4165 */
4166 if (octeon_setup_response_list(octeon_dev)) {
4167 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4168 return 1;
4169 }
4170 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4171
4172 if (octeon_setup_output_queues(octeon_dev)) {
4173 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4174 return 1;
4175 }
4176
4177 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4178
4179 if (OCTEON_CN23XX_PF(octeon_dev)) {
4180 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4181 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4182 return 1;
4183 }
4184 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4185
4186 if (octeon_allocate_ioq_vector
4187 (octeon_dev,
4188 octeon_dev->sriov_info.num_pf_rings)) {
4189 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4190 return 1;
4191 }
4192 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4193
4194 } else {
4195 /* The input and output queue registers were setup earlier (the
4196 * queues were not enabled). Any additional registers
4197 * that need to be programmed should be done now.
4198 */
4199 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4200 if (ret) {
4201 dev_err(&octeon_dev->pci_dev->dev,
4202 "Failed to configure device registers\n");
4203 return ret;
4204 }
4205 }
4206
4207 /* Initialize the tasklet that handles output queue packet processing.*/
4208 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4209 tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
4210
4211 /* Setup the interrupt handler and record the INT SUM register address
4212 */
4213 if (octeon_setup_interrupt(octeon_dev,
4214 octeon_dev->sriov_info.num_pf_rings))
4215 return 1;
4216
4217 /* Enable Octeon device interrupts */
4218 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4219
4220 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4221
4222 /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4223 * the output queue is enabled.
4224 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4225 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4226 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4227 * before any credits have been issued, causing the ring to be reset
4228 * (and the f/w appear to never have started).
4229 */
4230 for (j = 0; j < octeon_dev->num_oqs; j++)
4231 writel(octeon_dev->droq[j]->max_count,
4232 octeon_dev->droq[j]->pkts_credit_reg);
4233
4234 /* Enable the input and output queues for this Octeon device */
4235 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4236 if (ret) {
4237 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4238 return ret;
4239 }
4240
4241 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4242
4243 if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4244 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4245 if (!ddr_timeout) {
4246 dev_info(&octeon_dev->pci_dev->dev,
4247 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4248 }
4249
4250 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4251
4252 /* Wait for the octeon to initialize DDR after the soft-reset.*/
4253 while (!ddr_timeout) {
4254 set_current_state(TASK_INTERRUPTIBLE);
4255 if (schedule_timeout(HZ / 10)) {
4256 /* user probably pressed Control-C */
4257 return 1;
4258 }
4259 }
4260 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4261 if (ret) {
4262 dev_err(&octeon_dev->pci_dev->dev,
4263 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4264 ret);
4265 return 1;
4266 }
4267
4268 if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4269 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4270 return 1;
4271 }
4272
4273 /* Divert uboot to take commands from host instead. */
4274 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4275
4276 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4277 ret = octeon_init_consoles(octeon_dev);
4278 if (ret) {
4279 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4280 return 1;
4281 }
4282 /* If console debug enabled, specify empty string to use default
4283 * enablement ELSE specify NULL string for 'disabled'.
4284 */
4285 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4286 ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4287 if (ret) {
4288 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4289 return 1;
4290 } else if (octeon_console_debug_enabled(0)) {
4291 /* If console was added AND we're logging console output
4292 * then set our console print function.
4293 */
4294 octeon_dev->console[0].print = octeon_dbg_console_print;
4295 }
4296
4297 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4298
4299 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4300 ret = load_firmware(octeon_dev);
4301 if (ret) {
4302 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4303 return 1;
4304 }
4305
4306 atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4307 }
4308
4309 handshake[octeon_dev->octeon_id].init_ok = 1;
4310 complete(&handshake[octeon_dev->octeon_id].init);
4311
4312 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4313 oct_priv->dev = octeon_dev;
4314
4315 return 0;
4316 }
4317
4318 /**
4319 * octeon_dbg_console_print - Debug console print function
4320 * @oct: octeon device
4321 * @console_num: console number
4322 * @prefix: first portion of line to display
4323 * @suffix: second portion of line to display
4324 *
4325 * The OCTEON debug console outputs entire lines (excluding '\n').
4326 * Normally, the line will be passed in the 'prefix' parameter.
4327 * However, due to buffering, it is possible for a line to be split into two
4328 * parts, in which case they will be passed as the 'prefix' parameter and
4329 * 'suffix' parameter.
4330 */
octeon_dbg_console_print(struct octeon_device * oct,u32 console_num,char * prefix,char * suffix)4331 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4332 char *prefix, char *suffix)
4333 {
4334 if (prefix && suffix)
4335 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4336 suffix);
4337 else if (prefix)
4338 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4339 else if (suffix)
4340 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4341
4342 return 0;
4343 }
4344
4345 /**
4346 * liquidio_exit - Exits the module
4347 */
liquidio_exit(void)4348 static void __exit liquidio_exit(void)
4349 {
4350 liquidio_deinit_pci();
4351
4352 pr_info("LiquidIO network module is now unloaded\n");
4353 }
4354
4355 module_init(liquidio_init);
4356 module_exit(liquidio_exit);
4357