/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
MODULE_LICENSE("GPL");

/* OOM task polling interval */
#define LIO_OOM_POLL_INTERVAL_MS 250

#define OCTNIC_MAX_SG MAX_SKB_FRAGS

/**
 * lio_delete_glists - Delete gather lists
 * @lio: per-network private data
 */
void lio_delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->oct_dev->num_iqs; i++) {
		do {
			g = (struct octnic_gather *)
			    lio_list_delete_head(&lio->glist[i]);
			kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
		    lio->glists_dma_base && lio->glists_dma_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}
EXPORT_SYMBOL_GPL(lio_delete_glists);

/**
 * lio_setup_glists - Setup gather lists
 * @oct: octeon_device
 * @lio: per-network private data
 * @num_iqs: count of iqs to allocate
 */
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	struct octnic_gather *g;
	int i, j;

	lio->glist_lock =
	    kzalloc_objs(*lio->glist_lock, num_iqs);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist =
	    kzalloc_objs(*lio->glist, num_iqs);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

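	/* Size one gather list entry for OCTNIC_MAX_SG fragments: each
	 * octeon_sg_entry covers four buffer pointers, so ROUNDUP4()>>2
	 * yields the entry count; round the byte total up to an 8-byte
	 * boundary for the DMA engine.
	 */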
	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store the virtual and DMA base addresses of
	 * per-glist consistent memory
	 */
	lio->glists_virt_base = kzalloc_objs(*lio->glists_virt_base, num_iqs);
	lio->glists_dma_base = kzalloc_objs(*lio->glists_dma_base, num_iqs);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		lio_delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc_obj(*g);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(lio_setup_glists);

int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = cmd;
	nctrl.ncmd.s.param1 = param1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
			ret);
		if (ret > 0)
			ret = -EIO;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(liquidio_set_feature);

void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *netdev_queue = txq;

	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
}

void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb = NULL;
	struct octeon_soft_command *sc;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return;
	}

	(*pkts_compl)++;
	*bytes_compl += skb->len;
}

int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct octeon_soft_command *sc;
	struct netdev_queue *txq;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return 0;
	}

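	/* Credit the bytes to BQL and report whether the subqueue is
	 * currently stopped.
	 */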
	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(txq, skb->len);

	return netif_xmit_stopped(txq);
}

void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *mac;

	if (nctrl->sc_status)
		return;

	switch (nctrl->ncmd.s.cmd) {
	case OCTNET_CMD_CHANGE_DEVFLAGS:
	case OCTNET_CMD_SET_MULTI_LIST:
	case OCTNET_CMD_SET_UC_LIST:
		break;

	case OCTNET_CMD_CHANGE_MACADDR:
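		/* The MAC address occupies the low six bytes of the 8-byte
		 * udd word, hence the two-byte offset.
		 */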
		mac = ((u8 *)&nctrl->udd[0]) + 2;
		if (nctrl->ncmd.s.param1) {
			/* vfidx is 0 based, but vf_num (param1) is 1 based */
			int vfidx = nctrl->ncmd.s.param1 - 1;
			bool mac_is_admin_assigned = nctrl->ncmd.s.param2;

			if (mac_is_admin_assigned)
				netif_info(lio, probe, lio->netdev,
					   "MAC Address %pM is configured for VF %d\n",
					   mac, vfidx);
		} else {
			netif_info(lio, probe, lio->netdev,
				   " MACAddr changed to %pM\n",
				   mac);
		}
		break;

	case OCTNET_CMD_GPIO_ACCESS:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");

		break;

	case OCTNET_CMD_ID_ACTIVE:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");

		break;

	case OCTNET_CMD_LRO_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
		break;

	case OCTNET_CMD_LRO_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VLAN_FILTER_CTL:
		if (nctrl->ncmd.s.param1)
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter enabled\n", netdev->name);
		else
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter disabled\n", netdev->name);
		break;

	case OCTNET_CMD_ADD_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_DEL_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_SET_SETTINGS:
		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
			 netdev->name);

		break;

	/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_RX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_RXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_TX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_TXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_VXLAN_PORT_CONFIG:
		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d ADDED\n",
				   nctrl->ncmd.s.param1);
		} else if (nctrl->ncmd.s.more ==
			   OCTNET_CMD_VXLAN_PORT_DEL) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d DELETED\n",
				   nctrl->ncmd.s.param1);
		}
		break;

	case OCTNET_CMD_SET_FLOW_CTL:
		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
		break;

	case OCTNET_CMD_QUEUE_COUNT_CTL:
		netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
			   nctrl->ncmd.s.param1);
		break;

	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
			nctrl->ncmd.s.cmd);
	}
}
EXPORT_SYMBOL_GPL(liquidio_link_ctrl_cmd_completion);

void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
{
	bool macaddr_changed = false;
	struct net_device *netdev;
	struct lio *lio;

	rtnl_lock();

	netdev = oct->props[0].netdev;
	lio = GET_LIO(netdev);

	lio->linfo.macaddr_is_admin_asgnd = true;

	if (!ether_addr_equal(netdev->dev_addr, mac)) {
		macaddr_changed = true;
		eth_hw_addr_set(netdev, mac);
		ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
	}

	rtnl_unlock();

	if (macaddr_changed)
		dev_info(&oct->pci_dev->dev,
			 "PF changed VF's MAC address to %pM\n", mac);

	/* no need to notify the firmware of the macaddr change because
	 * the PF did that already
	 */
}

void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
				  struct octeon_droq *droq)
{
	struct net_device *netdev = oct->props[0].netdev;
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];

	queue_delayed_work(wq->wq, &wq->wk.work,
			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
}

static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	int q_no = wk->ctxul;
	struct octeon_droq *droq = oct->droq[q_no];

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
		return;

	if (octeon_retry_droq_refill(droq))
		octeon_schedule_rxq_oom_work(oct, droq);
}

int setup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct cavium_wq *wq;
	int q, q_no;

	for (q = 0; q < oct->num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		wq = &lio->rxq_status_wq[q_no];
		wq->wq = alloc_workqueue("rxq-oom-status",
					 WQ_MEM_RECLAIM | WQ_PERCPU, 0);
		if (!wq->wq) {
			dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&wq->wk.work,
				  octnet_poll_check_rxq_oom_status);
		wq->wk.ctxptr = lio;
		wq->wk.ctxul = q_no;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(setup_rx_oom_poll_fn);

void cleanup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct cavium_wq *wq;
	int q_no;

	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
		wq = &lio->rxq_status_wq[q_no];
		if (wq->wq) {
			cancel_delayed_work_sync(&wq->wk.work);
			destroy_workqueue(wq->wq);
			wq->wq = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(cleanup_rx_oom_poll_fn);

/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
	struct net_device *netdev;
	struct lio *lio;

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
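	/* Wake the subqueue once the IQ has room again and the link is up. */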
	if (__netif_subqueue_stopped(netdev, iq->q_index) &&
	    lio->linfo.link.s.link_up &&
	    (!octnet_iq_is_full(oct, iq_num))) {
		netif_wake_subqueue(netdev, iq->q_index);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
					  tx_restart, 1);
	}
}

/**
 * octeon_setup_droq - Setup output queue
 * @oct: octeon device
 * @q_no: which queue
 * @num_descs: how many descriptors
 * @desc_size: size of each descriptor
 * @app_ctx: application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
			     int desc_size, void *app_ctx)
{
	int ret_val;

	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val < 0)
		return ret_val;

	if (ret_val == 1) {
		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
		return 0;
	}

	/* Enable the droq queues */
	octeon_set_droq_pkt_op(oct, q_no, 1);

	/* Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
	writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);

	return ret_val;
}

/**
 * liquidio_push_packet - Routine to push packets arriving on Octeon interface up to the network layer.
 * @octeon_id: octeon device id.
 * @skbuff: skbuff struct to be passed to network layer.
 * @len: size of total data received.
 * @rh: Control header associated with the packet
 * @param: additional control data with the packet
 * @arg: farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 __maybe_unused octeon_id,
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq =
		container_of(param, struct octeon_droq, napi);
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct napi_struct *napi = param;
	u16 vtag = 0;
	u32 r_dh_off;
	u64 ns;

	if (netdev) {
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
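		/* Copy-break: packets larger than MIN_SKB_SIZE keep their
		 * page as a frag after the header is copied to the linear
		 * area; smaller packets are copied entirely and the page
		 * is released.
		 */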
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
							MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));
			skb_copy_to_linear_data(skb, page_address(pg_info->page)
						+ pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

		if (oct->ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included by the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
				    (lio,
				     LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data + r_dh_off),
					       sizeof(ns));
					r_dh_off -= BYTES_PER_DHLEN_UNIT;
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
			}
		}

		if (rh->r_dh.has_hash) {
			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
			u32 hash = be32_to_cpu(*hash_be);

			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			r_dh_off -= BYTES_PER_DHLEN_UNIT;
		}

		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
		skb->protocol = eth_type_trans(skb, skb->dev);

		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) ==
		       CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Set the encapsulation field based on the status received
		 * from the firmware
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    rh->r_dh.vlan) {
			u16 priority = rh->r_dh.priority;
			u16 vid = rh->r_dh.vlan;

			vtag = (priority << VLAN_PRIO_SHIFT) | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		napi_gro_receive(napi, skb);

		droq->stats.rx_bytes_received += len -
			rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
		droq->stats.rx_pkts_received++;
	} else {
		recv_buffer_free(skb);
	}
}

/**
 * napi_schedule_wrapper - wrapper for calling napi_schedule
 * @param: parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}

/**
 * liquidio_napi_drv_callback - callback when receive interrupt occurs and we are in NAPI mode
 * @arg: pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
	struct octeon_device *oct;
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();

	oct = droq->oct_dev;

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
	    droq->cpu_id == this_cpu) {
		napi_schedule_irqoff(&droq->napi);
	} else {
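		/* The DROQ is bound to a different CPU; bounce the NAPI
		 * schedule there via an async smp call.
		 */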
		INIT_CSD(&droq->csd, napi_schedule_wrapper, &droq->napi);
		smp_call_function_single_async(droq->cpu_id, &droq->csd);
	}
}

/**
 * liquidio_napi_poll - Entry point for NAPI polling
 * @napi: NAPI structure
 * @budget: maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_instr_queue *iq;
	struct octeon_device *oct;
	struct octeon_droq *droq;
	int tx_done = 0, iq_no;
	int work_done;

	droq = container_of(napi, struct octeon_droq, napi);
	oct = droq->oct_dev;
	iq_no = droq->q_no;

	/* Handle Droq descriptors */
	work_done = octeon_droq_process_poll_pkts(oct, droq, budget);

	/* Flush the instruction queue */
	iq = oct->instr_queue[iq_no];
	if (iq) {
		/* TODO: move this check to inside octeon_flush_iq,
		 * once check_db_timeout is removed
		 */
		if (atomic_read(&iq->instr_pending))
			/* Process iq buffers within the budget limits */
			tx_done = octeon_flush_iq(oct, iq, budget);
		else
			tx_done = 1;
		/* Update iq read-index rather than waiting for next interrupt.
		 * Return back if tx_done is false.
		 */
		/* sub-queue status update */
		lio_update_txq_status(oct, iq_no);
	} else {
		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
			__func__, iq_no);
	}

#define MAX_REG_CNT 2000000U
	/* force enable interrupt if reg cnts are high to avoid wraparound */
	if ((work_done < budget && tx_done) ||
	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
	    (droq->pkt_count >= MAX_REG_CNT)) {
		napi_complete_done(napi, work_done);

		octeon_enable_irq(droq->oct_dev, droq->q_no);
		return 0;
	}

	return (!tx_done) ? (budget) : (work_done);
}

/**
 * liquidio_setup_io_queues - Setup input and output queues
 * @octeon_dev: octeon device
 * @ifidx: Interface index
 * @num_iqs: input io queue count
 * @num_oqs: output io queue count
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int cpu_id_modulus;
	int num_tx_descs;
	struct lio *lio;
	int retval = 0;
	int q, q_no;
	int cpu_id;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"%s index:%d linfo.rxpciq.s.q_no:%d\n",
			__func__, q, q_no);
		retval = octeon_setup_droq(
		    octeon_dev, q_no,
		    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
						lio->ifidx),
		    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
						   lio->ifidx),
		    NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"%s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
			(u64)netdev, (u64)octeon_dev);
		netif_napi_add(netdev, napi, liquidio_napi_poll);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
		/* 23XX PF/VF can send/recv control messages (via the first
		 * PF/VF-owned droq) from the firmware even if the ethX
		 * interface is down, so that's why poll_mode must be off
		 * for the first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
		    octeon_get_conf(octeon_dev), lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}

		/* XPS */
		if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
		    octeon_dev->ioq_vector) {
			struct octeon_ioq_vector *ioq_vector;

			ioq_vector = &octeon_dev->ioq_vector[q];
			netif_set_xps_queue(netdev,
					    &ioq_vector->affinity_mask,
					    ioq_vector->iq_index);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(liquidio_setup_io_queues);

static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			if (OCTEON_CN23XX_VF(oct))
				dev_err(&oct->pci_dev->dev,
					"should not come here should not get rx when poll mode = 0 for vf\n");
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}

	return 0;
}

irqreturn_t
liquidio_msix_intr_handler(int __maybe_unused irq, void *dev)
{
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
	u64 ret;

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}

/**
 * liquidio_schedule_droq_pkt_handlers - Droq packet processor scheduler
 * @oct: octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct octeon_droq *droq;
	u64 oq_no;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & BIT_ULL(oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= BIT_ULL(oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}

/**
 * liquidio_legacy_intr_handler - Interrupt handler for octeon
 * @irq: unused
 * @dev: octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int __maybe_unused irq, void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return ret;
}

/**
 * octeon_setup_interrupt - Setup interrupt for octeon device
 * @oct: octeon device
 * @num_ioqs: number of queues
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
	struct msix_entry *msix_entries;
	char *queue_irq_names = NULL;
	int i, num_interrupts = 0;
	int num_alloc_ioq_vectors;
	char *aux_irq_name = NULL;
	int num_ioq_vectors;
	int irqret, err;

	if (oct->msix_on) {
		oct->num_msix_irqs = num_ioqs;
		if (OCTEON_CN23XX_PF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;

			/* one non-ioq interrupt for handling
			 * sli_mac_pf_int_sum
			 */
			oct->num_msix_irqs += 1;
		} else if (OCTEON_CN23XX_VF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
		}

		/* allocate storage for the names assigned to each irq */
		oct->irq_name_storage =
			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage) {
			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
			return -ENOMEM;
		}

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			aux_irq_name = &queue_irq_names
				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

		oct->msix_entries = kzalloc_objs(struct msix_entry,
						 oct->num_msix_irqs);
		if (!oct->msix_entries) {
			dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return -ENOMEM;
		}

		msix_entries = (struct msix_entry *)oct->msix_entries;

		/* Assumption: PF MSI-X vectors start at pf_srn and run
		 * through trs, not from 0. Change this code if that changes.
		 */
		if (OCTEON_CN23XX_PF(oct)) {
			for (i = 0; i < oct->num_msix_irqs - 1; i++)
				msix_entries[i].entry =
					oct->sriov_info.pf_srn + i;

			msix_entries[oct->num_msix_irqs - 1].entry =
				oct->sriov_info.trs;
		} else if (OCTEON_CN23XX_VF(oct)) {
			for (i = 0; i < oct->num_msix_irqs; i++)
				msix_entries[i].entry = i;
		}
		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return num_alloc_ioq_vectors;
		}

		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;
		/* For PF, there is one non-ioq interrupt handler */
		if (OCTEON_CN23XX_PF(oct)) {
			num_ioq_vectors -= 1;

			snprintf(aux_irq_name, INTRNAMSIZ,
				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
				 oct->pf_num);
			irqret = request_irq(
					msix_entries[num_ioq_vectors].vector,
					liquidio_legacy_intr_handler, 0,
					aux_irq_name, oct);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
		}
		for (i = 0 ; i < num_ioq_vectors ; i++) {
			if (OCTEON_CN23XX_PF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
					 oct->octeon_id, oct->pf_num, i);

			if (OCTEON_CN23XX_VF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
					 oct->octeon_id, oct->vf_num, i);

			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     &queue_irq_names[IRQ_NAME_OFF(i)],
					     &oct->ioq_vector[i]);

			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Free the non-ioq irq vector here. */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/* clearing affinity mask. */
					irq_set_affinity_hint(
						msix_entries[i].vector,
						NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(msix_entries[i].vector,
					      &oct->ioq_vector[i].affinity_mask
					      );
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		/* allocate storage for the names assigned to the irq */
		oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage)
			return -ENOMEM;

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-pf%u-rxtx-%u",
				 oct->octeon_id, oct->pf_num, 0);

		if (OCTEON_CN23XX_VF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-vf%u-rxtx-%u",
				 oct->octeon_id, oct->vf_num, 0);

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler,
				     IRQF_SHARED,
				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return irqret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(octeon_setup_interrupt);

/**
 * liquidio_change_mtu - Net device change_mtu
 * @netdev: network device
 * @new_mtu: the new max transmit unit size
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int ret = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate soft command\n");
		return -ENOMEM;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
	ncmd->s.param1 = new_mtu;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
		octeon_free_soft_command(oct, sc);
		return -EINVAL;
	}
	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	ret = wait_for_sc_completion_timeout(oct, sc, 0);
	if (ret)
		return ret;

	if (sc->sc_status) {
		WRITE_ONCE(sc->caller_is_done, true);
		return -EINVAL;
	}

	WRITE_ONCE(netdev->mtu, new_mtu);
	lio->mtu = new_mtu;

	WRITE_ONCE(sc->caller_is_done, true);
	return 0;
}
EXPORT_SYMBOL_GPL(liquidio_change_mtu);

int lio_wait_for_clean_oq(struct octeon_device *oct)
{
	int retry = 100, pending_pkts = 0;
	int idx;

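	/* Poll for pending RX packets to drain, sleeping one jiffy
	 * between checks, for at most 100 iterations.
	 */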
	do {
		pending_pkts = 0;

		for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
			if (!(oct->io_qmask.oq & BIT_ULL(idx)))
				continue;
			pending_pkts +=
				atomic_read(&oct->droq[idx]->pkts_pending);
		}

		if (pending_pkts > 0)
			schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pending_pkts;
}
EXPORT_SYMBOL_GPL(lio_wait_for_clean_oq);

static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
		(struct oct_nic_stats_resp *)sc->virtrptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
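		/* Stats arrive big-endian from the firmware; swap each
		 * 64-bit word before copying.
		 */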
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;
		rstats->red_drops = rsp_rstats->red_drops;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
		rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times LRO of a packet was aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision */
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collisions */
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
		tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_pki = rsp_tstats->fw_err_pki;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
		resp->status = -1;
	}
}

static int lio_fetch_vf_stats(struct lio *lio)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_nic_vf_stats_resp *resp;

	int retval;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_vf_stats_resp),
					  0);

	if (!sc) {
		dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
		retval = -ENOMEM;
		goto lio_fetch_vf_stats_exit;
	}

	resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		goto lio_fetch_vf_stats_exit;
	}

	retval =
		wait_for_sc_completion_timeout(oct_dev, sc,
					       (2 * LIO_SC_MAX_TMO_MS));
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev,
			"sc OPCODE_NIC_VF_PORT_STATS command failed\n");
		goto lio_fetch_vf_stats_exit;
	}

	if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
				    (sizeof(u64)) >> 3);

		if (resp->spoofmac_cnt != 0) {
			dev_warn(&oct_dev->pci_dev->dev,
				 "%llu Spoofed packets detected\n",
				 resp->spoofmac_cnt);
		}
	}
	WRITE_ONCE(sc->caller_is_done, 1);

lio_fetch_vf_stats_exit:
	return retval;
}

void lio_fetch_stats(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = wk->ctxptr;
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_nic_stats_resp *resp;
	unsigned long time_in_jiffies;
	int retval;

	if (OCTEON_CN23XX_PF(oct_dev)) {
		/* report spoofchk every 2 seconds */
		if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
		    (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
		    oct_dev->sriov_info.num_vfs_alloced) {
			lio_fetch_vf_stats(lio);
		}

		oct_dev->vfstats_poll++;
	}

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  0);

	if (!sc) {
		dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
		goto lio_fetch_stats_exit;
	}

	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		goto lio_fetch_stats_exit;
	}

	retval = wait_for_sc_completion_timeout(oct_dev, sc,
						(2 * LIO_SC_MAX_TMO_MS));
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
		goto lio_fetch_stats_exit;
	}

	octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
	WRITE_ONCE(sc->caller_is_done, true);

lio_fetch_stats_exit:
	time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
	if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
		schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);

	return;
}
EXPORT_SYMBOL_GPL(lio_fetch_stats);

int liquidio_set_speed(struct lio *lio, int speed)
{
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	if (oct->speed_setting == speed)
		return 0;

	if (!OCTEON_CN23XX_PF(oct)) {
		dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
			__func__);
		return -EOPNOTSUPP;
	}

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
	ncmd->s.param1 = speed;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		retval = -EBUSY;
	} else {
		/* Wait for response or timeout */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;

		if (retval) {
			dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
				__func__, retval);
			WRITE_ONCE(sc->caller_is_done, true);

			return -EIO;
		}

		var = be32_to_cpu((__force __be32)resp->speed);
		if (var != speed) {
			dev_err(&oct->pci_dev->dev,
				"%s: setting failed speed= %x, expect %x\n",
				__func__, var, speed);
		}

		oct->speed_setting = var;
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

int liquidio_get_speed(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_GET;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		retval = -EIO;
	} else {
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;
		if (retval) {
			dev_err(&oct->pci_dev->dev,
				"%s failed retval=%d\n", __func__, retval);
			retval = -EIO;
		} else {
			u32 var;

			var = be32_to_cpu((__force __be32)resp->speed);
			oct->speed_setting = var;
			if (var == 0xffff) {
				/* unable to access boot variables;
				 * get the default value based on the NIC type
				 */
				if (oct->subsystem_id ==
				    OCTEON_CN2350_25GB_SUBSYS_ID ||
				    oct->subsystem_id ==
				    OCTEON_CN2360_25GB_SUBSYS_ID) {
					oct->no_speed_setting = 1;
					oct->speed_setting = 25;
				} else {
					oct->speed_setting = 10;
				}
			}
		}
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(liquidio_get_speed);

int liquidio_set_fec(struct lio *lio, int on_off)
{
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	struct octeon_device *oct;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	oct = lio->oct_dev;

	if (oct->props[lio->ifidx].fec == on_off)
		return 0;

	if (!OCTEON_CN23XX_PF(oct)) {
		dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n",
			__func__);
		return -1;
	}

	if (oct->speed_boot != 25) {
		dev_err(&oct->pci_dev->dev,
			"Set FEC only when link speed is 25G during insmod\n");
		return -1;
	}

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp), 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to allocate soft command\n");
		return -ENOMEM;
	}

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_FEC_SET;
	ncmd->s.param1 = on_off;
	/* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return (-EIO);

	var = be32_to_cpu(resp->fec_setting);
	resp->fec_setting = var;
	if (var != on_off) {
		dev_err(&oct->pci_dev->dev,
			"Setting failed fec= %x, expect %x\n",
			var, on_off);
		oct->props[lio->ifidx].fec = var;
		if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
			oct->props[lio->ifidx].fec = 1;
		else
			oct->props[lio->ifidx].fec = 0;
	}

	WRITE_ONCE(sc->caller_is_done, true);

	if (oct->props[lio->ifidx].fec !=
	    oct->props[lio->ifidx].fec_boot) {
		dev_dbg(&oct->pci_dev->dev,
			"Reload driver to change fec to %s\n",
			oct->props[lio->ifidx].fec ? "on" : "off");
	}

	return retval;
}

int liquidio_get_fec(struct lio *lio)
{
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	struct octeon_device *oct;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	oct = lio->oct_dev;

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp), 0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_FEC_GET;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev,
			 "%s: Failed to send soft command\n", __func__);
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return retval;

	var = be32_to_cpu(resp->fec_setting);
	resp->fec_setting = var;
	if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
		oct->props[lio->ifidx].fec = 1;
	else
		oct->props[lio->ifidx].fec = 0;

	WRITE_ONCE(sc->caller_is_done, true);

	if (oct->props[lio->ifidx].fec !=
	    oct->props[lio->ifidx].fec_boot) {
		dev_dbg(&oct->pci_dev->dev,
			"Reload driver to change fec to %s\n",
			oct->props[lio->ifidx].fec ? "on" : "off");
	}

	return retval;
}
EXPORT_SYMBOL_GPL(liquidio_get_fec);