// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include "qede.h"
#include "qede_ptp.h"

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40	0x1634
#define CHIP_NUM_57980S_10	0x1666
#define CHIP_NUM_57980S_MF	0x1636
#define CHIP_NUM_57980S_100	0x1644
#define CHIP_NUM_57980S_50	0x1654
#define CHIP_NUM_57980S_25	0x1656
#define CHIP_NUM_57980S_IOV	0x1664
#define CHIP_NUM_AH		0x8070
#define CHIP_NUM_AH_IOV		0x8090

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV

#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);

#define TX_TIMEOUT	(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI	11

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
				      struct qed_generic_tlvs *data);
static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;
	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif

static int __maybe_unused qede_suspend(struct device *dev)
{
	dev_info(dev, "Device does not support suspend operation\n");

	return -EOPNOTSUPP;
}

static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL);

static const struct pci_error_handlers qede_err_handler = {
	.error_detected = qede_io_error_detected,
};

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
	.err_handler = &qede_err_handler,
	.driver.pm = &qede_pm_ops,
};

static struct qed_eth_cb_ops qede_ll_ops = {
	.common = {
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
		.schedule_recovery_handler = qede_schedule_recovery_handler,
		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
		.get_generic_tlv_data = qede_get_generic_tlv_data,
		.get_protocol_tlv_data = qede_get_eth_tlv_data,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static int __init qede_init(void)
{
	int ret;

	pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");

	qede_forced_speed_maps_init();

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);

	spin_lock(&edev->stats_lock);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;
	p_common->gft_filter_drop = stats.common.gft_filter_drop;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
	p_common->link_change_count = stats.common.link_change_count;
	p_common->ptp_skip_txts = edev->ptp_skip_txts;

	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}

	spin_unlock(&edev->stats_lock);
}

static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	p_common = &edev->stats.common;

	spin_lock(&edev->stats_lock);

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;

	spin_unlock(&edev->stats_lock);
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
	char *p_sb = (char *)fp->sb_info->sb_virt;
	u32 sb_size, i;

	sb_size = sizeof(struct status_block);

	for (i = 0; i < sb_size; i += 8)
		DP_NOTICE(edev,
			  "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
			  p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
			  p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
}

static void
qede_txq_fp_log_metadata(struct qede_dev *edev,
			 struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_chain *p_chain = &txq->tx_pbl;

	/* Dump txq/fp/sb ids etc. other metadata */
	DP_NOTICE(edev,
		  "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
		  fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
		  p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);

	/* Dump all the relevant prod/cons indexes */
	DP_NOTICE(edev,
		  "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
		  le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
		  qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
}

static void
qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_sb_info_dbg sb_dbg;
	int rc;

	/* sb info */
	qede_fp_sb_dump(edev, fp);

	memset(&sb_dbg, 0, sizeof(sb_dbg));
	rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);

	DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
		  sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);

	/* report to mfw */
	edev->ops->common->mfw_report(edev->cdev,
				      "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
				      txq->index, le16_to_cpu(*txq->hw_cons_ptr),
				      qed_chain_get_cons_idx(&txq->tx_pbl),
				      qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
	if (!rc)
		edev->ops->common->mfw_report(edev->cdev,
					      "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
					      txq->index, fp->sb_info->igu_sb_id,
					      sb_dbg.igu_prod, sb_dbg.igu_cons,
					      sb_dbg.pi[TX_PI(txq->cos)]);
}

static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	netif_carrier_off(dev);
	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);

	for_each_queue(i) {
		struct qede_tx_queue *txq;
		struct qede_fastpath *fp;
		int cos;

		fp = &edev->fp_array[i];
		if (!(fp->type & QEDE_FASTPATH_TX))
			continue;

		for_each_cos_in_txq(edev, cos) {
			txq = &fp->txq[cos];

			/* Dump basic metadata for all queues */
			qede_txq_fp_log_metadata(edev, fp, txq);

			if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
			    qed_chain_get_prod_idx(&txq->tx_pbl))
				qede_tx_log_print(edev, fp, txq);
		}
	}

	if (IS_VF(edev))
		return;

	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	    edev->state == QEDE_STATE_RECOVERY) {
		DP_INFO(edev,
			"Avoid handling a Tx timeout while another HW error is being handled\n");
		return;
	}

	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int cos, count, offset;

	if (num_tc > edev->dev_info.num_tc)
		return -EINVAL;

	netdev_reset_tc(ndev);
	netdev_set_num_tc(ndev, num_tc);

	for_each_cos_in_txq(edev, cos) {
		count = QEDE_TSS_COUNT(edev);
		offset = cos * QEDE_TSS_COUNT(edev);
		netdev_set_tc_queue(ndev, cos, count, offset);
	}

	return 0;
}
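
/* Illustrative mapping only (the queue counts below are hypothetical, not
 * read from hardware): with QEDE_TSS_COUNT(edev) == 8, each traffic class
 * owns one contiguous block of eight Tx queues, so the
 * netdev_set_tc_queue() loop above yields:
 *
 *	TC 0 -> ndev txqs 0..7
 *	TC 1 -> ndev txqs 8..15
 *
 * i.e. the CoS index selects the block and QEDE_TSS_COUNT() its width.
 */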

static int
qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
		__be16 proto)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return qede_add_tc_flower_fltr(edev, proto, f);
	case FLOW_CLS_DESTROY:
		return qede_delete_flow_filter(edev, f->cookie);
	default:
		return -EOPNOTSUPP;
	}
}

static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct flow_cls_offload *f;
	struct qede_dev *edev = cb_priv;

	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		f = type_data;
		return qede_set_flower(edev, f, f->common.protocol);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(qede_block_cb_list);

static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
		      void *type_data)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct tc_mqprio_qopt *mqprio;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &qede_block_cb_list,
						  qede_setup_tc_block_cb,
						  edev, edev, true);
	case TC_SETUP_QDISC_MQPRIO:
		mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return qede_setup_tc(dev, mqprio->num_tc);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_tx_timeout		= qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac		= qede_set_vf_mac,
	.ndo_set_vf_vlan	= qede_set_vf_vlan,
	.ndo_set_vf_trust	= qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state	= qede_set_vf_link_state,
	.ndo_set_vf_spoofchk	= qede_set_vf_spoofchk,
	.ndo_get_vf_config	= qede_get_vf_config,
	.ndo_set_vf_rate	= qede_set_vf_rate,
#endif
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= qede_rx_flow_steer,
#endif
	.ndo_xdp_xmit		= qede_xdp_transmit,
	.ndo_setup_tc		= qede_setup_tc_offload,
	.ndo_hwtstamp_get	= qede_hwtstamp_get,
	.ndo_hwtstamp_set	= qede_hwtstamp_set,
};

static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_features_check	= qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
	.ndo_xdp_xmit		= qede_xdp_transmit,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues * info->num_tc,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;

	if (is_kdump_kernel()) {
		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
	} else {
		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
	}

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', declare it enabled if the device supports it.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;

	if (edev->dev_info.common.b_arfs_capable)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);

		qede_set_udp_tunnels(edev);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_NDO_XMIT;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}
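
/* Example decodes for the "debug" module parameter (illustrative values
 * only; the QED_LOG_*_MASK definitions live in the qed headers):
 *
 *	debug=0x80000000 -> NOTICE level (b31), no verbose modules
 *	debug=0x40000000 -> INFO level (b30)
 *	debug=0x00000022 -> VERBOSE level, per-module bitmap enabling the
 *			    modules at bits 1 and 5
 */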

static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle the mem-alloc failure case where
			 * qede_init_fp didn't register the xdp_rxq_info yet.
			 * Implicitly this applies only to Rx queues
			 * (fp->type & QEDE_FASTPATH_RX).
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kzalloc_objs(*edev->fp_array, QEDE_QUEUE_CNT(edev));
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	if (!edev->coal_entry) {
		edev->coal_entry = kzalloc_objs(*edev->coal_entry,
						QEDE_MAX_RSS_CNT(edev));
		if (!edev->coal_entry) {
			DP_ERR(edev, "coalesce entry allocation failed\n");
			goto err;
		}
	}

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
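	/* For instance (hypothetical split, not taken from real config):
	 * with QEDE_QUEUE_CNT() == 6, fp_num_rx == 2 and fp_num_tx == 2,
	 * fp_combined becomes 2 and the loop below lays the array out as
	 * [RX, RX, COMBINED, COMBINED, TX, TX].
	 */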
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc_obj(*fp->sb_info);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kzalloc_objs(*fp->txq, edev->dev_info.num_tc);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc_obj(*fp->rxq);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc_obj(*fp->xdp_tx);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}

/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}

/* This version of the lock should be used when acquiring the RTNL lock is also
 * needed in addition to the internal qede lock.
 */
static void qede_lock(struct qede_dev *edev)
{
	rtnl_lock();
	__qede_lock(edev);
}

static void qede_unlock(struct qede_dev *edev)
{
	__qede_unlock(edev);
	rtnl_unlock();
}
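
/* Sketch of the intended calling pattern (illustrative only, not a real
 * flow in this file): paths that can race with netdev callbacks take both
 * locks, while purely internal state changes take only the qede lock.
 *
 *	qede_lock(edev);	// RTNL + qede lock
 *	...change driver state that netdev flows may observe...
 *	qede_unlock(edev);
 *
 *	__qede_lock(edev);	// qede lock only
 *	...non-reentrant flow that doesn't need RTNL...
 *	__qede_unlock(edev);
 */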

static void qede_periodic_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     periodic_task.work);

	qede_fill_by_demand_stats(edev);
	schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
}

static void qede_init_periodic_task(struct qede_dev *edev)
{
	INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
	spin_lock_init(&edev->stats_lock);
	edev->stats_coal_usecs = USEC_PER_SEC;
	edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
}

static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	/* Disable execution of this deferred work once
	 * qede removal is in progress; this stops any future
	 * scheduling of sp_task.
	 */
	if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
		return;

	/* The locking scheme depends on the specific flag:
	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
	 * ensure that ongoing flows are ended and new ones are not started.
	 * In other cases - only the internal qede lock should be acquired.
	 */

	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
		cancel_delayed_work_sync(&edev->periodic_task);
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		qede_lock(edev);
		qede_recovery_handler(edev);
		qede_unlock(edev);
	}

	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
		if (edev->state == QEDE_STATE_OPEN)
			qede_process_arfs_filters(edev, false);
	}
#endif
	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
		qede_generic_hw_err_handler(edev);
	__qede_unlock(edev);

	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		edev->ops->common->recovery_process(edev->cdev);
	}
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;
	u16 num_cons;

	/* 64 rx + 64 tx + 64 XDP */
	memset(&pf_params, 0, sizeof(struct qed_pf_params));

	/* 1 rx + 1 xdp + max tx cos */
	num_cons = QED_MIN_L2_CONS;

	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;

	/* Same for VFs - make sure they'll have sufficient connections
	 * to support XDP Tx queues.
	 */
	pf_params.eth_pf_params.num_vf_cons = 48;

	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}

#define QEDE_FW_VER_STR_SIZE	80

static void qede_log_probe(struct qede_dev *edev)
{
	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
	char buf[QEDE_FW_VER_STR_SIZE];
	size_t left_size;

	snprintf(buf, QEDE_FW_VER_STR_SIZE,
		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
		 p_dev_info->fw_eng,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
		 QED_MFW_VERSION_3_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
		 QED_MFW_VERSION_2_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
		 QED_MFW_VERSION_1_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
		 QED_MFW_VERSION_0_OFFSET);

	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
	if (p_dev_info->mbi_version && left_size)
		snprintf(buf + strlen(buf), left_size,
			 " [MBI %d.%d.%d]",
			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
			 QED_MBI_VERSION_2_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
			 QED_MBI_VERSION_1_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
			 QED_MBI_VERSION_0_OFFSET);

	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
		buf, edev->ndev->name);
}

enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
	QEDE_PROBE_RECOVERY,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	if (mode != QEDE_PROBE_RECOVERY) {
		edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
					   dp_level);
		if (!edev) {
			rc = -ENOMEM;
			goto err2;
		}

		edev->devlink = qed_ops->common->devlink_register(cdev);
		if (IS_ERR(edev->devlink)) {
			DP_NOTICE(edev, "Cannot register devlink\n");
			rc = PTR_ERR(edev->devlink);
			edev->devlink = NULL;
			goto err3;
		}
	} else {
		struct net_device *ndev = pci_get_drvdata(pdev);
		struct qed_devlink *qdl;

		edev = netdev_priv(ndev);
		qdl = devlink_priv(edev->devlink);
		qdl->cdev = cdev;
		edev->cdev = cdev;
		memset(&edev->stats, 0, sizeof(edev->stats));
		memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
	}

	if (is_vf)
		set_bit(QEDE_FLAGS_IS_VF, &edev->flags);

	qede_init_ndev(edev);

	rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
	if (rc)
		goto err3;

	if (mode != QEDE_PROBE_RECOVERY) {
		/* Prepare the lock prior to the registration of the netdev,
		 * as once it's registered we might reach flows requiring it
		 * [it's even possible to reach a flow needing it directly
		 * from there, although it's unlikely].
		 */
		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
		mutex_init(&edev->qede_lock);
		qede_init_periodic_task(edev);

		rc = register_netdev(edev->ndev);
		if (rc) {
			DP_NOTICE(edev, "Cannot register net-device\n");
			goto err4;
		}
	}

	edev->ops->common->set_name(cdev, edev->ndev->name);

	/* PTP not supported on VFs */
	if (!is_vf)
		qede_ptp_enable(edev);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);

	/* retain user config (for example - after recovery) */
	if (edev->stats_coal_usecs)
		schedule_delayed_work(&edev->periodic_task, 0);

	return 0;

err4:
	qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
	if (mode != QEDE_PROBE_RECOVERY)
		free_netdev(edev->ndev);
	else
		edev->cdev = NULL;
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
	QEDE_REMOVE_RECOVERY,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev;
	struct qed_dev *cdev;

	if (!ndev) {
		dev_info(&pdev->dev, "Device has already been removed\n");
		return;
	}

	edev = netdev_priv(ndev);
	cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));

	if (mode != QEDE_REMOVE_RECOVERY) {
		set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
		unregister_netdev(ndev);

		cancel_delayed_work_sync(&edev->sp_task);
		cancel_delayed_work_sync(&edev->periodic_task);

		edev->ops->common->set_power_state(cdev, PCI_D0);

		pci_set_drvdata(pdev, NULL);
	}

	qede_ptp_disable(edev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	if (system_state == SYSTEM_POWER_OFF)
		return;

	if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
		qed_ops->common->devlink_unregister(edev->devlink);
		edev->devlink = NULL;
	}
	qed_ops->common->remove(cdev);
	edev->cdev = NULL;

	/* Since this can happen out-of-sync with other flows,
	 * don't release the netdevice until after slowpath stop
	 * has been called to guarantee various other contexts
	 * [e.g., QED register callbacks] won't break anything when
	 * accessing the netdevice.
	 */
	if (mode != QEDE_REMOVE_RECOVERY) {
		kfree(edev->coal_entry);
		free_netdev(ndev);
	}

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}

static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	if (sb_info->sb_virt) {
		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
					      QED_SB_TYPE_L2_QUEUE);
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
		memset(sb_info, 0, sizeof(*sb_info));
	}
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

		rx_buf->data = NULL;
		__free_page(data);
	}
}

static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
	int i;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];

		tpa_info->state = QEDE_AGG_STATE_NONE;
	}
}

/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	struct qed_chain_init_params params = {
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.num_elems = RX_RING_SIZE,
	};
	struct qed_dev *cdev = edev->cdev;
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
	size = rxq->rx_headroom +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Make sure that the headroom and payload fit in a single page */
	if (rxq->rx_buf_size + size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE - size;

	/* Segment size to split a page in multiple equal parts,
	 * unless XDP is used in which case we'd use the entire page.
	 */
	if (!edev->xdp_prog) {
		size = size + rxq->rx_buf_size;
		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
	} else {
		rxq->rx_buf_seg_size = PAGE_SIZE;
		edev->ndev->features &= ~NETIF_F_GRO_HW;
	}
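
	/* Rough sizing illustration (assumed numbers, not taken from the
	 * hardware): with a 1500-byte MTU, 4 KiB pages and no XDP program,
	 * rx_buf_size plus the headroom/skb_shared_info overhead typically
	 * rounds up to a 2 KiB segment, so two Rx buffers share each page.
	 */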

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		rc = -ENOMEM;
		goto err;
	}

	/* Allocate FW Rx ring */
	params.mode = QED_CHAIN_MODE_NEXT_PTR;
	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
	params.elem_size = sizeof(struct eth_rx_bd);

	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	params.mode = QED_CHAIN_MODE_PBL;
	params.intended_use = QED_CHAIN_USE_TO_CONSUME;
	params.elem_size = sizeof(union eth_rx_cqe);

	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	rxq->filled_buffers = 0;
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(rxq, false);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
	if (!edev->gro_disable)
		qede_set_tpa_param(rxq);
err:
	return rc;
}

static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	if (txq->is_xdp)
		kfree(txq->sw_tx_ring.xdp);
	else
		kfree(txq->sw_tx_ring.skbs);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct qed_chain_init_params params = {
		.mode = QED_CHAIN_MODE_PBL,
		.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.num_elems = edev->q_num_tx_buffers,
		.elem_size = sizeof(union eth_tx_bd_types),
	};
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	if (txq->is_xdp) {
		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.xdp)
			goto err;
	} else {
		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.skbs)
			goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	qede_free_mem_sb(edev, fp->sb_info, fp->id);

	if (fp->type & QEDE_FASTPATH_RX)
		qede_free_mem_rxq(edev, fp->rxq);

	if (fp->type & QEDE_FASTPATH_XDP)
		qede_free_mem_txq(edev, fp->xdp_tx);

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos)
			qede_free_mem_txq(edev, &fp->txq[cos]);
	}
}

/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains a status block, one rx queue and/or multiple per-TC tx
 * queues).
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int rc = 0;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
	if (rc)
		goto out;

	if (fp->type & QEDE_FASTPATH_RX) {
		rc = qede_alloc_mem_rxq(edev, fp->rxq);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_XDP) {
		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos) {
			rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
			if (rc)
				goto out;
		}
	}

out:
	return rc;
}

static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}
1764
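/* Used on recovery unload, when FW can no longer complete packets for us:
 * walk the Tx chain from consumer to producer, free each pending packet's
 * buffers, and report the reclaimed work to BQL through
 * netdev_tx_completed_queue() so the stack's byte/packet accounting stays
 * consistent.
 */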
static void qede_empty_tx_queue(struct qede_dev *edev,
				struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	int rc, len = 0;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
			   qed_chain_get_prod_idx(&txq->tx_pbl));

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev,
				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
				  txq->index,
				  qed_chain_get_cons_idx(&txq->tx_pbl),
				  qed_chain_get_prod_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
}

static void qede_empty_tx_queues(struct qede_dev *edev)
{
	int i;

	for_each_queue(i)
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_fastpath *fp;

				fp = &edev->fp_array[i];
				qede_empty_tx_queue(edev,
						    &fp->txq[cos]);
			}
		}
}

/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0;
	struct qede_fastpath *fp;
	bool init_xdp = false;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		if (fp->type & QEDE_FASTPATH_XDP) {
			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
								rxq_index);
			fp->xdp_tx->is_xdp = 1;

			spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
			init_xdp = true;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq->rxq_id = rxq_index++;

			/* Determine how to map buffers for this queue */
			if (fp->type & QEDE_FASTPATH_XDP)
				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
			else
				fp->rxq->data_direction = DMA_FROM_DEVICE;
			fp->rxq->dev = &edev->pdev->dev;

			/* Driver has no error path from here */
			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
						 fp->rxq->rxq_id, 0) < 0);

			if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
						       MEM_TYPE_PAGE_ORDER0,
						       NULL)) {
				DP_NOTICE(edev,
					  "Failed to register XDP memory model\n");
			}
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_tx_queue *txq = &fp->txq[cos];
				u16 ndev_tx_id;

				txq->cos = cos;
				txq->index = txq_index;
				ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
				txq->ndev_txq_id = ndev_tx_id;

				if (edev->dev_info.is_legacy)
					txq->is_legacy = true;
				txq->dev = &edev->pdev->dev;
			}

			txq_index++;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}

	if (init_xdp) {
		edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
		DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
	}
}

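/* Tell the stack how many Tx/Rx queues are actually usable. The Tx count
 * is multiplied by the number of traffic classes, since every TC owns its
 * own slice of the netdev Tx queue range.
 */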
static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc = 0;

	rc = netif_set_real_num_tx_queues(edev->ndev,
					  QEDE_TSS_COUNT(edev) *
					  edev->dev_info.num_tc);
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}

static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
		napi_enable(&edev->fp_array[i].napi);
	}
}

static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
	edev->int_info.msix_cnt = 0;
}

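/* Request one MSI-X vector per fastpath. With aRFS compiled in, each
 * Rx-capable vector is also added to the CPU reverse-map so that flows
 * steered by ndo_rx_flow_steer() land on the CPU that services the IRQ.
 */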
static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanity-check: the number of vectors must cover the prepared RSS queues */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
#ifdef CONFIG_RFS_ACCEL
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
					      edev->int_info.msix[i].vector);
			if (rc) {
				DP_ERR(edev, "Failed to add CPU rmap\n");
				qede_free_arfs(edev);
			}
		}
#endif
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
#ifdef CONFIG_RFS_ACCEL
			if (edev->ndev->rx_cpu_rmap)
				free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

			edev->ndev->rx_cpu_rmap = NULL;
#endif
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}

static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}

static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn to receive the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}
	return 0;
}

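/* Wait for a Tx ring to empty: poll the SW producer/consumer indices up to
 * 1000 times, sleeping 1-2ms per iteration (roughly one to two seconds in
 * total). If the queue is still stuck, optionally ask the MCP to drain it
 * once and retry; a second timeout is reported as -ENODEV.
 */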
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
		barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}

static int qede_stop_txq(struct qede_dev *edev,
			 struct qede_tx_queue *txq, int rss_id)
{
	/* Delete doorbell from doorbell recovery mechanism */
	edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
					   &txq->tx_db);

	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}

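/* Teardown mirrors bring-up in reverse: deactivate the vport first so FW
 * stops scheduling new work, then drain the Tx rings, stop all queues from
 * the last fastpath back to the first, and finally stop the vport itself.
 */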
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params *vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	struct qede_fastpath *fp;
	int rc, i;

	/* Disable the vport */
	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	vport_update_params->vport_id = 0;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 0;
	vport_update_params->update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, vport_update_params);
	vfree(vport_update_params);

	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_drain_txq(edev, &fp->txq[cos], true);
				if (rc)
					return rc;
			}
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_drain_txq(edev, fp->xdp_tx, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		fp = &edev->fp_array[i];

		/* Stop the Tx Queue(s) */
		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_stop_txq(edev, &fp->txq[cos], i);
				if (rc)
					return rc;
			}
		}

		/* Stop the Rx Queue */
		if (fp->type & QEDE_FASTPATH_RX) {
			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}

		/* Stop the XDP forwarding queue */
		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_stop_txq(edev, fp->xdp_tx, i);
			if (rc)
				return rc;

			bpf_prog_put(fp->rxq->xdp_prog);
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}

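/* Starting a Tx queue hands back the doorbell address and the queue handle
 * from qed. The doorbell data is pre-built here so the hot path only needs
 * to update the producer and write the cached value; the doorbell is also
 * registered with the recovery mechanism so it can be replayed after an
 * engine reset.
 */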
static int qede_start_txq(struct qede_dev *edev,
			  struct qede_fastpath *fp,
			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
	struct qed_queue_start_common_params params;
	struct qed_txq_start_ret_params ret_params;
	int rc;

	memset(&params, 0, sizeof(params));
	memset(&ret_params, 0, sizeof(ret_params));

	/* Let the XDP queue share the queue-zone with one of the regular txq.
	 * We don't really care about its coalescing.
	 */
	if (txq->is_xdp)
		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
	else
		params.queue_id = txq->index;

	params.p_sb = fp->sb_info;
	params.sb_idx = sb_idx;
	params.tc = txq->cos;

	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
				   page_cnt, &ret_params);
	if (rc) {
		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
		return rc;
	}

	txq->doorbell_addr = ret_params.p_doorbell;
	txq->handle = ret_params.p_handle;

	/* Determine the FW consumer address associated */
	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

	/* Prepare the doorbell parameters */
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_ETH_TX_BD_PROD_CMD);
	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	/* Register doorbell with doorbell recovery mechanism */
	rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
						&txq->tx_db, DB_REC_WIDTH_32B,
						DB_REC_KERNEL);

	return rc;
}

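/* Bring-up order: start the vport first (it carries the MTU, GRO and VLAN
 * stripping policy), then per fastpath start the Rx queue, the XDP
 * forwarding queue and the per-TC Tx queues, and finish by activating the
 * vport together with the RSS configuration in one vport-update ramrod.
 */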
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};
	int rc, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-PORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	start.handle_ptp_pkts = !!(edev->ptp);
	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);
	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		goto out;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qed_rxq_start_ret_params ret_params;
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&ret_params, 0, sizeof(ret_params));
			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.p_sb = fp->sb_info;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, i, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt, &ret_params);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				goto out;
			}

			/* Use the return parameters */
			rxq->hw_rxq_prod_addr = ret_params.p_prod;
			rxq->handle = ret_params.p_handle;

			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
			if (rc)
				goto out;

			bpf_prog_add(edev->xdp_prog, 1);
			fp->rxq->xdp_prog = edev->xdp_prog;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
						    TX_PI(cos));
				if (rc)
					goto out;
			}
		}
	}

	/* Prepare and send the vport enable */
	vport_update_params->vport_id = start.vport_id;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 1;

	if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params->update_tx_switching_flg = 1;
		vport_update_params->tx_switching_flg = 1;
	}

	qede_fill_rss_params(edev, &vport_update_params->rss_params,
			     &vport_update_params->update_rss_flg);

	rc = edev->ops->vport_update(cdev, vport_update_params);
	if (rc)
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);

out:
	vfree(vport_update_params);
	return rc;
}

enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
	QEDE_UNLOAD_RECOVERY,
};

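/* Tear the device down. In QEDE_UNLOAD_NORMAL the link is reset and the
 * queues are stopped gracefully through FW; in QEDE_UNLOAD_RECOVERY the FW
 * is assumed unusable, so the queue-stop flow is skipped and any packets
 * still on the Tx rings are reclaimed locally instead.
 */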
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
			bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	if (!is_locked)
		__qede_lock(edev);

	clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	if (mode != QEDE_UNLOAD_RECOVERY)
		edev->state = QEDE_STATE_CLOSED;

	qede_rdma_dev_event_close(edev);

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	if (mode != QEDE_UNLOAD_RECOVERY) {
		/* Reset the link */
		memset(&link_params, 0, sizeof(link_params));
		link_params.link_up = false;
		edev->ops->common->set_link(edev->cdev, &link_params);

		rc = qede_stop_queues(edev);
		if (rc) {
#ifdef CONFIG_RFS_ACCEL
			if (edev->dev_info.common.b_arfs_capable) {
				qede_poll_for_freeing_arfs_filters(edev);
				if (edev->ndev->rx_cpu_rmap)
					free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

				edev->ndev->rx_cpu_rmap = NULL;
			}
#endif
			qede_sync_free_irqs(edev);
			goto out;
		}

		DP_INFO(edev, "Stopped Queues\n");
	}

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	if (edev->dev_info.common.b_arfs_capable) {
		qede_poll_for_freeing_arfs_filters(edev);
		qede_free_arfs(edev);
	}

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	if (mode == QEDE_UNLOAD_RECOVERY)
		qede_empty_tx_queues(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	if (!is_locked)
		__qede_unlock(edev);

	if (mode != QEDE_UNLOAD_RECOVERY)
		DP_NOTICE(edev, "Link is down\n");

	edev->ptp_skip_txts = 0;

	DP_INFO(edev, "Ending qede unload\n");
}

enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
	QEDE_LOAD_RECOVERY,
};

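/* Bring the device up: size and allocate the fastpath array, init its
 * contents, allocate queue memory, attach NAPI and IRQs, then start the
 * vport and queues. Each err label below unwinds exactly the steps that
 * succeeded before the failure.
 */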
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
		     bool is_locked)
{
	struct qed_link_params link_params;
	struct ethtool_coalesce coal = {};
	u8 num_tc;
	int rc, i;

	DP_INFO(edev, "Starting qede load\n");

	if (!is_locked)
		__qede_lock(edev);

	rc = qede_set_num_queues(edev);
	if (rc)
		goto out;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto out;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	if (qede_alloc_arfs(edev)) {
		edev->ndev->features &= ~NETIF_F_NTUPLE;
		edev->dev_info.common.b_arfs_capable = false;
	}

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	num_tc = netdev_get_num_tc(edev->ndev);
	num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
	qede_setup_tc(edev->ndev, num_tc);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	edev->state = QEDE_STATE_OPEN;

	coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;

	for_each_queue(i) {
		if (edev->coal_entry[i].isvalid) {
			coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
			coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
		}
		__qede_unlock(edev);
		qede_set_per_coalesce(edev->ndev, i, &coal);
		__qede_lock(edev);
	}
	DP_INFO(edev, "qede load finished successfully\n");

	goto out;
err4:
	qede_sync_free_irqs(edev);
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
out:
	if (!is_locked)
		__qede_unlock(edev);

	return rc;
}

/* 'func' should be able to run between unload and reload assuming the
 * interface is actually running, or afterwards in case it's currently DOWN.
 */
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked)
{
	if (!is_locked)
		__qede_lock(edev);

	/* Since the qede lock is held, the internal state can't change even
	 * if the netdev state starts transitioning. Check whether the current
	 * internal configuration indicates the device is up, then reload.
	 */
	if (edev->state == QEDE_STATE_OPEN) {
		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
		if (args)
			args->func(edev, args);
		qede_load(edev, QEDE_LOAD_RELOAD, true);

		/* Since no one is going to do it for us, re-configure */
		qede_config_rx_mode(edev->ndev);
	} else if (args) {
		args->func(edev, args);
	}

	if (!is_locked)
		__qede_unlock(edev);
}

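/* Illustrative sketch only: a reconfiguration path would typically wrap its
 * change in qede_reload() roughly like this, with qede_args_cb standing in
 * for any (hypothetical) callback that mutates edev while the interface is
 * down:
 *
 *	struct qede_reload_args args = {
 *		.func = qede_args_cb,	// hypothetical callback
 *	};
 *
 *	qede_reload(edev, &args, false);
 *
 * The callback runs between unload and reload when the device is up, or
 * immediately when it is down.
 */
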
/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
	if (rc)
		return rc;

	udp_tunnel_nic_reset_ntf(ndev);

	edev->ops->common->update_drv_state(edev->cdev, true);

	return 0;
}

static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);

	if (edev->cdev)
		edev->ops->common->update_drv_state(edev->cdev, false);

	return 0;
}

static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
			qede_rdma_dev_event_open(edev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
			qede_rdma_dev_event_close(edev);
		}
	}
}

static void qede_schedule_recovery_handler(void *dev)
{
	struct qede_dev *edev = dev;

	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev,
			  "Avoid scheduling recovery handling - already in the recovery state\n");
		return;
	}

	set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled a recovery handler\n");
}

static void qede_recovery_failed(struct qede_dev *edev)
{
	netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");

	netif_device_detach(edev->ndev);

	if (edev->cdev)
		edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
}

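/* Full recovery cycle: quiesce and unload (if the port was open), detach
 * the driver from the failed device instance, re-probe it from scratch and
 * reload with the previous configuration. edev itself survives the cycle;
 * only the qed device underneath it is recreated.
 */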
static void qede_recovery_handler(struct qede_dev *edev)
{
	u32 curr_state = edev->state;
	int rc;

	DP_NOTICE(edev, "Starting a recovery process\n");

	/* No need to acquire the qede lock here, since qede_sp_task takes it
	 * before calling this function.
	 */
	edev->state = QEDE_STATE_RECOVERY;

	edev->ops->common->recovery_prolog(edev->cdev);

	if (curr_state == QEDE_STATE_OPEN)
		qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);

	__qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);

	rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
			  IS_VF(edev), QEDE_PROBE_RECOVERY);
	if (rc) {
		edev->cdev = NULL;
		goto err;
	}

	if (curr_state == QEDE_STATE_OPEN) {
		rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
		if (rc)
			goto err;

		qede_config_rx_mode(edev->ndev);
		udp_tunnel_nic_reset_ntf(edev->ndev);
	}

	edev->state = curr_state;

	DP_NOTICE(edev, "Recovery handling is done\n");

	return;

err:
	qede_recovery_failed(edev);
}

static void qede_atomic_hw_err_handler(struct qede_dev *edev)
{
	struct qed_dev *cdev = edev->cdev;

	DP_NOTICE(edev,
		  "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	/* Get a call trace of the flow that led to the error */
	WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));

	/* Prevent HW attentions from being reasserted */
	if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
		edev->ops->common->attn_clr_enable(cdev, true);

	DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
}

static void qede_generic_hw_err_handler(struct qede_dev *edev)
{
	DP_NOTICE(edev,
		  "Generic sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	if (edev->devlink) {
		DP_NOTICE(edev, "Reporting fatal error to devlink\n");
		edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
	}

	clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);

	DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
}

static void qede_set_hw_err_flags(struct qede_dev *edev,
				  enum qed_hw_err_type err_type)
{
	unsigned long err_flags = 0;

	switch (err_type) {
	case QED_HW_ERR_DMAE_FAIL:
		set_bit(QEDE_ERR_WARN, &err_flags);
		fallthrough;
	case QED_HW_ERR_MFW_RESP_FAIL:
	case QED_HW_ERR_HW_ATTN:
	case QED_HW_ERR_RAMROD_FAIL:
	case QED_HW_ERR_FW_ASSERT:
		set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
		set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
		/* Mark this error as recoverable and start recovery */
		set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
		break;

	default:
		DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
		break;
	}

	edev->err_flags |= err_flags;
}

static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type)
{
	struct qede_dev *edev = dev;

	/* Fan failure cannot be masked by handling of another HW error or by a
	 * concurrent recovery process.
	 */
	if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	     edev->state == QEDE_STATE_RECOVERY) &&
	     err_type != QED_HW_ERR_FAN_FAIL) {
		DP_INFO(edev,
			"Avoid scheduling an error handling while another HW error is being handled\n");
		return;
	}

	if (err_type >= QED_HW_ERR_LAST) {
		DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
		clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
		return;
	}

	edev->last_err_type = err_type;
	qede_set_hw_err_flags(edev, err_type);
	qede_atomic_hw_err_handler(edev);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
}

static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
	if (netif_xmit_stopped(netdev_txq))
		return true;

	return false;
}

static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qede_dev *edev = dev;
	struct netdev_hw_addr *ha;
	int i;

	if (edev->ndev->features & NETIF_F_IP_CSUM)
		data->feat_flags |= QED_TLV_IP_CSUM;
	if (edev->ndev->features & NETIF_F_TSO)
		data->feat_flags |= QED_TLV_LSO;

	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
	eth_zero_addr(data->mac[1]);
	eth_zero_addr(data->mac[2]);
	/* Copy the first two UC macs */
	netif_addr_lock_bh(edev->ndev);
	i = 1;
	netdev_for_each_uc_addr(ha, edev->ndev) {
		ether_addr_copy(data->mac[i++], ha->addr);
		if (i == QED_TLV_MAC_COUNT)
			break;
	}

	netif_addr_unlock_bh(edev->ndev);
}

static void qede_get_eth_tlv_data(void *dev, void *data)
{
	struct qed_mfw_tlv_eth *etlv = data;
	struct qede_dev *edev = dev;
	struct qede_fastpath *fp;
	int i;

	etlv->lso_maxoff_size = 0xFFFF;
	etlv->lso_maxoff_size_set = true;
	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
	etlv->lso_minseg_size_set = true;
	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
	etlv->prom_mode_set = true;
	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
	etlv->tx_descr_size_set = true;
	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
	etlv->rx_descr_size_set = true;
	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
	etlv->iov_offload_set = true;

	/* Fill information regarding queues; this should be done under the
	 * qede lock to guarantee those don't change beneath our feet.
	 */
	etlv->txqs_empty = true;
	etlv->rxqs_empty = true;
	etlv->num_txqs_full = 0;
	etlv->num_rxqs_full = 0;

	__qede_lock(edev);
	for_each_queue(i) {
		fp = &edev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_TX) {
			struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);

			if (txq->sw_tx_cons != txq->sw_tx_prod)
				etlv->txqs_empty = false;
			if (qede_is_txq_full(edev, txq))
				etlv->num_txqs_full++;
		}
		if (fp->type & QEDE_FASTPATH_RX) {
			if (qede_has_rx_work(fp->rxq))
				etlv->rxqs_empty = false;

			/* This one is a bit tricky; firmware might stop
			 * placing packets even if the ring is not yet full,
			 * so give an approximation.
			 */
			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
			    RX_RING_SIZE - 100)
				etlv->num_rxqs_full++;
		}
	}
	__qede_unlock(edev);

	etlv->txqs_empty_set = true;
	etlv->rxqs_empty_set = true;
	etlv->num_txqs_full_set = true;
	etlv->num_rxqs_full_set = true;
}

/**
 * qede_io_error_detected(): Called when a PCI error is detected.
 *
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * Return: pci_ers_result_t.
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev)
		return PCI_ERS_RESULT_NONE;

	DP_NOTICE(edev, "IO error detected [%d]\n", state);

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev, "Device already in the recovery state\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_NONE;
	}

	/* PF handles the recovery of its VFs */
	if (IS_VF(edev)) {
		DP_VERBOSE(edev, QED_MSG_IOV,
			   "VF recovery is handled by its PF\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	set_bit(QEDE_SP_AER, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	__qede_unlock(edev);

	return PCI_ERS_RESULT_CAN_RECOVER;
}