// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>

#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_ptp.h"
#include "../libwx/wx_mbx.h"
#include "../libwx/wx_sriov.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
#include "ngbe_hw.h"
#include "ngbe_ethtool.h"

char ngbe_driver_name[] = "ngbe";

/* ngbe_pci_tbl - PCI Device ID Table
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ngbe_pci_tbl[] = {
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W), 0},
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2), 0},
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2S), 0},
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4), 0},
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4S), 0},
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2), 0},
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S), 0},
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4), 0},
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S), 0},
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860LC), 0},
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1), 0},
	{ PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1L), 0},
	/* required last entry */
	{ .device = 0 }
};

/**
 * ngbe_init_type_code - Initialize the shared code
 * @wx: pointer to hardware structure
 **/
static void ngbe_init_type_code(struct wx *wx)
{
	int wol_mask = 0, ncsi_mask = 0;
	u16 type_mask = 0, val;

	wx->mac.type = wx_mac_em;
	type_mask = (u16)(wx->subsystem_device_id & NGBE_OEM_MASK);
	ncsi_mask = wx->subsystem_device_id & NGBE_NCSI_MASK;
	wol_mask = wx->subsystem_device_id & NGBE_WOL_MASK;

	val = rd32(wx, WX_CFG_PORT_ST);
	wx->mac_type = (val & BIT(7)) >> 7 ?
		       em_mac_type_rgmii :
		       em_mac_type_mdi;

	wx->wol_hw_supported = (wol_mask == NGBE_WOL_SUP) ? 1 : 0;
	wx->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK ||
			    type_mask == NGBE_SUBID_OCP_CARD) ? 1 : 0;

	switch (type_mask) {
	case NGBE_SUBID_LY_YT8521S_SFP:
	case NGBE_SUBID_LY_M88E1512_SFP:
	case NGBE_SUBID_YT8521S_SFP_GPIO:
	case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO:
		wx->gpio_ctrl = 1;
		break;
	default:
		wx->gpio_ctrl = 0;
		break;
	}
}

/**
 * ngbe_sw_init - Initialize general software structures
 * @wx: board private structure to initialize
 **/
static int ngbe_sw_init(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;
	u16 msix_count = 0;
	int err = 0;

	wx->mac.num_rar_entries = NGBE_RAR_ENTRIES;
	wx->mac.max_rx_queues = NGBE_MAX_RX_QUEUES;
	wx->mac.max_tx_queues = NGBE_MAX_TX_QUEUES;
	wx->mac.mcft_size = NGBE_MC_TBL_SIZE;
	wx->mac.vft_size = NGBE_SP_VFT_TBL_SIZE;
	wx->mac.rx_pb_size = NGBE_RX_PB_SIZE;
	wx->mac.tx_pb_size = NGBE_TDB_PB_SZ;

	/* PCI config space info */
	err = wx_sw_init(wx);
	if (err < 0)
		return err;

	/* mac type, phy type, oem type */
	ngbe_init_type_code(wx);

	/* Set common capability flags and settings */
	wx->max_q_vectors = NGBE_MAX_MSIX_VECTORS;
	err = wx_get_pcie_msix_counts(wx, &msix_count, NGBE_MAX_MSIX_VECTORS);
	if (err)
		dev_err(&pdev->dev, "Do not support MSI-X\n");
	wx->mac.max_msix_vectors = msix_count;

	wx->ring_feature[RING_F_RSS].limit = min_t(int, NGBE_MAX_RSS_INDICES,
						   num_online_cpus());
	wx->rss_enabled = true;

	/* enable itr by default in dynamic mode */
	wx->rx_itr_setting = 1;
	wx->tx_itr_setting = 1;

	/* set default ring sizes */
	wx->tx_ring_count = NGBE_DEFAULT_TXD;
	wx->rx_ring_count = NGBE_DEFAULT_RXD;

	/* set default work limits */
	wx->tx_work_limit = NGBE_DEFAULT_TX_WORK;
	wx->rx_work_limit = NGBE_DEFAULT_RX_WORK;

	wx->mbx.size = WX_VXMAILBOX_SIZE;
	wx->setup_tc = ngbe_setup_tc;
	set_bit(0, &wx->fwd_bitmask);

	return 0;
}

/**
 * ngbe_irq_enable - Enable default interrupt generation settings
 * @wx: board private structure
 * @queues: enable all queue interrupts
 **/
static void ngbe_irq_enable(struct wx *wx, bool queues)
{
	u32 mask;

	/* enable misc interrupt */
	mask = NGBE_PX_MISC_IEN_MASK;

	wr32(wx, WX_GPIO_DDR, WX_GPIO_DDR_0);
	wr32(wx, WX_GPIO_INTEN, WX_GPIO_INTEN_0 | WX_GPIO_INTEN_1);
	wr32(wx, WX_GPIO_INTTYPE_LEVEL, 0x0);
	wr32(wx, WX_GPIO_POLARITY, wx->gpio_ctrl ? 0 : 0x3);

	wr32(wx, WX_PX_MISC_IEN, mask);

	/* unmask interrupts */
	if (queues)
		wx_intr_enable(wx, NGBE_INTR_ALL);
	else
		wx_intr_enable(wx, NGBE_INTR_MISC(wx));
}

/**
 * ngbe_intr - msi/legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
{
	struct wx_q_vector *q_vector;
	struct wx *wx = data;
	struct pci_dev *pdev;
	u32 eicr, eicr_misc;

	q_vector = wx->q_vector[0];
	pdev = wx->pdev;

	eicr = wx_misc_isb(wx, WX_ISB_VEC0);
	if (!eicr) {
		/* shared interrupt alert!
		 * this is not our interrupt, so re-enable the interrupts
		 * that were masked before the EICR read.
		 */
		if (netif_running(wx->netdev))
			ngbe_irq_enable(wx, true);
		return IRQ_NONE; /* Not our interrupt */
	}
	wx->isb_mem[WX_ISB_VEC0] = 0;
	if (!(pdev->msi_enabled))
		wr32(wx, WX_PX_INTA, 1);

	eicr_misc = wx_misc_isb(wx, WX_ISB_MISC);
	if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC))
		wx_ptp_check_pps_event(wx);

	wx->isb_mem[WX_ISB_MISC] = 0;
	/* would disable interrupts here but it is auto disabled */
	napi_schedule_irqoff(&q_vector->napi);

	if (netif_running(wx->netdev))
		ngbe_irq_enable(wx, false);

	return IRQ_HANDLED;
}
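
/**
 * __ngbe_msix_misc - handle decoded misc interrupt causes
 * @wx: board private structure
 * @eicr: misc interrupt cause bits read from the ISB
 *
 * Dispatches VF mailbox and PTP timesync events, then re-enables the
 * misc-only interrupt mask if the netdev is running.
 **/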
static irqreturn_t __ngbe_msix_misc(struct wx *wx, u32 eicr)
{
	if (eicr & NGBE_PX_MISC_IC_VF_MBOX)
		wx_msg_task(wx);

	if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC))
		wx_ptp_check_pps_event(wx);

	/* re-enable the original interrupt state, no lsc, no queues */
	if (netif_running(wx->netdev))
		ngbe_irq_enable(wx, false);

	return IRQ_HANDLED;
}
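
/**
 * ngbe_msix_misc - MSI-X other/misc vector handler
 * @irq: interrupt number
 * @data: pointer to the board private structure
 **/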
static irqreturn_t ngbe_msix_misc(int __always_unused irq, void *data)
{
	struct wx *wx = data;
	u32 eicr;

	eicr = wx_misc_isb(wx, WX_ISB_MISC);

	return __ngbe_msix_misc(wx, eicr);
}
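
/**
 * ngbe_misc_and_queue - handler for the shared misc/queue vector
 * @irq: interrupt number
 * @data: pointer to the board private structure
 *
 * Used when vector 0 is shared between misc and queue interrupts
 * (WX_FLAG_IRQ_VECTOR_SHARED). A zero misc cause means the event came
 * from the queue, so NAPI is scheduled; otherwise the misc causes are
 * handled.
 **/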
static irqreturn_t ngbe_misc_and_queue(int __always_unused irq, void *data)
{
	struct wx_q_vector *q_vector;
	struct wx *wx = data;
	u32 eicr;

	eicr = wx_misc_isb(wx, WX_ISB_MISC);
	if (!eicr) {
		/* queue interrupt */
		q_vector = wx->q_vector[0];
		napi_schedule_irqoff(&q_vector->napi);
		if (netif_running(wx->netdev))
			ngbe_irq_enable(wx, true);
		return IRQ_HANDLED;
	}

	return __ngbe_msix_misc(wx, eicr);
}

/**
 * ngbe_request_msix_irqs - Initialize MSI-X interrupts
 * @wx: board private structure
 *
 * ngbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ngbe_request_msix_irqs(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	int vector, err;

	for (vector = 0; vector < wx->num_q_vectors; vector++) {
		struct wx_q_vector *q_vector = wx->q_vector[vector];
		struct msix_entry *entry = &wx->msix_q_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring)
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-TxRx-%d", netdev->name, entry->entry);
		else
			/* skip this unused q_vector */
			continue;

		err = request_irq(entry->vector, wx_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
			       q_vector->name, err);
			goto free_queue_irqs;
		}
	}

	/* Due to hardware design, when num_vfs < 7, the PF can use vector 0
	 * for misc and vector 1 for the queue. But when num_vfs == 7,
	 * vector[1] is assigned to vf6, so misc and queue must share
	 * interrupt vector[0].
	 */
	if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
		err = request_irq(wx->msix_entry->vector,
				  ngbe_misc_and_queue, 0, netdev->name, wx);
	else
		err = request_irq(wx->msix_entry->vector,
				  ngbe_msix_misc, 0, netdev->name, wx);

	if (err) {
		wx_err(wx, "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(wx->msix_q_entries[vector].vector,
			 wx->q_vector[vector]);
	}
	wx_reset_interrupt_capability(wx);
	return err;
}

/**
 * ngbe_request_irq - initialize interrupts
 * @wx: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ngbe_request_irq(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	struct pci_dev *pdev = wx->pdev;
	int err;

	if (pdev->msix_enabled)
		err = ngbe_request_msix_irqs(wx);
	else if (pdev->msi_enabled)
		err = request_irq(pdev->irq, ngbe_intr, 0,
				  netdev->name, wx);
	else
		err = request_irq(pdev->irq, ngbe_intr, IRQF_SHARED,
				  netdev->name, wx);

	if (err)
		wx_err(wx, "request_irq failed, Error %d\n", err);

	return err;
}
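
/**
 * ngbe_disable_device - quiesce the hardware before down/reset
 * @wx: board private structure
 *
 * Marks VFs as inactive, disables RX/TX queues and interrupts, powers
 * down SFP modules when GPIO control is present, and updates statistics.
 **/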
static void ngbe_disable_device(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u32 i;

	if (wx->num_vfs) {
		/* Clear EITR Select mapping */
		wr32(wx, WX_PX_ITRSEL, 0);

		/* Mark all the VFs as inactive */
		for (i = 0; i < wx->num_vfs; i++)
			wx->vfinfo[i].clear_to_send = 0;
		wx->notify_down = true;
		/* ping all the active VFs to let them know we are going down */
		wx_ping_all_vfs_with_link_status(wx, false);
		wx->notify_down = false;

		/* Disable all VFTE/VFRE TX/RX */
		wx_disable_vf_rx_tx(wx);
	}

	/* disable all enabled rx queues */
	for (i = 0; i < wx->num_rx_queues; i++)
		/* this call also flushes the previous write */
		wx_disable_rx_queue(wx, wx->rx_ring[i]);
	/* disable receives */
	wx_disable_rx(wx);
	wx_napi_disable_all(wx);
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);
	if (wx->gpio_ctrl)
		ngbe_sfp_modules_txrx_powerctl(wx, false);
	wx_irq_disable(wx);
	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < wx->num_tx_queues; i++) {
		u8 reg_idx = wx->tx_ring[i]->reg_idx;

		wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
	}

	wx_update_stats(wx);
}
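
/**
 * ngbe_reset - restore the default MAC filter and PTP state
 * @wx: board private structure
 **/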
static void ngbe_reset(struct wx *wx)
{
	wx_flush_sw_mac_table(wx);
	wx_mac_set_default_filter(wx, wx->mac.addr);
	if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
		wx_ptp_reset(wx);
}
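
/**
 * ngbe_down - stop the link, quiesce the device and clean all rings
 * @wx: board private structure
 **/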
void ngbe_down(struct wx *wx)
{
	phylink_stop(wx->phylink);
	ngbe_disable_device(wx);
	ngbe_reset(wx);
	wx_clean_all_tx_rings(wx);
	wx_clean_all_rx_rings(wx);
}
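
/**
 * ngbe_up - bring the interface back up
 * @wx: board private structure
 *
 * Re-enables NAPI, transmit queues, interrupts and the PHY link, then
 * sets the PF Reset Done bit so PF/VF mailbox operations can work.
 **/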
void ngbe_up(struct wx *wx)
{
	wx_configure_vectors(wx);

	/* make sure to complete pre-operations */
	smp_mb__before_atomic();
	wx_napi_enable_all(wx);
	/* enable transmits */
	netif_tx_start_all_queues(wx->netdev);

	/* clear any pending interrupts, may auto mask */
	rd32(wx, WX_PX_IC(0));
	rd32(wx, WX_PX_MISC_IC);
	ngbe_irq_enable(wx, true);
	if (wx->gpio_ctrl)
		ngbe_sfp_modules_txrx_powerctl(wx, true);

	phylink_start(wx->phylink);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	wr32m(wx, WX_CFG_PORT_CTL,
	      WX_CFG_PORT_CTL_PFRSTD, WX_CFG_PORT_CTL_PFRSTD);
	if (wx->num_vfs)
		wx_ping_all_vfs_with_link_status(wx, false);
}

/**
 * ngbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).
 **/
static int ngbe_open(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	int err;

	wx_control_hw(wx, true);

	err = wx_setup_resources(wx);
	if (err)
		return err;

	wx_configure(wx);

	err = ngbe_request_irq(wx);
	if (err)
		goto err_free_resources;

	err = phylink_connect_phy(wx->phylink, wx->phydev);
	if (err)
		goto err_free_irq;

	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
	if (err)
		goto err_dis_phy;

	err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
	if (err)
		goto err_dis_phy;

	wx_ptp_init(wx);

	ngbe_up(wx);

	return 0;
err_dis_phy:
	phylink_disconnect_phy(wx->phylink);
err_free_irq:
	wx_free_irq(wx);
err_free_resources:
	wx_free_isb_resources(wx);
	wx_free_resources(wx);
	return err;
}

/**
 * ngbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ngbe_close(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	wx_ptp_stop(wx);
	ngbe_down(wx);
	wx_free_irq(wx);
	wx_free_isb_resources(wx);
	wx_free_resources(wx);
	phylink_disconnect_phy(wx->phylink);
	wx_control_hw(wx, false);

	return 0;
}
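
/**
 * ngbe_dev_shutdown - prepare the device for shutdown or suspend
 * @pdev: PCI device information struct
 * @enable_wake: set to true if Wake-on-LAN is armed
 **/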
static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct wx *wx = pci_get_drvdata(pdev);
	struct net_device *netdev;
	u32 wufc = wx->wol;

	netdev = wx->netdev;
	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		ngbe_close(netdev);
	wx_clear_interrupt_scheme(wx);
	rtnl_unlock();

	if (wufc) {
		wx_set_rx_mode(netdev);
		wx_configure_rx(wx);
		wr32(wx, NGBE_PSR_WKUP_CTL, wufc);
	} else {
		wr32(wx, NGBE_PSR_WKUP_CTL, 0);
	}
	pci_wake_from_d3(pdev, !!wufc);
	*enable_wake = !!wufc;
	wx_control_hw(wx, false);

	pci_disable_device(pdev);
}
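
/**
 * ngbe_shutdown - PCI shutdown callback
 * @pdev: PCI device information struct
 **/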
static void ngbe_shutdown(struct pci_dev *pdev)
{
	struct wx *wx = pci_get_drvdata(pdev);
	bool wake;

	wake = !!wx->wol;

	ngbe_dev_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ngbe_setup_tc - routine to configure net_device for multiple traffic
 * classes.
 *
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */
int ngbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct wx *wx = netdev_priv(dev);

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ngbe_close(dev);

	wx_clear_interrupt_scheme(wx);

	if (tc)
		netdev_set_num_tc(dev, tc);
	else
		netdev_reset_tc(dev);

	wx_init_interrupt_scheme(wx);

	if (netif_running(dev))
		ngbe_open(dev);

	return 0;
}

static const struct net_device_ops ngbe_netdev_ops = {
	.ndo_open = ngbe_open,
	.ndo_stop = ngbe_close,
	.ndo_change_mtu = wx_change_mtu,
	.ndo_start_xmit = wx_xmit_frame,
	.ndo_set_rx_mode = wx_set_rx_mode,
	.ndo_set_features = wx_set_features,
	.ndo_fix_features = wx_fix_features,
	.ndo_features_check = wx_features_check,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = wx_set_mac,
	.ndo_get_stats64 = wx_get_stats64,
	.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
	.ndo_hwtstamp_set = wx_hwtstamp_set,
	.ndo_hwtstamp_get = wx_hwtstamp_get,
};

/**
 * ngbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ngbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ngbe_probe initializes a wx device identified by a pci_dev structure.
 * The OS initialization, configuring of the wx private structure,
 * and a hardware reset occur.
 **/
static int ngbe_probe(struct pci_dev *pdev,
		      const struct pci_device_id __always_unused *ent)
{
	struct net_device *netdev;
	u32 e2rom_cksum_cap = 0;
	struct wx *wx = NULL;
	static int func_nums;
	u16 e2rom_ver = 0;
	u32 etrack_id = 0;
	u32 saved_ver = 0;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_pci_disable_dev;
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   ngbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed %d\n", err);
		goto err_pci_disable_dev;
	}

	pci_set_master(pdev);

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct wx),
					 NGBE_MAX_TX_QUEUES,
					 NGBE_MAX_RX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_pci_release_regions;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	wx = netdev_priv(netdev);
	wx->netdev = netdev;
	wx->pdev = pdev;
	wx->msg_enable = BIT(3) - 1;

	wx->hw_addr = devm_ioremap(&pdev->dev,
				   pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!wx->hw_addr) {
		err = -EIO;
		goto err_pci_release_regions;
	}

	/* The emerald supports up to 8 VFs per PF, but the physical
	 * function also needs one pool for basic networking.
	 */
	pci_sriov_set_totalvfs(pdev, NGBE_MAX_VFS_DRV_LIMIT);
	wx->driver_name = ngbe_driver_name;
	ngbe_set_ethtool_ops(netdev);
	netdev->netdev_ops = &ngbe_netdev_ops;

	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_RXHASH | NETIF_F_RXCSUM;
	netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID;
	netdev->vlan_features |= netdev->features;
	netdev->features |= NETIF_F_IPV6_CSUM | NETIF_F_VLAN_FEATURES;
	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features | NETIF_F_RXALL;
	netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
	netdev->features |= NETIF_F_HIGHDMA;
	netdev->hw_features |= NETIF_F_GRO;
	netdev->features |= NETIF_F_GRO;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	wx->bd_number = func_nums;
	/* setup the private structure */
	err = ngbe_sw_init(wx);
	if (err)
		goto err_pci_release_regions;

	/* check if flash load is done after hw power up */
	err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST);
	if (err)
		goto err_free_mac_table;
	err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PWRRST);
	if (err)
		goto err_free_mac_table;

	err = wx_mng_present(wx);
	if (err) {
		dev_err(&pdev->dev, "Management capability is not present\n");
		goto err_free_mac_table;
	}

	err = ngbe_reset_hw(wx);
	if (err) {
		dev_err(&pdev->dev, "HW Init failed: %d\n", err);
		goto err_free_mac_table;
	}

	if (wx->bus.func == 0) {
		wr32(wx, NGBE_CALSUM_CAP_STATUS, 0x0);
		wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, 0x0);
	} else {
		e2rom_cksum_cap = rd32(wx, NGBE_CALSUM_CAP_STATUS);
		saved_ver = rd32(wx, NGBE_EEPROM_VERSION_STORE_REG);
	}

	wx_init_eeprom_params(wx);
	if (wx->bus.func == 0 || e2rom_cksum_cap == 0) {
		/* make sure the EEPROM is ready */
		err = ngbe_eeprom_chksum_hostif(wx);
		if (err) {
			dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_free_mac_table;
		}
	}

	wx->wol = 0;
	if (wx->wol_hw_supported)
		wx->wol = NGBE_PSR_WKUP_CTL_MAG;

	netdev->ethtool->wol_enabled = !!(wx->wol);
	wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol);
	device_set_wakeup_enable(&pdev->dev, wx->wol);

	/* Save off EEPROM version number and Option Rom version which
	 * together make a unique identity for the eeprom
	 */
	if (saved_ver) {
		etrack_id = saved_ver;
	} else {
		wx_read_ee_hostif(wx,
				  wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H,
				  &e2rom_ver);
		etrack_id = e2rom_ver << 16;
		wx_read_ee_hostif(wx,
				  wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L,
				  &e2rom_ver);
		etrack_id |= e2rom_ver;
		wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, etrack_id);
	}
	snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
		 "0x%08x", etrack_id);

	eth_hw_addr_set(netdev, wx->mac.perm_addr);
	wx_mac_set_default_filter(wx, wx->mac.perm_addr);

	err = wx_init_interrupt_scheme(wx);
	if (err)
		goto err_free_mac_table;

	/* phy Interface Configuration */
	err = ngbe_mdio_init(wx);
	if (err)
		goto err_clear_interrupt_scheme;

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, wx);

	return 0;

err_register:
	phylink_destroy(wx->phylink);
	wx_control_hw(wx, false);
err_clear_interrupt_scheme:
	wx_clear_interrupt_scheme(wx);
err_free_mac_table:
	kfree(wx->rss_key);
	kfree(wx->mac_table);
err_pci_release_regions:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_disable_dev:
	pci_disable_device(pdev);
	return err;
}

/**
 * ngbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ngbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ngbe_remove(struct pci_dev *pdev)
{
	struct wx *wx = pci_get_drvdata(pdev);
	struct net_device *netdev;

	netdev = wx->netdev;
	wx_disable_sriov(wx);
	unregister_netdev(netdev);
	phylink_destroy(wx->phylink);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(wx->rss_key);
	kfree(wx->mac_table);
	wx_clear_interrupt_scheme(wx);

	pci_disable_device(pdev);
}
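
/**
 * ngbe_suspend - PCI suspend callback
 * @pdev: PCI device information struct
 * @state: target power state (unused)
 **/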
static int ngbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;

	ngbe_dev_shutdown(pdev, &wake);
	device_set_wakeup_enable(&pdev->dev, wake);

	return 0;
}
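
/**
 * ngbe_resume - PCI resume callback
 * @pdev: PCI device information struct
 *
 * Re-enables the PCI device, resets the hardware and reinitializes the
 * interrupt scheme, reopening the netdev if it was running.
 **/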
static int ngbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev;
	struct wx *wx;
	int err;

	wx = pci_get_drvdata(pdev);
	netdev = wx->netdev;

	err = pci_enable_device_mem(pdev);
	if (err) {
		wx_err(wx, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);
	device_wakeup_disable(&pdev->dev);

	ngbe_reset_hw(wx);
	rtnl_lock();
	err = wx_init_interrupt_scheme(wx);
	if (!err && netif_running(netdev))
		err = ngbe_open(netdev);
	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return 0;
}

static struct pci_driver ngbe_driver = {
	.name = ngbe_driver_name,
	.id_table = ngbe_pci_tbl,
	.probe = ngbe_probe,
	.remove = ngbe_remove,
	.suspend = ngbe_suspend,
	.resume = ngbe_resume,
	.shutdown = ngbe_shutdown,
	.sriov_configure = wx_pci_sriov_configure,
};

module_pci_driver(ngbe_driver);

MODULE_DEVICE_TABLE(pci, ngbe_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@net-swift.com>");
MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");