xref: /linux/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c (revision d8d2b1f81530988abe2e2bfaceec1c5d30b9a0b4)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
3 
4 #include <linux/types.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/netdevice.h>
8 #include <linux/string.h>
9 #include <linux/etherdevice.h>
10 
11 #include "../libwx/wx_type.h"
12 #include "../libwx/wx_hw.h"
13 #include "../libwx/wx_lib.h"
14 #include "../libwx/wx_mbx.h"
15 #include "../libwx/wx_vf.h"
16 #include "../libwx/wx_vf_common.h"
17 #include "../libwx/wx_ethtool.h"
18 #include "txgbevf_type.h"
19 
20 /* txgbevf_pci_tbl - PCI Device ID Table
21  *
22  * Wildcard entries (PCI_ANY_ID) should come last
23  * Last entry must be all 0s
24  *
25  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
26  *   Class, Class Mask, private data (not used) }
27  */
static const struct pci_device_id txgbevf_pci_tbl[] = {
	/* SP-family VFs (mapped to wx_mac_sp in txgbevf_init_type_code()) */
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_SP1000), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_WX1820), 0},
	/* AML-family VFs (mapped to wx_mac_aml) */
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML500F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML510F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5024), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5124), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML503F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML513F), 0},
	/* required last entry */
	{ .device = 0 }
};
40 
/* net_device callbacks; open/close/set_mac are the shared VF helpers
 * from libwx, Tx is the common wx_xmit_frame path.
 */
static const struct net_device_ops txgbevf_netdev_ops = {
	.ndo_open               = wxvf_open,
	.ndo_stop               = wxvf_close,
	.ndo_start_xmit         = wx_xmit_frame,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = wx_set_mac_vf,
};
48 
49 static void txgbevf_set_num_queues(struct wx *wx)
50 {
51 	u32 def_q = 0, num_tcs = 0;
52 	u16 rss, queue;
53 	int ret = 0;
54 
55 	/* Start with base case */
56 	wx->num_rx_queues = 1;
57 	wx->num_tx_queues = 1;
58 
59 	spin_lock_bh(&wx->mbx.mbx_lock);
60 	/* fetch queue configuration from the PF */
61 	ret = wx_get_queues_vf(wx, &num_tcs, &def_q);
62 	spin_unlock_bh(&wx->mbx.mbx_lock);
63 
64 	if (ret)
65 		return;
66 
67 	/* we need as many queues as traffic classes */
68 	if (num_tcs > 1) {
69 		wx->num_rx_queues = num_tcs;
70 	} else {
71 		rss = min_t(u16, num_online_cpus(), TXGBEVF_MAX_RSS_NUM);
72 		queue = min_t(u16, wx->mac.max_rx_queues, wx->mac.max_tx_queues);
73 		rss = min_t(u16, queue, rss);
74 
75 		if (wx->vfinfo->vf_api >= wx_mbox_api_13) {
76 			wx->num_rx_queues = rss;
77 			wx->num_tx_queues = rss;
78 		}
79 	}
80 }
81 
82 static void txgbevf_init_type_code(struct wx *wx)
83 {
84 	switch (wx->device_id) {
85 	case TXGBEVF_DEV_ID_SP1000:
86 	case TXGBEVF_DEV_ID_WX1820:
87 		wx->mac.type = wx_mac_sp;
88 		break;
89 	case TXGBEVF_DEV_ID_AML500F:
90 	case TXGBEVF_DEV_ID_AML510F:
91 	case TXGBEVF_DEV_ID_AML5024:
92 	case TXGBEVF_DEV_ID_AML5124:
93 	case TXGBEVF_DEV_ID_AML503F:
94 	case TXGBEVF_DEV_ID_AML513F:
95 		wx->mac.type = wx_mac_aml;
96 		break;
97 	default:
98 		wx->mac.type = wx_mac_unknown;
99 		break;
100 	}
101 }
102 
/**
 * txgbevf_sw_init - Initialize general software structures
 * @wx: board private structure to initialize
 *
 * Sets up PCIe/capability state, the VF mailbox, the MAC type, resets the
 * hardware via the PF, picks up (or randomizes) the MAC address, and fills
 * in default ring sizes, interrupt throttling and work limits.  On failure
 * every allocation made along the way is released again.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int txgbevf_sw_init(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	struct pci_dev *pdev = wx->pdev;
	int err;

	/* Initialize pcie info and common capability flags */
	err = wx_sw_init(wx);
	if (err < 0)
		goto err_wx_sw_init;

	/* Initialize the mailbox */
	err = wx_init_mbx_params_vf(wx);
	if (err)
		goto err_init_mbx_params;

	/* max q_vectors */
	wx->mac.max_msix_vectors = TXGBEVF_MAX_MSIX_VECTORS;
	/* Initialize the device type */
	txgbevf_init_type_code(wx);
	/* lock to protect mailbox accesses */
	spin_lock_init(&wx->mbx.mbx_lock);

	/* a failed reset usually means the PF side is not up yet */
	err = wx_reset_hw_vf(wx);
	if (err) {
		wx_err(wx, "PF still in reset state. Is the PF interface up?\n");
		goto err_reset_hw;
	}
	wx_init_hw_vf(wx);
	wx_negotiate_api_vf(wx);
	/* wx->mac.addr was filled in from the PF during reset/init above */
	if (is_zero_ether_addr(wx->mac.addr))
		dev_info(&pdev->dev,
			 "MAC address not assigned by administrator.\n");
	eth_hw_addr_set(netdev, wx->mac.addr);

	/* no usable address from the PF: generate a random one locally */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		ether_addr_copy(wx->mac.addr, netdev->dev_addr);
		ether_addr_copy(wx->mac.perm_addr, netdev->dev_addr);
	}

	wx->mac.max_tx_queues = TXGBEVF_MAX_TX_QUEUES;
	wx->mac.max_rx_queues = TXGBEVF_MAX_RX_QUEUES;
	/* Enable dynamic interrupt throttling rates */
	wx->adaptive_itr = true;
	wx->rx_itr_setting = 1;
	wx->tx_itr_setting = 1;
	/* set default ring sizes */
	wx->tx_ring_count = TXGBEVF_DEFAULT_TXD;
	wx->rx_ring_count = TXGBEVF_DEFAULT_RXD;
	/* set default work limits */
	wx->tx_work_limit = TXGBEVF_DEFAULT_TX_WORK;
	wx->rx_work_limit = TXGBEVF_DEFAULT_RX_WORK;

	wx->set_num_queues = txgbevf_set_num_queues;

	return 0;
err_reset_hw:
	kfree(wx->vfinfo);
err_init_mbx_params:
	kfree(wx->rss_key);
	kfree(wx->mac_table);
err_wx_sw_init:
	return err;
}
169 
/**
 * txgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in txgbevf_pci_tbl
 *
 * Return: return 0 on success, negative on failure
 *
 * txgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int txgbevf_probe(struct pci_dev *pdev,
			 const struct pci_device_id __always_unused *ent)
{
	struct net_device *netdev;
	struct wx *wx = NULL;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* require 64-bit DMA for both streaming and coherent mappings */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_pci_disable_dev;
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   dev_driver_string(&pdev->dev));
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_disable_dev;
	}

	pci_set_master(pdev);

	/* the netdev private area is the whole struct wx; freed by devm */
	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct wx),
					 TXGBEVF_MAX_TX_QUEUES,
					 TXGBEVF_MAX_RX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_pci_release_regions;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	wx = netdev_priv(netdev);
	wx->netdev = netdev;
	wx->pdev = pdev;

	wx->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
					NETIF_MSG_PROBE | NETIF_MSG_LINK);
	/* map BAR 0 (device registers); unmapped automatically via devm */
	wx->hw_addr = devm_ioremap(&pdev->dev,
				   pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!wx->hw_addr) {
		err = -EIO;
		goto err_pci_release_regions;
	}

	/* map BAR 4 as well; also devm-managed */
	wx->b4_addr = devm_ioremap(&pdev->dev,
				   pci_resource_start(pdev, 4),
				   pci_resource_len(pdev, 4));
	if (!wx->b4_addr) {
		err = -EIO;
		goto err_pci_release_regions;
	}

	wx->driver_name = KBUILD_MODNAME;
	wx_set_ethtool_ops_vf(netdev);
	netdev->netdev_ops = &txgbevf_netdev_ops;

	/* setup the private structure */
	err = txgbevf_sw_init(wx);
	if (err)
		goto err_pci_release_regions;

	netdev->features |= NETIF_F_HIGHDMA;

	eth_hw_addr_set(netdev, wx->mac.perm_addr);
	ether_addr_copy(netdev->perm_addr, wx->mac.addr);

	wxvf_init_service(wx);
	err = wx_init_interrupt_scheme(wx);
	if (err)
		goto err_free_sw_init;

	wx_get_fw_version_vf(wx);
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, wx);
	/* keep Tx queues stopped for now; presumably restarted when the
	 * interface is opened / link comes up — see wxvf_open
	 */
	netif_tx_stop_all_queues(netdev);

	return 0;

err_register:
	wx_clear_interrupt_scheme(wx);
err_free_sw_init:
	/* undo wxvf_init_service() and txgbevf_sw_init() allocations */
	timer_delete_sync(&wx->service_timer);
	cancel_work_sync(&wx->service_task);
	kfree(wx->vfinfo);
	kfree(wx->rss_key);
	kfree(wx->mac_table);
err_pci_release_regions:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_disable_dev:
	pci_disable_device(pdev);
	return err;
}
287 
/**
 * txgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * txgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void txgbevf_remove(struct pci_dev *pdev)
{
	/* teardown is shared with the other WangXun VF drivers in libwx */
	wxvf_remove(pdev);
}
301 
/* suspend/resume come from the shared libwx VF code */
static DEFINE_SIMPLE_DEV_PM_OPS(txgbevf_pm_ops, wxvf_suspend, wxvf_resume);

static struct pci_driver txgbevf_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = txgbevf_pci_tbl,
	.probe    = txgbevf_probe,
	.remove   = txgbevf_remove,
	.shutdown = wxvf_shutdown,
	/* Power Management Hooks */
	.driver.pm	= pm_sleep_ptr(&txgbevf_pm_ops)
};
313 
/* registers txgbevf_driver on module load, unregisters on unload */
module_pci_driver(txgbevf_driver);

MODULE_DEVICE_TABLE(pci, txgbevf_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
320