// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/etherdevice.h>

#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_mbx.h"
#include "../libwx/wx_vf.h"
#include "../libwx/wx_vf_common.h"
#include "txgbevf_type.h"

/* txgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id txgbevf_pci_tbl[] = {
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_SP1000), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_WX1820), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML500F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML510F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5024), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5124), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML503F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML513F), 0},
	/* required last entry */
	{ .device = 0 }
};

/* netdev callbacks; open/close/set_mac are shared VF implementations
 * from libwx (wx_vf_common / wx_lib).
 */
static const struct net_device_ops txgbevf_netdev_ops = {
	.ndo_open		= wxvf_open,
	.ndo_stop		= wxvf_close,
	.ndo_start_xmit		= wx_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= wx_set_mac_vf,
};

/**
 * txgbevf_set_num_queues - choose Rx/Tx queue counts for this VF
 * @wx: board private structure
 *
 * Queries the PF over the mailbox for the number of traffic classes and
 * the default queue, then sizes the queue counts accordingly.  On any
 * mailbox error the base single-queue configuration set at the top of
 * the function is kept.
 */
static void txgbevf_set_num_queues(struct wx *wx)
{
	u32 def_q = 0, num_tcs = 0;
	u16 rss, queue;
	int ret = 0;

	/* Start with base case */
	wx->num_rx_queues = 1;
	wx->num_tx_queues = 1;

	/* mbx_lock serializes all VF mailbox traffic with the PF */
	spin_lock_bh(&wx->mbx.mbx_lock);
	/* fetch queue configuration from the PF */
	ret = wx_get_queues_vf(wx, &num_tcs, &def_q);
	spin_unlock_bh(&wx->mbx.mbx_lock);

	if (ret)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1) {
		/* NOTE(review): only num_rx_queues tracks num_tcs here;
		 * num_tx_queues stays at the base value of 1.  This matches
		 * the long-standing ixgbevf pattern for DCB, but confirm it
		 * is what the PF expects for this hardware.
		 */
		wx->num_rx_queues = num_tcs;
	} else {
		/* RSS: bounded by online CPUs, the driver cap, and the
		 * smaller of the hardware Rx/Tx queue limits.
		 */
		rss = min_t(u16, num_online_cpus(), TXGBEVF_MAX_RSS_NUM);
		queue = min_t(u16, wx->mac.max_rx_queues, wx->mac.max_tx_queues);
		rss = min_t(u16, queue, rss);

		/* multiple queues only on mailbox API 1.3 or newer */
		if (wx->vfinfo->vf_api >= wx_mbox_api_13) {
			wx->num_rx_queues = rss;
			wx->num_tx_queues = rss;
		}
	}
}

/**
 * txgbevf_init_type_code - derive the MAC type from the PCI device ID
 * @wx: board private structure
 *
 * SP1000/WX1820 map to the SP MAC; all AML device IDs map to the AML
 * MAC; anything else is marked unknown.
 */
static void txgbevf_init_type_code(struct wx *wx)
{
	switch (wx->device_id) {
	case TXGBEVF_DEV_ID_SP1000:
	case TXGBEVF_DEV_ID_WX1820:
		wx->mac.type = wx_mac_sp;
		break;
	case TXGBEVF_DEV_ID_AML500F:
	case TXGBEVF_DEV_ID_AML510F:
	case TXGBEVF_DEV_ID_AML5024:
	case TXGBEVF_DEV_ID_AML5124:
	case TXGBEVF_DEV_ID_AML503F:
	case TXGBEVF_DEV_ID_AML513F:
		wx->mac.type = wx_mac_aml;
		break;
	default:
		wx->mac.type = wx_mac_unknown;
		break;
	}
}

/**
 * txgbevf_sw_init - initialize the software state of the adapter
 * @wx: board private structure
 *
 * Sets up common capabilities, mailbox parameters, MAC type, resets the
 * hardware via the PF, negotiates the mailbox API, and fills in the MAC
 * address plus default ring/ITR/work-limit settings.
 *
 * Return: 0 on success, negative errno on failure.  On failure every
 * allocation made up to that point is unwound (vfinfo from the mailbox
 * setup; rss_key/mac_table from wx_sw_init).
 */
static int txgbevf_sw_init(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	struct pci_dev *pdev = wx->pdev;
	int err;

	/* Initialize pcie info and common capability flags */
	err = wx_sw_init(wx);
	if (err < 0)
		goto err_wx_sw_init;

	/* Initialize the mailbox */
	err = wx_init_mbx_params_vf(wx);
	if (err)
		goto err_init_mbx_params;

	/* max q_vectors */
	wx->mac.max_msix_vectors = TXGBEVF_MAX_MSIX_VECTORS;
	/* Initialize the device type */
	txgbevf_init_type_code(wx);
	/* lock to protect mailbox accesses */
	spin_lock_init(&wx->mbx.mbx_lock);

	/* reset is performed by the PF; failure usually means the PF side
	 * is down or itself still resetting
	 */
	err = wx_reset_hw_vf(wx);
	if (err) {
		wx_err(wx, "PF still in reset state. Is the PF interface up?\n");
		goto err_reset_hw;
	}
	wx_init_hw_vf(wx);
	wx_negotiate_api_vf(wx);
	if (is_zero_ether_addr(wx->mac.addr))
		dev_info(&pdev->dev,
			 "MAC address not assigned by administrator.\n");
	eth_hw_addr_set(netdev, wx->mac.addr);

	/* fall back to a random address if the PF gave us nothing usable */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		ether_addr_copy(wx->mac.addr, netdev->dev_addr);
		ether_addr_copy(wx->mac.perm_addr, netdev->dev_addr);
	}

	wx->mac.max_tx_queues = TXGBEVF_MAX_TX_QUEUES;
	wx->mac.max_rx_queues = TXGBEVF_MAX_RX_QUEUES;
	/* Enable dynamic interrupt throttling rates */
	wx->adaptive_itr = true;
	wx->rx_itr_setting = 1;
	wx->tx_itr_setting = 1;
	/* set default ring sizes */
	wx->tx_ring_count = TXGBEVF_DEFAULT_TXD;
	wx->rx_ring_count = TXGBEVF_DEFAULT_RXD;
	/* set default work limits */
	wx->tx_work_limit = TXGBEVF_DEFAULT_TX_WORK;
	wx->rx_work_limit = TXGBEVF_DEFAULT_RX_WORK;

	wx->set_num_queues = txgbevf_set_num_queues;

	return 0;
err_reset_hw:
	kfree(wx->vfinfo);
err_init_mbx_params:
	kfree(wx->rss_key);
	kfree(wx->mac_table);
err_wx_sw_init:
	return err;
}

/**
 * txgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in txgbevf_pci_tbl
 *
 * Return: return 0 on success, negative on failure
 *
 * txgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int txgbevf_probe(struct pci_dev *pdev,
			 const struct pci_device_id __always_unused *ent)
{
	struct net_device *netdev;
	struct wx *wx = NULL;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_pci_disable_dev;
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   dev_driver_string(&pdev->dev));
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_disable_dev;
	}

	pci_set_master(pdev);

	/* devm-managed netdev: freed automatically on probe failure/remove */
	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct wx),
					 TXGBEVF_MAX_TX_QUEUES,
					 TXGBEVF_MAX_RX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_pci_release_regions;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	wx = netdev_priv(netdev);
	wx->netdev = netdev;
	wx->pdev = pdev;

	wx->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
					NETIF_MSG_PROBE | NETIF_MSG_LINK);
	/* BAR 0: primary register space */
	wx->hw_addr = devm_ioremap(&pdev->dev,
				   pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!wx->hw_addr) {
		err = -EIO;
		goto err_pci_release_regions;
	}

	/* BAR 4: secondary register space (b4) */
	wx->b4_addr = devm_ioremap(&pdev->dev,
				   pci_resource_start(pdev, 4),
				   pci_resource_len(pdev, 4));
	if (!wx->b4_addr) {
		err = -EIO;
		goto err_pci_release_regions;
	}

	netdev->netdev_ops = &txgbevf_netdev_ops;

	/* setup the private structure */
	err = txgbevf_sw_init(wx);
	if (err)
		goto err_pci_release_regions;

	netdev->features |= NETIF_F_HIGHDMA;

	eth_hw_addr_set(netdev, wx->mac.perm_addr);
	ether_addr_copy(netdev->perm_addr, wx->mac.addr);

	/* service timer/task must exist before interrupts can fire */
	wxvf_init_service(wx);
	err = wx_init_interrupt_scheme(wx);
	if (err)
		goto err_free_sw_init;

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, wx);
	/* queues stay stopped until the link comes up */
	netif_tx_stop_all_queues(netdev);

	return 0;

err_register:
	wx_clear_interrupt_scheme(wx);
err_free_sw_init:
	/* undo wxvf_init_service() and txgbevf_sw_init() allocations */
	timer_delete_sync(&wx->service_timer);
	cancel_work_sync(&wx->service_task);
	kfree(wx->vfinfo);
	kfree(wx->rss_key);
	kfree(wx->mac_table);
err_pci_release_regions:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_disable_dev:
	pci_disable_device(pdev);
	return err;
}

/**
 * txgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * txgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void txgbevf_remove(struct pci_dev *pdev)
{
	/* teardown is shared across WangXun VF drivers */
	wxvf_remove(pdev);
}

static DEFINE_SIMPLE_DEV_PM_OPS(txgbevf_pm_ops, wxvf_suspend, wxvf_resume);

static struct pci_driver txgbevf_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= txgbevf_pci_tbl,
	.probe		= txgbevf_probe,
	.remove		= txgbevf_remove,
	.shutdown	= wxvf_shutdown,
	/* Power Management Hooks */
	.driver.pm	= pm_sleep_ptr(&txgbevf_pm_ops)
};

module_pci_driver(txgbevf_driver);

MODULE_DEVICE_TABLE(pci, txgbevf_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");