1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Huawei HiNIC PCI Express Linux driver 4 * Copyright(c) 2017 Huawei Technologies Co., Ltd 5 */ 6 7 #include <linux/kernel.h> 8 #include <linux/module.h> 9 #include <linux/moduleparam.h> 10 #include <linux/pci.h> 11 #include <linux/device.h> 12 #include <linux/errno.h> 13 #include <linux/types.h> 14 #include <linux/etherdevice.h> 15 #include <linux/netdevice.h> 16 #include <linux/slab.h> 17 #include <linux/if_vlan.h> 18 #include <linux/semaphore.h> 19 #include <linux/workqueue.h> 20 #include <net/ip.h> 21 #include <linux/bitops.h> 22 #include <linux/bitmap.h> 23 #include <linux/delay.h> 24 #include <linux/err.h> 25 26 #include "hinic_hw_qp.h" 27 #include "hinic_hw_dev.h" 28 #include "hinic_port.h" 29 #include "hinic_tx.h" 30 #include "hinic_rx.h" 31 #include "hinic_dev.h" 32 #include "hinic_sriov.h" 33 34 MODULE_AUTHOR("Huawei Technologies CO., Ltd"); 35 MODULE_DESCRIPTION("Huawei Intelligent NIC driver"); 36 MODULE_LICENSE("GPL"); 37 38 static unsigned int tx_weight = 64; 39 module_param(tx_weight, uint, 0644); 40 MODULE_PARM_DESC(tx_weight, "Number Tx packets for NAPI budget (default=64)"); 41 42 static unsigned int rx_weight = 64; 43 module_param(rx_weight, uint, 0644); 44 MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); 45 46 #define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822 47 #define HINIC_DEV_ID_DUAL_PORT_100GE 0x0200 48 #define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ 0x0205 49 #define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ 0x0210 50 #define HINIC_DEV_ID_VF 0x375e 51 52 #define HINIC_WQ_NAME "hinic_dev" 53 54 #define MSG_ENABLE_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ 55 NETIF_MSG_IFUP | \ 56 NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) 57 58 #define HINIC_LRO_MAX_WQE_NUM_DEFAULT 8 59 60 #define HINIC_LRO_RX_TIMER_DEFAULT 16 61 62 #define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8) 63 64 #define work_to_rx_mode_work(work) \ 65 container_of(work, struct hinic_rx_mode_work, work) 66 67 #define 
rx_mode_work_to_nic_dev(rx_mode_work) \ 68 container_of(rx_mode_work, struct hinic_dev, rx_mode_work) 69 70 #define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000 71 72 static int change_mac_addr(struct net_device *netdev, const u8 *addr); 73 74 static int set_features(struct hinic_dev *nic_dev, 75 netdev_features_t pre_features, 76 netdev_features_t features, bool force_change); 77 78 static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq) 79 { 80 struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats; 81 struct hinic_rxq_stats rx_stats; 82 83 u64_stats_init(&rx_stats.syncp); 84 85 hinic_rxq_get_stats(rxq, &rx_stats); 86 87 u64_stats_update_begin(&nic_rx_stats->syncp); 88 nic_rx_stats->bytes += rx_stats.bytes; 89 nic_rx_stats->pkts += rx_stats.pkts; 90 nic_rx_stats->errors += rx_stats.errors; 91 nic_rx_stats->csum_errors += rx_stats.csum_errors; 92 nic_rx_stats->other_errors += rx_stats.other_errors; 93 u64_stats_update_end(&nic_rx_stats->syncp); 94 95 hinic_rxq_clean_stats(rxq); 96 } 97 98 static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq) 99 { 100 struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats; 101 struct hinic_txq_stats tx_stats; 102 103 u64_stats_init(&tx_stats.syncp); 104 105 hinic_txq_get_stats(txq, &tx_stats); 106 107 u64_stats_update_begin(&nic_tx_stats->syncp); 108 nic_tx_stats->bytes += tx_stats.bytes; 109 nic_tx_stats->pkts += tx_stats.pkts; 110 nic_tx_stats->tx_busy += tx_stats.tx_busy; 111 nic_tx_stats->tx_wake += tx_stats.tx_wake; 112 nic_tx_stats->tx_dropped += tx_stats.tx_dropped; 113 nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts; 114 u64_stats_update_end(&nic_tx_stats->syncp); 115 116 hinic_txq_clean_stats(txq); 117 } 118 119 static void update_nic_stats(struct hinic_dev *nic_dev) 120 { 121 int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); 122 123 for (i = 0; i < num_qps; i++) 124 update_rx_stats(nic_dev, &nic_dev->rxqs[i]); 125 126 for (i = 0; i < num_qps; i++) 127 
update_tx_stats(nic_dev, &nic_dev->txqs[i]); 128 } 129 130 /** 131 * create_txqs - Create the Logical Tx Queues of specific NIC device 132 * @nic_dev: the specific NIC device 133 * 134 * Return 0 - Success, negative - Failure 135 **/ 136 static int create_txqs(struct hinic_dev *nic_dev) 137 { 138 int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); 139 struct net_device *netdev = nic_dev->netdev; 140 size_t txq_size; 141 142 if (nic_dev->txqs) 143 return -EINVAL; 144 145 txq_size = num_txqs * sizeof(*nic_dev->txqs); 146 nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL); 147 if (!nic_dev->txqs) 148 return -ENOMEM; 149 150 for (i = 0; i < num_txqs; i++) { 151 struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i); 152 153 err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev); 154 if (err) { 155 netif_err(nic_dev, drv, netdev, 156 "Failed to init Txq\n"); 157 goto err_init_txq; 158 } 159 } 160 161 return 0; 162 163 err_init_txq: 164 for (j = 0; j < i; j++) 165 hinic_clean_txq(&nic_dev->txqs[j]); 166 167 devm_kfree(&netdev->dev, nic_dev->txqs); 168 return err; 169 } 170 171 /** 172 * free_txqs - Free the Logical Tx Queues of specific NIC device 173 * @nic_dev: the specific NIC device 174 **/ 175 static void free_txqs(struct hinic_dev *nic_dev) 176 { 177 int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); 178 struct net_device *netdev = nic_dev->netdev; 179 180 if (!nic_dev->txqs) 181 return; 182 183 for (i = 0; i < num_txqs; i++) 184 hinic_clean_txq(&nic_dev->txqs[i]); 185 186 devm_kfree(&netdev->dev, nic_dev->txqs); 187 nic_dev->txqs = NULL; 188 } 189 190 /** 191 * create_txqs - Create the Logical Rx Queues of specific NIC device 192 * @nic_dev: the specific NIC device 193 * 194 * Return 0 - Success, negative - Failure 195 **/ 196 static int create_rxqs(struct hinic_dev *nic_dev) 197 { 198 int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); 199 struct net_device *netdev = nic_dev->netdev; 200 size_t rxq_size; 201 202 if 
/**
 * free_rxqs - Free the Logical Rx Queues of specific NIC device
 * @nic_dev: the specific NIC device
 **/
static void free_rxqs(struct hinic_dev *nic_dev)
{
	int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
	struct net_device *netdev = nic_dev->netdev;

	/* Nothing to free if the queues were never created */
	if (!nic_dev->rxqs)
		return;

	for (i = 0; i < num_rxqs; i++)
		hinic_clean_rxq(&nic_dev->rxqs[i]);

	devm_kfree(&netdev->dev, nic_dev->rxqs);
	nic_dev->rxqs = NULL;
}

/* Program the hardware with the device's maximum queue-pair count. */
static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
{
	int err;

	err = hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
	if (err)
		return err;

	return 0;
}

/* Push the RSS configuration (hash key, indirection table, hash type and
 * engine) to the firmware template and enable RSS on it.
 * Return 0 - Success, negative - Failure.
 */
static int hinic_rss_init(struct hinic_dev *nic_dev)
{
	u8 default_rss_key[HINIC_RSS_KEY_SIZE];
	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
	u32 *indir_tbl;
	int err, i;

	indir_tbl = kcalloc(HINIC_RSS_INDIR_SIZE, sizeof(u32), GFP_KERNEL);
	if (!indir_tbl)
		return -ENOMEM;

	/* Random hash key, default round-robin indirection over num_rss */
	netdev_rss_key_fill(default_rss_key, sizeof(default_rss_key));
	for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
		indir_tbl[i] = ethtool_rxfh_indir_default(i, nic_dev->num_rss);

	err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, default_rss_key);
	if (err)
		goto out;

	err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir_tbl);
	if (err)
		goto out;

	err = hinic_set_rss_type(nic_dev, tmpl_idx, nic_dev->rss_type);
	if (err)
		goto out;

	err = hinic_rss_set_hash_engine(nic_dev, tmpl_idx,
					nic_dev->rss_hash_engine);
	if (err)
		goto out;

	err = hinic_rss_cfg(nic_dev, 1, tmpl_idx);
	if (err)
		goto out;

out:
	kfree(indir_tbl);
	return err;
}
/* Disable RSS on this function's firmware template. */
static void hinic_rss_deinit(struct hinic_dev *nic_dev)
{
	hinic_rss_cfg(nic_dev, 0, nic_dev->rss_tmpl_idx);
}

/* Default RSS policy: XOR hash engine, hash on all supported
 * TCP/UDP over IPv4/IPv6 flow types (including IPv6 extensions).
 */
static void hinic_init_rss_parameters(struct hinic_dev *nic_dev)
{
	nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
	nic_dev->rss_type.tcp_ipv6_ext = 1;
	nic_dev->rss_type.ipv6_ext = 1;
	nic_dev->rss_type.tcp_ipv6 = 1;
	nic_dev->rss_type.ipv6 = 1;
	nic_dev->rss_type.tcp_ipv4 = 1;
	nic_dev->rss_type.ipv4 = 1;
	nic_dev->rss_type.udp_ipv6 = 1;
	nic_dev->rss_type.udp_ipv4 = 1;
}

/* Size the queue-pair count for RSS and enable it when possible.
 * Falls back to a single queue (RSS off) if the device exposes only one
 * qp or no firmware RSS template can be allocated.
 */
static void hinic_enable_rss(struct hinic_dev *nic_dev)
{
	struct net_device *netdev = nic_dev->netdev;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i, node, err = 0;
	u16 num_cpus = 0;

	nic_dev->max_qps = hinic_hwdev_max_num_qps(hwdev);
	if (nic_dev->max_qps <= 1) {
		/* Single queue - RSS cannot spread anything */
		nic_dev->flags &= ~HINIC_RSS_ENABLE;
		nic_dev->rss_limit = nic_dev->max_qps;
		nic_dev->num_qps = nic_dev->max_qps;
		nic_dev->num_rss = nic_dev->max_qps;

		return;
	}

	err = hinic_rss_template_alloc(nic_dev, &nic_dev->rss_tmpl_idx);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n");
		nic_dev->flags &= ~HINIC_RSS_ENABLE;
		nic_dev->max_qps = 1;
		nic_dev->rss_limit = nic_dev->max_qps;
		nic_dev->num_qps = nic_dev->max_qps;
		nic_dev->num_rss = nic_dev->max_qps;

		return;
	}

	nic_dev->flags |= HINIC_RSS_ENABLE;

	/* Prefer CPUs local to the NIC's NUMA node */
	for (i = 0; i < num_online_cpus(); i++) {
		node = cpu_to_node(i);
		if (node == dev_to_node(&pdev->dev))
			num_cpus++;
	}

	if (!num_cpus)
		num_cpus = num_online_cpus();

	/* Cap the qp count by the number of usable CPUs */
	nic_dev->num_qps = hinic_hwdev_num_qps(hwdev);
	nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);

	nic_dev->rss_limit = nic_dev->num_qps;
	nic_dev->num_rss = nic_dev->num_qps;

	hinic_init_rss_parameters(nic_dev);
	err = hinic_rss_init(nic_dev);
	if (err)
		netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
}
/* ndo_open: bring the HW interface, queues, RSS and port state up.
 * On any failure the already-acquired resources are unwound in reverse
 * order through the goto label chain at the bottom.
 * Return 0 - Success, negative - Failure.
 */
int hinic_open(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	enum hinic_port_link_state link_state;
	int err, ret;

	/* Skip HW ifup when the interface is already up (re-open path) */
	if (!(nic_dev->flags & HINIC_INTF_UP)) {
		err = hinic_hwdev_ifup(nic_dev->hwdev, nic_dev->sq_depth,
				       nic_dev->rq_depth);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed - HW interface up\n");
			return err;
		}
	}

	err = create_txqs(nic_dev);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to create Tx queues\n");
		goto err_create_txqs;
	}

	err = create_rxqs(nic_dev);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to create Rx queues\n");
		goto err_create_rxqs;
	}

	hinic_enable_rss(nic_dev);

	err = hinic_configure_max_qnum(nic_dev);
	if (err) {
		netif_err(nic_dev, drv, nic_dev->netdev,
			  "Failed to configure the maximum number of queues\n");
		goto err_port_state;
	}

	netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
	netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);

	err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to set port state\n");
		goto err_port_state;
	}

	err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to set func port state\n");
		goto err_func_port_state;
	}

	/* mgmt_lock protects nic_dev->flags and the carrier transition */
	down(&nic_dev->mgmt_lock);

	err = hinic_port_link_state(nic_dev, &link_state);
	if (err) {
		netif_err(nic_dev, drv, netdev, "Failed to get link state\n");
		goto err_port_link;
	}

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state);

	if (link_state == HINIC_LINK_STATE_UP)
		nic_dev->flags |= HINIC_LINK_UP;

	nic_dev->flags |= HINIC_INTF_UP;

	/* Start Tx only when both the link and the interface are up */
	if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
	    (HINIC_LINK_UP | HINIC_INTF_UP)) {
		netif_info(nic_dev, drv, netdev, "link + intf UP\n");
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
	}

	up(&nic_dev->mgmt_lock);

	netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n");
	return 0;

err_port_link:
	up(&nic_dev->mgmt_lock);
	ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
	if (ret)
		netif_warn(nic_dev, drv, netdev,
			   "Failed to revert func port state\n");

err_func_port_state:
	ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
	if (ret)
		netif_warn(nic_dev, drv, netdev,
			   "Failed to revert port state\n");
err_port_state:
	free_rxqs(nic_dev);
	if (nic_dev->flags & HINIC_RSS_ENABLE) {
		hinic_rss_deinit(nic_dev);
		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
	}

err_create_rxqs:
	free_txqs(nic_dev);

err_create_txqs:
	/* Only tear down the HW interface if this call brought it up */
	if (!(nic_dev->flags & HINIC_INTF_UP))
		hinic_hwdev_ifdown(nic_dev->hwdev);
	return err;
}
/* ndo_stop: stop Tx, flush stats, disable port/function state and free
 * queues. The pre-clear flags snapshot decides whether the HW interface
 * itself must be brought down.
 * Return 0 - always succeeds.
 */
int hinic_close(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	unsigned int flags;

	down(&nic_dev->mgmt_lock);

	/* Snapshot before clearing so the ifdown decision below still sees
	 * whether the interface was actually up.
	 */
	flags = nic_dev->flags;
	nic_dev->flags &= ~HINIC_INTF_UP;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	/* Harvest per-queue counters before the queues are destroyed */
	update_nic_stats(nic_dev);

	up(&nic_dev->mgmt_lock);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);

	hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);

	hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);

	if (nic_dev->flags & HINIC_RSS_ENABLE) {
		hinic_rss_deinit(nic_dev);
		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
	}

	free_rxqs(nic_dev);
	free_txqs(nic_dev);

	if (flags & HINIC_INTF_UP)
		hinic_hwdev_ifdown(nic_dev->hwdev);

	netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n");
	return 0;
}
503 504 if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) 505 hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0); 506 507 hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); 508 509 hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); 510 511 if (nic_dev->flags & HINIC_RSS_ENABLE) { 512 hinic_rss_deinit(nic_dev); 513 hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx); 514 } 515 516 free_rxqs(nic_dev); 517 free_txqs(nic_dev); 518 519 if (flags & HINIC_INTF_UP) 520 hinic_hwdev_ifdown(nic_dev->hwdev); 521 522 netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n"); 523 return 0; 524 } 525 526 static int hinic_change_mtu(struct net_device *netdev, int new_mtu) 527 { 528 struct hinic_dev *nic_dev = netdev_priv(netdev); 529 int err; 530 531 netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu); 532 533 err = hinic_port_set_mtu(nic_dev, new_mtu); 534 if (err) 535 netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n"); 536 else 537 netdev->mtu = new_mtu; 538 539 return err; 540 } 541 542 /** 543 * change_mac_addr - change the main mac address of network device 544 * @netdev: network device 545 * @addr: mac address to set 546 * 547 * Return 0 - Success, negative - Failure 548 **/ 549 static int change_mac_addr(struct net_device *netdev, const u8 *addr) 550 { 551 struct hinic_dev *nic_dev = netdev_priv(netdev); 552 u16 vid = 0; 553 int err; 554 555 if (!is_valid_ether_addr(addr)) 556 return -EADDRNOTAVAIL; 557 558 netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n", 559 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); 560 561 down(&nic_dev->mgmt_lock); 562 563 do { 564 err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid); 565 if (err) { 566 netif_err(nic_dev, drv, netdev, 567 "Failed to delete mac\n"); 568 break; 569 } 570 571 err = hinic_port_add_mac(nic_dev, addr, vid); 572 if (err) { 573 netif_err(nic_dev, drv, netdev, "Failed to add mac\n"); 574 break; 575 } 576 577 vid = find_next_bit(nic_dev->vlan_bitmap, 
VLAN_N_VID, vid + 1); 578 } while (vid != VLAN_N_VID); 579 580 up(&nic_dev->mgmt_lock); 581 return err; 582 } 583 584 static int hinic_set_mac_addr(struct net_device *netdev, void *addr) 585 { 586 unsigned char new_mac[ETH_ALEN]; 587 struct sockaddr *saddr = addr; 588 int err; 589 590 memcpy(new_mac, saddr->sa_data, ETH_ALEN); 591 592 err = change_mac_addr(netdev, new_mac); 593 if (!err) 594 memcpy(netdev->dev_addr, new_mac, ETH_ALEN); 595 596 return err; 597 } 598 599 /** 600 * add_mac_addr - add mac address to network device 601 * @netdev: network device 602 * @addr: mac address to add 603 * 604 * Return 0 - Success, negative - Failure 605 **/ 606 static int add_mac_addr(struct net_device *netdev, const u8 *addr) 607 { 608 struct hinic_dev *nic_dev = netdev_priv(netdev); 609 u16 vid = 0; 610 int err; 611 612 netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n", 613 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); 614 615 down(&nic_dev->mgmt_lock); 616 617 do { 618 err = hinic_port_add_mac(nic_dev, addr, vid); 619 if (err) { 620 netif_err(nic_dev, drv, netdev, "Failed to add mac\n"); 621 break; 622 } 623 624 vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); 625 } while (vid != VLAN_N_VID); 626 627 up(&nic_dev->mgmt_lock); 628 return err; 629 } 630 631 /** 632 * remove_mac_addr - remove mac address from network device 633 * @netdev: network device 634 * @addr: mac address to remove 635 * 636 * Return 0 - Success, negative - Failure 637 **/ 638 static int remove_mac_addr(struct net_device *netdev, const u8 *addr) 639 { 640 struct hinic_dev *nic_dev = netdev_priv(netdev); 641 u16 vid = 0; 642 int err; 643 644 if (!is_valid_ether_addr(addr)) 645 return -EADDRNOTAVAIL; 646 647 netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n", 648 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); 649 650 down(&nic_dev->mgmt_lock); 651 652 do { 653 err = hinic_port_del_mac(nic_dev, addr, vid); 654 
/* ndo_vlan_rx_add_vid: register the vlan with the port, re-add the main
 * MAC on that vlan, and record it in the vlan bitmap. On MAC failure the
 * vlan registration is reverted.
 */
static int hinic_vlan_rx_add_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int ret, err;

	netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid);

	down(&nic_dev->mgmt_lock);

	err = hinic_port_add_vlan(nic_dev, vid);
	if (err) {
		netif_err(nic_dev, drv, netdev, "Failed to add vlan\n");
		goto err_vlan_add;
	}

	/* HINIC_PF_SET_VF_ALREADY means the PF already programmed this
	 * MAC for the VF - not an error here.
	 */
	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
	if (err && err != HINIC_PF_SET_VF_ALREADY) {
		netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
		goto err_add_mac;
	}

	bitmap_set(nic_dev->vlan_bitmap, vid, 1);

	up(&nic_dev->mgmt_lock);
	return 0;

err_add_mac:
	ret = hinic_port_del_vlan(nic_dev, vid);
	if (ret)
		netif_err(nic_dev, drv, netdev,
			  "Failed to revert by removing vlan\n");

err_vlan_add:
	up(&nic_dev->mgmt_lock);
	return err;
}

/* ndo_vlan_rx_kill_vid: unregister the vlan from the port and clear it
 * from the vlan bitmap.
 */
static int hinic_vlan_rx_kill_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err;

	netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid);

	down(&nic_dev->mgmt_lock);

	err = hinic_port_del_vlan(nic_dev, vid);
	if (err) {
		netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
		goto err_del_vlan;
	}

	bitmap_clear(nic_dev->vlan_bitmap, vid, 1);

	up(&nic_dev->mgmt_lock);
	return 0;

err_del_vlan:
	up(&nic_dev->mgmt_lock);
	return err;
}

/* Workqueue handler: apply the rx_mode captured by hinic_set_rx_mode()
 * and sync the unicast/multicast address lists to the hardware.
 */
static void set_rx_mode(struct work_struct *work)
{
	struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
	struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);

	hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);

	__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
	__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
}
/* ndo_set_rx_mode: runs in atomic context, so only compute the desired
 * rx_mode bits here and defer the firmware calls to the workqueue.
 */
static void hinic_set_rx_mode(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_rx_mode_work *rx_mode_work;
	u32 rx_mode;

	rx_mode_work = &nic_dev->rx_mode_work;

	rx_mode = HINIC_RX_MODE_UC |
		  HINIC_RX_MODE_MC |
		  HINIC_RX_MODE_BC;

	if (netdev->flags & IFF_PROMISC) {
		/* Promiscuous mode is only honoured on the PF */
		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
			rx_mode |= HINIC_RX_MODE_PROMISC;
	} else if (netdev->flags & IFF_ALLMULTI) {
		rx_mode |= HINIC_RX_MODE_MC_ALL;
	}

	rx_mode_work->rx_mode = rx_mode;

	queue_work(nic_dev->workq, &rx_mode_work->work);
}

/* ndo_tx_timeout: dump producer/consumer indices of every stalled Tx
 * queue to aid debugging.
 */
static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 sw_pi, hw_ci, sw_ci;
	struct hinic_sq *sq;
	u16 num_sqs, q_id;

	num_sqs = hinic_hwdev_num_qps(nic_dev->hwdev);

	netif_err(nic_dev, drv, netdev, "Tx timeout\n");

	for (q_id = 0; q_id < num_sqs; q_id++) {
		/* Only report queues that are actually stopped */
		if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
			continue;

		sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);
		sw_pi = atomic_read(&sq->wq->prod_idx) & sq->wq->mask;
		hw_ci = be16_to_cpu(*(u16 *)(sq->hw_ci_addr)) & sq->wq->mask;
		sw_ci = atomic_read(&sq->wq->cons_idx) & sq->wq->mask;
		netif_err(nic_dev, drv, netdev, "Txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx\n",
			  q_id, sw_pi, hw_ci, sw_ci,
			  nic_dev->txqs[q_id].napi.state);
	}
}

/* ndo_get_stats64: refresh the accumulated device counters (when the
 * interface is up) and copy them into @stats.
 */
static void hinic_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_rxq_stats *nic_rx_stats;
	struct hinic_txq_stats *nic_tx_stats;

	nic_rx_stats = &nic_dev->rx_stats;
	nic_tx_stats = &nic_dev->tx_stats;

	down(&nic_dev->mgmt_lock);

	/* Queues only exist while the interface is up */
	if (nic_dev->flags & HINIC_INTF_UP)
		update_nic_stats(nic_dev);

	up(&nic_dev->mgmt_lock);

	stats->rx_bytes = nic_rx_stats->bytes;
	stats->rx_packets = nic_rx_stats->pkts;
	stats->rx_errors = nic_rx_stats->errors;

	stats->tx_bytes = nic_tx_stats->bytes;
	stats->tx_packets = nic_tx_stats->pkts;
	stats->tx_errors = nic_tx_stats->tx_dropped;
}
/* ndo_set_features: delegate to set_features() with the current feature
 * set as the baseline, applying only the changed bits.
 */
static int hinic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	return set_features(nic_dev, nic_dev->netdev->features,
			    features, false);
}

/* ndo_fix_features: enforce feature dependencies before they are applied. */
static netdev_features_t hinic_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	/* If Rx checksum is disabled, then LRO should also be disabled */
	if (!(features & NETIF_F_RXCSUM)) {
		netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n");
		features &= ~NETIF_F_LRO;
	}

	return features;
}

/* netdev callbacks for the PF - includes the SR-IOV VF management ops. */
static const struct net_device_ops hinic_netdev_ops = {
	.ndo_open = hinic_open,
	.ndo_stop = hinic_close,
	.ndo_change_mtu = hinic_change_mtu,
	.ndo_set_mac_address = hinic_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
	.ndo_set_rx_mode = hinic_set_rx_mode,
	.ndo_start_xmit = hinic_xmit_frame,
	.ndo_tx_timeout = hinic_tx_timeout,
	.ndo_get_stats64 = hinic_get_stats64,
	.ndo_fix_features = hinic_fix_features,
	.ndo_set_features = hinic_set_features,
	.ndo_set_vf_mac	= hinic_ndo_set_vf_mac,
	.ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
	.ndo_get_vf_config = hinic_ndo_get_vf_config,
	.ndo_set_vf_trust = hinic_ndo_set_vf_trust,
	.ndo_set_vf_rate = hinic_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
	.ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
};
/* netdev callbacks for the VF - same as the PF set minus the VF
 * management ops.
 */
static const struct net_device_ops hinicvf_netdev_ops = {
	.ndo_open = hinic_open,
	.ndo_stop = hinic_close,
	.ndo_change_mtu = hinic_change_mtu,
	.ndo_set_mac_address = hinic_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
	.ndo_set_rx_mode = hinic_set_rx_mode,
	.ndo_start_xmit = hinic_xmit_frame,
	.ndo_tx_timeout = hinic_tx_timeout,
	.ndo_get_stats64 = hinic_get_stats64,
	.ndo_fix_features = hinic_fix_features,
	.ndo_set_features = hinic_set_features,
};

/* Advertise the offloads this driver supports; vlan filtering is always
 * on (in features but not hw_features, so userspace cannot disable it).
 */
static void netdev_features_init(struct net_device *netdev)
{
	netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_RXCSUM | NETIF_F_LRO |
			      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	netdev->vlan_features = netdev->hw_features;

	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
}

/**
 * link_status_event_handler - link event handler
 * @handle: nic device for the handler
 * @buf_in: input buffer
 * @in_size: input size
 * @buf_out: output buffer
 * @out_size: returned output size
 *
 * Updates carrier/Tx state from the firmware link notification and
 * acknowledges the event through @buf_out.
 **/
static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
				      void *buf_out, u16 *out_size)
{
	struct hinic_port_link_status *link_status, *ret_link_status;
	struct hinic_dev *nic_dev = handle;

	link_status = buf_in;

	if (link_status->link == HINIC_LINK_STATE_UP) {
		down(&nic_dev->mgmt_lock);

		nic_dev->flags |= HINIC_LINK_UP;

		/* Wake Tx only when the interface itself is also up */
		if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
		    (HINIC_LINK_UP | HINIC_INTF_UP)) {
			netif_carrier_on(nic_dev->netdev);
			netif_tx_wake_all_queues(nic_dev->netdev);
		}

		up(&nic_dev->mgmt_lock);

		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n");
	} else {
		down(&nic_dev->mgmt_lock);

		nic_dev->flags &= ~HINIC_LINK_UP;

		netif_carrier_off(nic_dev->netdev);
		netif_tx_disable(nic_dev->netdev);

		up(&nic_dev->mgmt_lock);

		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
	}

	/* PF relays the link change to all of its VFs */
	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_notify_all_vfs_link_changed(nic_dev->hwdev,
						  link_status->link);

	ret_link_status = buf_out;
	ret_link_status->status = 0;

	*out_size = sizeof(*ret_link_status);
}
HINIC_INTF_UP)) { 916 netif_carrier_on(nic_dev->netdev); 917 netif_tx_wake_all_queues(nic_dev->netdev); 918 } 919 920 up(&nic_dev->mgmt_lock); 921 922 netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n"); 923 } else { 924 down(&nic_dev->mgmt_lock); 925 926 nic_dev->flags &= ~HINIC_LINK_UP; 927 928 netif_carrier_off(nic_dev->netdev); 929 netif_tx_disable(nic_dev->netdev); 930 931 up(&nic_dev->mgmt_lock); 932 933 netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n"); 934 } 935 936 if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) 937 hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 938 link_status->link); 939 940 ret_link_status = buf_out; 941 ret_link_status->status = 0; 942 943 *out_size = sizeof(*ret_link_status); 944 } 945 946 static int set_features(struct hinic_dev *nic_dev, 947 netdev_features_t pre_features, 948 netdev_features_t features, bool force_change) 949 { 950 netdev_features_t changed = force_change ? ~0 : pre_features ^ features; 951 u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN; 952 int err = 0; 953 954 if (changed & NETIF_F_TSO) 955 err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ? 
/**
 * nic_dev_init - Initialize the NIC device
 * @pdev: the NIC pci device
 *
 * Return 0 - Success, negative - Failure
 **/
static int nic_dev_init(struct pci_dev *pdev)
{
	struct hinic_rx_mode_work *rx_mode_work;
	struct hinic_txq_stats *tx_stats;
	struct hinic_rxq_stats *rx_stats;
	struct hinic_dev *nic_dev;
	struct net_device *netdev;
	struct hinic_hwdev *hwdev;
	int err, num_qps;

	hwdev = hinic_init_hwdev(pdev);
	if (IS_ERR(hwdev)) {
		dev_err(&pdev->dev, "Failed to initialize HW device\n");
		return PTR_ERR(hwdev);
	}

	num_qps = hinic_hwdev_num_qps(hwdev);
	if (num_qps <= 0) {
		dev_err(&pdev->dev, "Invalid number of QPS\n");
		err = -EINVAL;
		goto err_num_qps;
	}

	netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps);
	if (!netdev) {
		dev_err(&pdev->dev, "Failed to allocate Ethernet device\n");
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	hinic_set_ethtool_ops(netdev);

	/* VFs get the reduced ops set without SR-IOV management callbacks */
	if (!HINIC_IS_VF(hwdev->hwif))
		netdev->netdev_ops = &hinic_netdev_ops;
	else
		netdev->netdev_ops = &hinicvf_netdev_ops;

	netdev->max_mtu = ETH_MAX_MTU;

	nic_dev = netdev_priv(netdev);
	nic_dev->netdev = netdev;
	nic_dev->hwdev = hwdev;
	nic_dev->msg_enable = MSG_ENABLE_DEFAULT;
	nic_dev->flags = 0;
	nic_dev->txqs = NULL;
	nic_dev->rxqs = NULL;
	nic_dev->tx_weight = tx_weight;
	nic_dev->rx_weight = rx_weight;
	nic_dev->sq_depth = HINIC_SQ_DEPTH;
	nic_dev->rq_depth = HINIC_RQ_DEPTH;
	nic_dev->sriov_info.hwdev = hwdev;
	nic_dev->sriov_info.pdev = pdev;

	sema_init(&nic_dev->mgmt_lock, 1);

	tx_stats = &nic_dev->tx_stats;
	rx_stats = &nic_dev->rx_stats;

	u64_stats_init(&tx_stats->syncp);
	u64_stats_init(&rx_stats->syncp);

	nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
					    VLAN_BITMAP_SIZE(nic_dev),
					    GFP_KERNEL);
	if (!nic_dev->vlan_bitmap) {
		err = -ENOMEM;
		goto err_vlan_bitmap;
	}

	nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME);
	if (!nic_dev->workq) {
		err = -ENOMEM;
		goto err_workq;
	}

	pci_set_drvdata(pdev, netdev);

	err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to get mac address\n");
		goto err_get_mac;
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		/* Only a VF may fall back to a random MAC; a PF with an
		 * invalid MAC is a hard error.
		 */
		if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
			dev_err(&pdev->dev, "Invalid MAC address\n");
			err = -EIO;
			goto err_add_mac;
		}

		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 netdev->dev_addr);
		eth_hw_addr_random(netdev);
	}

	/* HINIC_PF_SET_VF_ALREADY: the PF already set this VF's MAC */
	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
	if (err && err != HINIC_PF_SET_VF_ALREADY) {
		dev_err(&pdev->dev, "Failed to add mac\n");
		goto err_add_mac;
	}

	err = hinic_port_set_mtu(nic_dev, netdev->mtu);
	if (err) {
		dev_err(&pdev->dev, "Failed to set mtu\n");
		goto err_set_mtu;
	}

	rx_mode_work = &nic_dev->rx_mode_work;
	INIT_WORK(&rx_mode_work->work, set_rx_mode);

	netdev_features_init(netdev);

	netif_carrier_off(netdev);

	hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
				nic_dev, link_status_event_handler);

	/* force_change: program every supported feature into hardware */
	err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
	if (err)
		goto err_set_features;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto err_reg_netdev;
	}

	return 0;

err_reg_netdev:
err_set_features:
	hinic_hwdev_cb_unregister(nic_dev->hwdev,
				  HINIC_MGMT_MSG_CMD_LINK_STATUS);
	cancel_work_sync(&rx_mode_work->work);

err_set_mtu:
err_get_mac:
err_add_mac:
	pci_set_drvdata(pdev, NULL);
	destroy_workqueue(nic_dev->workq);

err_workq:
err_vlan_bitmap:
	free_netdev(netdev);

err_alloc_etherdev:
err_num_qps:
	hinic_free_hwdev(hwdev);
	return err;
}

/* PCI probe: enable the device, map its regions, set up DMA masks and
 * hand over to nic_dev_init(). Return 0 - Success, negative - Failure.
 */
static int hinic_probe(struct pci_dev *pdev,
		       const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, HINIC_DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Failed to request PCI regions\n");
		goto err_pci_regions;
	}

	pci_set_master(pdev);

	/* Prefer 64-bit DMA, fall back to 32-bit addressing */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Failed to set DMA mask\n");
			goto err_dma_mask;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Couldn't set 64-bit consistent DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Failed to set consistent DMA mask\n");
			goto err_dma_consistent_mask;
		}
	}

	err = nic_dev_init(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize NIC device\n");
		goto err_nic_dev_init;
	}

	dev_info(&pdev->dev, "HiNIC driver - probed\n");
	return 0;

err_nic_dev_init:
err_dma_consistent_mask:
err_dma_mask:
	pci_release_regions(pdev);

err_pci_regions:
	pci_disable_device(pdev);
	return err;
}
dev_info(&pdev->dev, "HiNIC driver - probed\n"); 1182 return 0; 1183 1184 err_nic_dev_init: 1185 err_dma_consistent_mask: 1186 err_dma_mask: 1187 pci_release_regions(pdev); 1188 1189 err_pci_regions: 1190 pci_disable_device(pdev); 1191 return err; 1192 } 1193 1194 #define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000 1195 1196 static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev) 1197 { 1198 struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info; 1199 u32 loop_cnt = 0; 1200 1201 set_bit(HINIC_FUNC_REMOVE, &sriov_info->state); 1202 usleep_range(9900, 10000); 1203 1204 while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) { 1205 if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) && 1206 !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state)) 1207 return; 1208 1209 usleep_range(9900, 10000); 1210 loop_cnt++; 1211 } 1212 } 1213 1214 static void hinic_remove(struct pci_dev *pdev) 1215 { 1216 struct net_device *netdev = pci_get_drvdata(pdev); 1217 struct hinic_dev *nic_dev = netdev_priv(netdev); 1218 struct hinic_rx_mode_work *rx_mode_work; 1219 1220 if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) { 1221 wait_sriov_cfg_complete(nic_dev); 1222 hinic_pci_sriov_disable(pdev); 1223 } 1224 1225 unregister_netdev(netdev); 1226 1227 hinic_port_del_mac(nic_dev, netdev->dev_addr, 0); 1228 1229 hinic_hwdev_cb_unregister(nic_dev->hwdev, 1230 HINIC_MGMT_MSG_CMD_LINK_STATUS); 1231 1232 rx_mode_work = &nic_dev->rx_mode_work; 1233 cancel_work_sync(&rx_mode_work->work); 1234 1235 pci_set_drvdata(pdev, NULL); 1236 1237 destroy_workqueue(nic_dev->workq); 1238 1239 hinic_vf_func_free(nic_dev->hwdev); 1240 1241 hinic_free_hwdev(nic_dev->hwdev); 1242 1243 free_netdev(netdev); 1244 1245 pci_release_regions(pdev); 1246 pci_disable_device(pdev); 1247 1248 dev_info(&pdev->dev, "HiNIC driver - removed\n"); 1249 } 1250 1251 static void hinic_shutdown(struct pci_dev *pdev) 1252 { 1253 pci_disable_device(pdev); 1254 } 1255 1256 static const struct pci_device_id hinic_pci_table[] = { 1257 { PCI_VDEVICE(HUAWEI, 
HINIC_DEV_ID_QUAD_PORT_25GE), 0}, 1258 { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0}, 1259 { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0}, 1260 { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0}, 1261 { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_VF), 0}, 1262 { 0, 0} 1263 }; 1264 MODULE_DEVICE_TABLE(pci, hinic_pci_table); 1265 1266 static struct pci_driver hinic_driver = { 1267 .name = HINIC_DRV_NAME, 1268 .id_table = hinic_pci_table, 1269 .probe = hinic_probe, 1270 .remove = hinic_remove, 1271 .shutdown = hinic_shutdown, 1272 .sriov_configure = hinic_pci_sriov_configure, 1273 }; 1274 1275 module_pci_driver(hinic_driver); 1276