// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/err.h>

#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_port.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

MODULE_AUTHOR("Huawei Technologies CO., Ltd");
MODULE_DESCRIPTION("Huawei Intelligent NIC driver");
MODULE_LICENSE("GPL");

static unsigned int tx_weight = 64;
module_param(tx_weight, uint, 0644);
MODULE_PARM_DESC(tx_weight, "Number of Tx packets for NAPI budget (default=64)");

static unsigned int rx_weight = 64;
module_param(rx_weight, uint, 0644);
MODULE_PARM_DESC(rx_weight, "Number of Rx packets for NAPI budget (default=64)");

#define HINIC_DEV_ID_QUAD_PORT_25GE		0x1822
#define HINIC_DEV_ID_DUAL_PORT_100GE		0x0200
#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ	0x0205
#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ	0x0210

#define HINIC_WQ_NAME			"hinic_dev"

#define MSG_ENABLE_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_IFUP |                  \
				 NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)

#define HINIC_LRO_MAX_WQE_NUM_DEFAULT	8

#define HINIC_LRO_RX_TIMER_DEFAULT	16

#define VLAN_BITMAP_SIZE(nic_dev)	(ALIGN(VLAN_N_VID, 8) / 8)

#define work_to_rx_mode_work(work)	\
		container_of(work, struct hinic_rx_mode_work, work)

#define rx_mode_work_to_nic_dev(rx_mode_work) \
		container_of(rx_mode_work, struct hinic_dev, rx_mode_work)

static int change_mac_addr(struct net_device *netdev, const u8 *addr);

static int set_features(struct hinic_dev *nic_dev,
			netdev_features_t pre_features,
			netdev_features_t features, bool force_change);

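/* update_rx_stats/update_tx_stats fold one queue's counters into the
 * device-wide totals and then reset the per-queue counters so they are
 * not added twice on the next refresh.
 */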
static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats;
	struct hinic_rxq_stats rx_stats;

	u64_stats_init(&rx_stats.syncp);

	hinic_rxq_get_stats(rxq, &rx_stats);

	u64_stats_update_begin(&nic_rx_stats->syncp);
	nic_rx_stats->bytes += rx_stats.bytes;
	nic_rx_stats->pkts += rx_stats.pkts;
	u64_stats_update_end(&nic_rx_stats->syncp);

	hinic_rxq_clean_stats(rxq);
}

static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
{
	struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats;
	struct hinic_txq_stats tx_stats;

	u64_stats_init(&tx_stats.syncp);

	hinic_txq_get_stats(txq, &tx_stats);

	u64_stats_update_begin(&nic_tx_stats->syncp);
	nic_tx_stats->bytes += tx_stats.bytes;
	nic_tx_stats->pkts += tx_stats.pkts;
	nic_tx_stats->tx_busy += tx_stats.tx_busy;
	nic_tx_stats->tx_wake += tx_stats.tx_wake;
	nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
	u64_stats_update_end(&nic_tx_stats->syncp);

	hinic_txq_clean_stats(txq);
}

static void update_nic_stats(struct hinic_dev *nic_dev)
{
	int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);

	for (i = 0; i < num_qps; i++)
		update_rx_stats(nic_dev, &nic_dev->rxqs[i]);

	for (i = 0; i < num_qps; i++)
		update_tx_stats(nic_dev, &nic_dev->txqs[i]);
}

/**
 * create_txqs - Create the Logical Tx Queues of specific NIC device
 * @nic_dev: the specific NIC device
 *
 * Return 0 - Success, negative - Failure
 **/
static int create_txqs(struct hinic_dev *nic_dev)
{
	int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
	struct net_device *netdev = nic_dev->netdev;
	size_t txq_size;

	if (nic_dev->txqs)
		return -EINVAL;

	txq_size = num_txqs * sizeof(*nic_dev->txqs);
	nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL);
	if (!nic_dev->txqs)
		return -ENOMEM;

	for (i = 0; i < num_txqs; i++) {
		struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);

		err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to init Txq\n");
			goto err_init_txq;
		}
	}

	return 0;

err_init_txq:
	for (j = 0; j < i; j++)
		hinic_clean_txq(&nic_dev->txqs[j]);

	devm_kfree(&netdev->dev, nic_dev->txqs);
	return err;
}

/**
 * free_txqs - Free the Logical Tx Queues of specific NIC device
 * @nic_dev: the specific NIC device
 **/
static void free_txqs(struct hinic_dev *nic_dev)
{
	int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
	struct net_device *netdev = nic_dev->netdev;

	if (!nic_dev->txqs)
		return;

	for (i = 0; i < num_txqs; i++)
		hinic_clean_txq(&nic_dev->txqs[i]);

	devm_kfree(&netdev->dev, nic_dev->txqs);
	nic_dev->txqs = NULL;
}

/**
 * create_rxqs - Create the Logical Rx Queues of specific NIC device
 * @nic_dev: the specific NIC device
 *
 * Return 0 - Success, negative - Failure
 **/
static int create_rxqs(struct hinic_dev *nic_dev)
{
	int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
	struct net_device *netdev = nic_dev->netdev;
	size_t rxq_size;

	if (nic_dev->rxqs)
		return -EINVAL;

	rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
	nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL);
	if (!nic_dev->rxqs)
		return -ENOMEM;

	for (i = 0; i < num_rxqs; i++) {
		struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);

		err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to init rxq\n");
			goto err_init_rxq;
		}
	}

	return 0;

err_init_rxq:
	for (j = 0; j < i; j++)
		hinic_clean_rxq(&nic_dev->rxqs[j]);

	devm_kfree(&netdev->dev, nic_dev->rxqs);
	return err;
}

/**
 * free_rxqs - Free the Logical Rx Queues of specific NIC device
 * @nic_dev: the specific NIC device
 **/
static void free_rxqs(struct hinic_dev *nic_dev)
{
	int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
	struct net_device *netdev = nic_dev->netdev;

	if (!nic_dev->rxqs)
		return;

	for (i = 0; i < num_rxqs; i++)
		hinic_clean_rxq(&nic_dev->rxqs[i]);

	devm_kfree(&netdev->dev, nic_dev->rxqs);
	nic_dev->rxqs = NULL;
}

static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
{
	int err;

	err = hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
	if (err)
		return err;

	return 0;
}

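/* hinic_rss_init programs the RSS template for this function: a random
 * hash key, a default indirection table spread over num_rss queues, the
 * flow types to hash and the hash engine, and finally enables RSS on the
 * template.
 */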
static int hinic_rss_init(struct hinic_dev *nic_dev)
{
	u32 indir_tbl[HINIC_RSS_INDIR_SIZE] = { 0 };
	u8 default_rss_key[HINIC_RSS_KEY_SIZE];
	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
	int err, i;

	netdev_rss_key_fill(default_rss_key, sizeof(default_rss_key));
	for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
		indir_tbl[i] = ethtool_rxfh_indir_default(i, nic_dev->num_rss);

	err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, default_rss_key);
	if (err)
		return err;

	err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir_tbl);
	if (err)
		return err;

	err = hinic_set_rss_type(nic_dev, tmpl_idx, nic_dev->rss_type);
	if (err)
		return err;

	err = hinic_rss_set_hash_engine(nic_dev, tmpl_idx,
					nic_dev->rss_hash_engine);
	if (err)
		return err;

	err = hinic_rss_cfg(nic_dev, 1, tmpl_idx);
	if (err)
		return err;

	return 0;
}

static void hinic_rss_deinit(struct hinic_dev *nic_dev)
{
	hinic_rss_cfg(nic_dev, 0, nic_dev->rss_tmpl_idx);
}

static void hinic_init_rss_parameters(struct hinic_dev *nic_dev)
{
	nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
	nic_dev->rss_type.tcp_ipv6_ext = 1;
	nic_dev->rss_type.ipv6_ext = 1;
	nic_dev->rss_type.tcp_ipv6 = 1;
	nic_dev->rss_type.ipv6 = 1;
	nic_dev->rss_type.tcp_ipv4 = 1;
	nic_dev->rss_type.ipv4 = 1;
	nic_dev->rss_type.udp_ipv6 = 1;
	nic_dev->rss_type.udp_ipv4 = 1;
}

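/* hinic_enable_rss sizes the number of queue pairs from the number of
 * online CPUs on the device's NUMA node (falling back to all online CPUs
 * if none are local), and falls back to a single queue without RSS when
 * only one QP is available or no RSS template can be allocated.
 */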
static void hinic_enable_rss(struct hinic_dev *nic_dev)
{
	struct net_device *netdev = nic_dev->netdev;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i, node, err = 0;
	u16 num_cpus = 0;

	nic_dev->max_qps = hinic_hwdev_max_num_qps(hwdev);
	if (nic_dev->max_qps <= 1) {
		nic_dev->flags &= ~HINIC_RSS_ENABLE;
		nic_dev->rss_limit = nic_dev->max_qps;
		nic_dev->num_qps = nic_dev->max_qps;
		nic_dev->num_rss = nic_dev->max_qps;

		return;
	}

	err = hinic_rss_template_alloc(nic_dev, &nic_dev->rss_tmpl_idx);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n");
		nic_dev->flags &= ~HINIC_RSS_ENABLE;
		nic_dev->max_qps = 1;
		nic_dev->rss_limit = nic_dev->max_qps;
		nic_dev->num_qps = nic_dev->max_qps;
		nic_dev->num_rss = nic_dev->max_qps;

		return;
	}

	nic_dev->flags |= HINIC_RSS_ENABLE;

	for (i = 0; i < num_online_cpus(); i++) {
		node = cpu_to_node(i);
		if (node == dev_to_node(&pdev->dev))
			num_cpus++;
	}

	if (!num_cpus)
		num_cpus = num_online_cpus();

	nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_cpus);

	nic_dev->rss_limit = nic_dev->num_qps;
	nic_dev->num_rss = nic_dev->num_qps;

	hinic_init_rss_parameters(nic_dev);
	err = hinic_rss_init(nic_dev);
	if (err)
		netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
}

static int hinic_open(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	enum hinic_port_link_state link_state;
	int err, ret;

	if (!(nic_dev->flags & HINIC_INTF_UP)) {
		err = hinic_hwdev_ifup(nic_dev->hwdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed - HW interface up\n");
			return err;
		}
	}

	err = create_txqs(nic_dev);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to create Tx queues\n");
		goto err_create_txqs;
	}

	err = create_rxqs(nic_dev);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to create Rx queues\n");
		goto err_create_rxqs;
	}

	hinic_enable_rss(nic_dev);

	err = hinic_configure_max_qnum(nic_dev);
	if (err) {
		netif_err(nic_dev, drv, nic_dev->netdev,
			  "Failed to configure the maximum number of queues\n");
		goto err_port_state;
	}

	netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
	netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);

	err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to set port state\n");
		goto err_port_state;
	}

	err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to set func port state\n");
		goto err_func_port_state;
	}

	/* Wait up to 3 sec between port enable to link state */
	msleep(3000);

	down(&nic_dev->mgmt_lock);

	err = hinic_port_link_state(nic_dev, &link_state);
	if (err) {
		netif_err(nic_dev, drv, netdev, "Failed to get link state\n");
		goto err_port_link;
	}

	if (link_state == HINIC_LINK_STATE_UP)
		nic_dev->flags |= HINIC_LINK_UP;

	nic_dev->flags |= HINIC_INTF_UP;

	if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
	    (HINIC_LINK_UP | HINIC_INTF_UP)) {
		netif_info(nic_dev, drv, netdev, "link + intf UP\n");
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
	}

	up(&nic_dev->mgmt_lock);

	netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n");
	return 0;

err_port_link:
	up(&nic_dev->mgmt_lock);
	ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
	if (ret)
		netif_warn(nic_dev, drv, netdev,
			   "Failed to revert func port state\n");

err_func_port_state:
	ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
	if (ret)
		netif_warn(nic_dev, drv, netdev,
			   "Failed to revert port state\n");
err_port_state:
	free_rxqs(nic_dev);
	if (nic_dev->flags & HINIC_RSS_ENABLE) {
		hinic_rss_deinit(nic_dev);
		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
	}

err_create_rxqs:
	free_txqs(nic_dev);

err_create_txqs:
	if (!(nic_dev->flags & HINIC_INTF_UP))
		hinic_hwdev_ifdown(nic_dev->hwdev);
	return err;
}

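/* hinic_close clears HINIC_INTF_UP and stops the Tx queues under
 * mgmt_lock, taking a final snapshot of the per-queue statistics before
 * the queues are freed; if disabling the port fails, HINIC_INTF_UP is
 * restored so the interface state stays consistent.
 */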
static int hinic_close(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	unsigned int flags;
	int err;

	down(&nic_dev->mgmt_lock);

	flags = nic_dev->flags;
	nic_dev->flags &= ~HINIC_INTF_UP;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	update_nic_stats(nic_dev);

	up(&nic_dev->mgmt_lock);

	err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to set func port state\n");
		nic_dev->flags |= (flags & HINIC_INTF_UP);
		return err;
	}

	err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
	if (err) {
		netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
		nic_dev->flags |= (flags & HINIC_INTF_UP);
		return err;
	}

	if (nic_dev->flags & HINIC_RSS_ENABLE) {
		hinic_rss_deinit(nic_dev);
		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
	}

	free_rxqs(nic_dev);
	free_txqs(nic_dev);

	if (flags & HINIC_INTF_UP)
		hinic_hwdev_ifdown(nic_dev->hwdev);

	netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n");
	return 0;
}

static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err;

	netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu);

	err = hinic_port_set_mtu(nic_dev, new_mtu);
	if (err)
		netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
	else
		netdev->mtu = new_mtu;

	return err;
}

/**
 * change_mac_addr - change the main mac address of network device
 * @netdev: network device
 * @addr: mac address to set
 *
 * Return 0 - Success, negative - Failure
 **/
static int change_mac_addr(struct net_device *netdev, const u8 *addr)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 vid = 0;
	int err;

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n",
		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	down(&nic_dev->mgmt_lock);

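	/* Swap the MAC for the untagged filter (vid 0) and for every VLAN
	 * currently set in vlan_bitmap, so all VLAN filters carry the same
	 * station address.
	 */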
	do {
		err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to delete mac\n");
			break;
		}

		err = hinic_port_add_mac(nic_dev, addr, vid);
		if (err) {
			netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
			break;
		}

		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
	} while (vid != VLAN_N_VID);

	up(&nic_dev->mgmt_lock);
	return err;
}

static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
{
	unsigned char new_mac[ETH_ALEN];
	struct sockaddr *saddr = addr;
	int err;

	memcpy(new_mac, saddr->sa_data, ETH_ALEN);

	err = change_mac_addr(netdev, new_mac);
	if (!err)
		memcpy(netdev->dev_addr, new_mac, ETH_ALEN);

	return err;
}

/**
 * add_mac_addr - add mac address to network device
 * @netdev: network device
 * @addr: mac address to add
 *
 * Return 0 - Success, negative - Failure
 **/
static int add_mac_addr(struct net_device *netdev, const u8 *addr)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 vid = 0;
	int err;

	netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n",
		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	down(&nic_dev->mgmt_lock);

	do {
		err = hinic_port_add_mac(nic_dev, addr, vid);
		if (err) {
			netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
			break;
		}

		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
	} while (vid != VLAN_N_VID);

	up(&nic_dev->mgmt_lock);
	return err;
}

/**
 * remove_mac_addr - remove mac address from network device
 * @netdev: network device
 * @addr: mac address to remove
 *
 * Return 0 - Success, negative - Failure
 **/
static int remove_mac_addr(struct net_device *netdev, const u8 *addr)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 vid = 0;
	int err;

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n",
		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	down(&nic_dev->mgmt_lock);

	do {
		err = hinic_port_del_mac(nic_dev, addr, vid);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to delete mac\n");
			break;
		}

		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
	} while (vid != VLAN_N_VID);

	up(&nic_dev->mgmt_lock);
	return err;
}

static int hinic_vlan_rx_add_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int ret, err;

	netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid);

	down(&nic_dev->mgmt_lock);

	err = hinic_port_add_vlan(nic_dev, vid);
	if (err) {
		netif_err(nic_dev, drv, netdev, "Failed to add vlan\n");
		goto err_vlan_add;
	}

	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
	if (err) {
		netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
		goto err_add_mac;
	}

	bitmap_set(nic_dev->vlan_bitmap, vid, 1);

	up(&nic_dev->mgmt_lock);
	return 0;

err_add_mac:
	ret = hinic_port_del_vlan(nic_dev, vid);
	if (ret)
		netif_err(nic_dev, drv, netdev,
			  "Failed to revert by removing vlan\n");

err_vlan_add:
	up(&nic_dev->mgmt_lock);
	return err;
}

static int hinic_vlan_rx_kill_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err;

	netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid);

	down(&nic_dev->mgmt_lock);

	err = hinic_port_del_vlan(nic_dev, vid);
	if (err) {
		netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
		goto err_del_vlan;
	}

	bitmap_clear(nic_dev->vlan_bitmap, vid, 1);

	up(&nic_dev->mgmt_lock);
	return 0;

err_del_vlan:
	up(&nic_dev->mgmt_lock);
	return err;
}

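/* .ndo_set_rx_mode is called in atomic context, while updating the Rx mode
 * and syncing the unicast/multicast filters requires management commands
 * that may sleep, so the actual update is deferred to the driver workqueue.
 */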
static void set_rx_mode(struct work_struct *work)
{
	struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
	struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);

	netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");

	hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);

	__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
	__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
}

static void hinic_set_rx_mode(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_rx_mode_work *rx_mode_work;
	u32 rx_mode;

	rx_mode_work = &nic_dev->rx_mode_work;

	rx_mode = HINIC_RX_MODE_UC |
		  HINIC_RX_MODE_MC |
		  HINIC_RX_MODE_BC;

	if (netdev->flags & IFF_PROMISC)
		rx_mode |= HINIC_RX_MODE_PROMISC;
	else if (netdev->flags & IFF_ALLMULTI)
		rx_mode |= HINIC_RX_MODE_MC_ALL;

	rx_mode_work->rx_mode = rx_mode;

	queue_work(nic_dev->workq, &rx_mode_work->work);
}

static void hinic_tx_timeout(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	netif_err(nic_dev, drv, netdev, "Tx timeout\n");
}

static void hinic_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_rxq_stats *nic_rx_stats;
	struct hinic_txq_stats *nic_tx_stats;

	nic_rx_stats = &nic_dev->rx_stats;
	nic_tx_stats = &nic_dev->tx_stats;

	down(&nic_dev->mgmt_lock);

	if (nic_dev->flags & HINIC_INTF_UP)
		update_nic_stats(nic_dev);

	up(&nic_dev->mgmt_lock);

	stats->rx_bytes = nic_rx_stats->bytes;
	stats->rx_packets = nic_rx_stats->pkts;

	stats->tx_bytes = nic_tx_stats->bytes;
	stats->tx_packets = nic_tx_stats->pkts;
	stats->tx_errors = nic_tx_stats->tx_dropped;
}

static int hinic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	return set_features(nic_dev, nic_dev->netdev->features,
			    features, false);
}

static netdev_features_t hinic_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	/* If Rx checksum is disabled, then LRO should also be disabled */
	if (!(features & NETIF_F_RXCSUM)) {
		netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n");
		features &= ~NETIF_F_LRO;
	}

	return features;
}

static const struct net_device_ops hinic_netdev_ops = {
	.ndo_open = hinic_open,
	.ndo_stop = hinic_close,
	.ndo_change_mtu = hinic_change_mtu,
	.ndo_set_mac_address = hinic_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
	.ndo_set_rx_mode = hinic_set_rx_mode,
	.ndo_start_xmit = hinic_xmit_frame,
	.ndo_tx_timeout = hinic_tx_timeout,
	.ndo_get_stats64 = hinic_get_stats64,
	.ndo_fix_features = hinic_fix_features,
	.ndo_set_features = hinic_set_features,
};

static void netdev_features_init(struct net_device *netdev)
{
	netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_RXCSUM | NETIF_F_LRO;

	netdev->vlan_features = netdev->hw_features;

	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
}

/**
 * link_status_event_handler - link event handler
 * @handle: nic device for the handler
 * @buf_in: input buffer
 * @in_size: input size
 * @buf_out: output buffer
 * @out_size: returned output size
 **/
static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
				      void *buf_out, u16 *out_size)
{
	struct hinic_port_link_status *link_status, *ret_link_status;
	struct hinic_dev *nic_dev = handle;

	link_status = buf_in;

	if (link_status->link == HINIC_LINK_STATE_UP) {
		down(&nic_dev->mgmt_lock);

		nic_dev->flags |= HINIC_LINK_UP;

		if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
		    (HINIC_LINK_UP | HINIC_INTF_UP)) {
			netif_carrier_on(nic_dev->netdev);
			netif_tx_wake_all_queues(nic_dev->netdev);
		}

		up(&nic_dev->mgmt_lock);

		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n");
	} else {
		down(&nic_dev->mgmt_lock);

		nic_dev->flags &= ~HINIC_LINK_UP;

		netif_carrier_off(nic_dev->netdev);
		netif_tx_disable(nic_dev->netdev);

		up(&nic_dev->mgmt_lock);

		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
	}

	ret_link_status = buf_out;
	ret_link_status->status = 0;

	*out_size = sizeof(*ret_link_status);
}

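/* set_features pushes the requested offloads to hardware.  With
 * force_change set, every supported feature is treated as changed, which
 * is how nic_dev_init programs the default feature set at probe time.
 */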
static int set_features(struct hinic_dev *nic_dev,
			netdev_features_t pre_features,
			netdev_features_t features, bool force_change)
{
	netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
	u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
	int err = 0;

	if (changed & NETIF_F_TSO)
		err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
					 HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);

	if (changed & NETIF_F_RXCSUM)
		err = hinic_set_rx_csum_offload(nic_dev, csum_en);

	if (changed & NETIF_F_LRO) {
		err = hinic_set_rx_lro_state(nic_dev,
					     !!(features & NETIF_F_LRO),
					     HINIC_LRO_RX_TIMER_DEFAULT,
					     HINIC_LRO_MAX_WQE_NUM_DEFAULT);
	}

	return err;
}

/**
 * nic_dev_init - Initialize the NIC device
 * @pdev: the NIC pci device
 *
 * Return 0 - Success, negative - Failure
 **/
static int nic_dev_init(struct pci_dev *pdev)
{
	struct hinic_rx_mode_work *rx_mode_work;
	struct hinic_txq_stats *tx_stats;
	struct hinic_rxq_stats *rx_stats;
	struct hinic_dev *nic_dev;
	struct net_device *netdev;
	struct hinic_hwdev *hwdev;
	int err, num_qps;

	hwdev = hinic_init_hwdev(pdev);
	if (IS_ERR(hwdev)) {
		dev_err(&pdev->dev, "Failed to initialize HW device\n");
		return PTR_ERR(hwdev);
	}

	num_qps = hinic_hwdev_num_qps(hwdev);
	if (num_qps <= 0) {
		dev_err(&pdev->dev, "Invalid number of QPS\n");
		err = -EINVAL;
		goto err_num_qps;
	}

	netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps);
	if (!netdev) {
		dev_err(&pdev->dev, "Failed to allocate Ethernet device\n");
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	hinic_set_ethtool_ops(netdev);
	netdev->netdev_ops = &hinic_netdev_ops;
	netdev->max_mtu = ETH_MAX_MTU;

	nic_dev = netdev_priv(netdev);
	nic_dev->netdev = netdev;
	nic_dev->hwdev = hwdev;
	nic_dev->msg_enable = MSG_ENABLE_DEFAULT;
	nic_dev->flags = 0;
	nic_dev->txqs = NULL;
	nic_dev->rxqs = NULL;
	nic_dev->tx_weight = tx_weight;
	nic_dev->rx_weight = rx_weight;

	sema_init(&nic_dev->mgmt_lock, 1);

	tx_stats = &nic_dev->tx_stats;
	rx_stats = &nic_dev->rx_stats;

	u64_stats_init(&tx_stats->syncp);
	u64_stats_init(&rx_stats->syncp);

	nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
					    VLAN_BITMAP_SIZE(nic_dev),
					    GFP_KERNEL);
	if (!nic_dev->vlan_bitmap) {
		err = -ENOMEM;
		goto err_vlan_bitmap;
	}

	nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME);
	if (!nic_dev->workq) {
		err = -ENOMEM;
		goto err_workq;
	}

	pci_set_drvdata(pdev, netdev);

	err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
	if (err)
		dev_warn(&pdev->dev, "Failed to get mac address\n");

	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
	if (err) {
		dev_err(&pdev->dev, "Failed to add mac\n");
		goto err_add_mac;
	}

	err = hinic_port_set_mtu(nic_dev, netdev->mtu);
	if (err) {
		dev_err(&pdev->dev, "Failed to set mtu\n");
		goto err_set_mtu;
	}

	rx_mode_work = &nic_dev->rx_mode_work;
	INIT_WORK(&rx_mode_work->work, set_rx_mode);

	netdev_features_init(netdev);

	netif_carrier_off(netdev);

	hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
				nic_dev, link_status_event_handler);

	err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
	if (err)
		goto err_set_features;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto err_reg_netdev;
	}

	return 0;

err_reg_netdev:
err_set_features:
	hinic_hwdev_cb_unregister(nic_dev->hwdev,
				  HINIC_MGMT_MSG_CMD_LINK_STATUS);
	cancel_work_sync(&rx_mode_work->work);

err_set_mtu:
err_add_mac:
	pci_set_drvdata(pdev, NULL);
	destroy_workqueue(nic_dev->workq);

err_workq:
err_vlan_bitmap:
	free_netdev(netdev);

err_alloc_etherdev:
err_num_qps:
	hinic_free_hwdev(hwdev);
	return err;
}

static int hinic_probe(struct pci_dev *pdev,
		       const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, HINIC_DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Failed to request PCI regions\n");
		goto err_pci_regions;
	}

	pci_set_master(pdev);

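	/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask if the
	 * platform cannot provide it.
	 */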
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Failed to set DMA mask\n");
			goto err_dma_mask;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Couldn't set 64-bit consistent DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Failed to set consistent DMA mask\n");
			goto err_dma_consistent_mask;
		}
	}

	err = nic_dev_init(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize NIC device\n");
		goto err_nic_dev_init;
	}

	dev_info(&pdev->dev, "HiNIC driver - probed\n");
	return 0;

err_nic_dev_init:
err_dma_consistent_mask:
err_dma_mask:
	pci_release_regions(pdev);

err_pci_regions:
	pci_disable_device(pdev);
	return err;
}

static void hinic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_rx_mode_work *rx_mode_work;

	unregister_netdev(netdev);

	hinic_hwdev_cb_unregister(nic_dev->hwdev,
				  HINIC_MGMT_MSG_CMD_LINK_STATUS);

	rx_mode_work = &nic_dev->rx_mode_work;
	cancel_work_sync(&rx_mode_work->work);

	pci_set_drvdata(pdev, NULL);

	destroy_workqueue(nic_dev->workq);

	hinic_free_hwdev(nic_dev->hwdev);

	free_netdev(netdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	dev_info(&pdev->dev, "HiNIC driver - removed\n");
}

static void hinic_shutdown(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static const struct pci_device_id hinic_pci_table[] = {
	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
	{ 0, 0}
};
MODULE_DEVICE_TABLE(pci, hinic_pci_table);

static struct pci_driver hinic_driver = {
	.name		= HINIC_DRV_NAME,
	.id_table	= hinic_pci_table,
	.probe		= hinic_probe,
	.remove		= hinic_remove,
	.shutdown	= hinic_shutdown,
};

module_pci_driver(hinic_driver);