1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/dma-mapping.h> 5 #include <linux/etherdevice.h> 6 #include <linux/interrupt.h> 7 #ifdef CONFIG_RFS_ACCEL 8 #include <linux/cpu_rmap.h> 9 #endif 10 #include <linux/if_vlan.h> 11 #include <linux/ip.h> 12 #include <linux/ipv6.h> 13 #include <linux/module.h> 14 #include <linux/pci.h> 15 #include <linux/aer.h> 16 #include <linux/skbuff.h> 17 #include <linux/sctp.h> 18 #include <linux/vermagic.h> 19 #include <net/gre.h> 20 #include <net/ip6_checksum.h> 21 #include <net/pkt_cls.h> 22 #include <net/tcp.h> 23 #include <net/vxlan.h> 24 25 #include "hnae3.h" 26 #include "hns3_enet.h" 27 28 #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) 29 #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) 30 31 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); 32 static void hns3_remove_hw_addr(struct net_device *netdev); 33 34 static const char hns3_driver_name[] = "hns3"; 35 const char hns3_driver_version[] = VERMAGIC_STRING; 36 static const char hns3_driver_string[] = 37 "Hisilicon Ethernet Network Driver for Hip08 Family"; 38 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; 39 static struct hnae3_client client; 40 41 static int debug = -1; 42 module_param(debug, int, 0); 43 MODULE_PARM_DESC(debug, " Network interface message level setting"); 44 45 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 46 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) 47 48 /* hns3_pci_tbl - PCI Device ID Table 49 * 50 * Last entry must be all 0s 51 * 52 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 53 * Class, Class Mask, private data (not used) } 54 */ 55 static const struct pci_device_id hns3_pci_tbl[] = { 56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 57 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 58 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 59 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 60 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 61 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 62 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 63 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 64 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 65 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 66 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 67 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 68 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 69 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 70 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 71 /* required last entry */ 72 {0, } 73 }; 74 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); 75 76 static irqreturn_t hns3_irq_handle(int irq, void *vector) 77 { 78 struct hns3_enet_tqp_vector *tqp_vector = vector; 79 80 napi_schedule_irqoff(&tqp_vector->napi); 81 82 return IRQ_HANDLED; 83 } 84 85 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) 86 { 87 struct hns3_enet_tqp_vector *tqp_vectors; 88 unsigned int i; 89 90 for (i = 0; i < priv->vector_num; i++) { 91 tqp_vectors = &priv->tqp_vector[i]; 92 93 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) 94 continue; 95 96 /* clear the affinity mask */ 97 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); 98 99 /* release the irq resource */ 100 free_irq(tqp_vectors->vector_irq, tqp_vectors); 101 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; 102 } 103 } 104 105 static int hns3_nic_init_irq(struct hns3_nic_priv *priv) 106 { 107 struct hns3_enet_tqp_vector *tqp_vectors; 108 int txrx_int_idx = 0; 109 int rx_int_idx = 0; 110 int tx_int_idx = 0; 111 unsigned int i; 112 int ret; 113 114 
for (i = 0; i < priv->vector_num; i++) { 115 tqp_vectors = &priv->tqp_vector[i]; 116 117 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) 118 continue; 119 120 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { 121 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, 122 "%s-%s-%d", priv->netdev->name, "TxRx", 123 txrx_int_idx++); 124 txrx_int_idx++; 125 } else if (tqp_vectors->rx_group.ring) { 126 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, 127 "%s-%s-%d", priv->netdev->name, "Rx", 128 rx_int_idx++); 129 } else if (tqp_vectors->tx_group.ring) { 130 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, 131 "%s-%s-%d", priv->netdev->name, "Tx", 132 tx_int_idx++); 133 } else { 134 /* Skip this unused q_vector */ 135 continue; 136 } 137 138 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; 139 140 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, 141 tqp_vectors->name, tqp_vectors); 142 if (ret) { 143 netdev_err(priv->netdev, "request irq(%d) fail\n", 144 tqp_vectors->vector_irq); 145 hns3_nic_uninit_irq(priv); 146 return ret; 147 } 148 149 irq_set_affinity_hint(tqp_vectors->vector_irq, 150 &tqp_vectors->affinity_mask); 151 152 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; 153 } 154 155 return 0; 156 } 157 158 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, 159 u32 mask_en) 160 { 161 writel(mask_en, tqp_vector->mask_addr); 162 } 163 164 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) 165 { 166 napi_enable(&tqp_vector->napi); 167 168 /* enable vector */ 169 hns3_mask_vector_irq(tqp_vector, 1); 170 } 171 172 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) 173 { 174 /* disable vector */ 175 hns3_mask_vector_irq(tqp_vector, 0); 176 177 disable_irq(tqp_vector->vector_irq); 178 napi_disable(&tqp_vector->napi); 179 } 180 181 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, 182 u32 rl_value) 183 { 184 u32 rl_reg = hns3_rl_usec_to_reg(rl_value); 185 186 /* this defines the configuration for RL (Interrupt Rate Limiter). 187 * Rl defines rate of interrupts i.e. number of interrupts-per-second 188 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing 189 */ 190 191 if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable && 192 !tqp_vector->rx_group.coal.gl_adapt_enable) 193 /* According to the hardware, the range of rl_reg is 194 * 0-59 and the unit is 4. 195 */ 196 rl_reg |= HNS3_INT_RL_ENABLE_MASK; 197 198 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); 199 } 200 201 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, 202 u32 gl_value) 203 { 204 u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value); 205 206 writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); 207 } 208 209 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, 210 u32 gl_value) 211 { 212 u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value); 213 214 writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); 215 } 216 217 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, 218 struct hns3_nic_priv *priv) 219 { 220 /* initialize the configuration for interrupt coalescing. 221 * 1. GL (Interrupt Gap Limiter) 222 * 2. 
RL (Interrupt Rate Limiter) 223 */ 224 225 /* Default: enable interrupt coalescing self-adaptive and GL */ 226 tqp_vector->tx_group.coal.gl_adapt_enable = 1; 227 tqp_vector->rx_group.coal.gl_adapt_enable = 1; 228 229 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; 230 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; 231 232 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; 233 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; 234 } 235 236 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector, 237 struct hns3_nic_priv *priv) 238 { 239 struct hnae3_handle *h = priv->ae_handle; 240 241 hns3_set_vector_coalesce_tx_gl(tqp_vector, 242 tqp_vector->tx_group.coal.int_gl); 243 hns3_set_vector_coalesce_rx_gl(tqp_vector, 244 tqp_vector->rx_group.coal.int_gl); 245 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); 246 } 247 248 static int hns3_nic_set_real_num_queue(struct net_device *netdev) 249 { 250 struct hnae3_handle *h = hns3_get_handle(netdev); 251 struct hnae3_knic_private_info *kinfo = &h->kinfo; 252 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; 253 int i, ret; 254 255 if (kinfo->num_tc <= 1) { 256 netdev_reset_tc(netdev); 257 } else { 258 ret = netdev_set_num_tc(netdev, kinfo->num_tc); 259 if (ret) { 260 netdev_err(netdev, 261 "netdev_set_num_tc fail, ret=%d!\n", ret); 262 return ret; 263 } 264 265 for (i = 0; i < HNAE3_MAX_TC; i++) { 266 if (!kinfo->tc_info[i].enable) 267 continue; 268 269 netdev_set_tc_queue(netdev, 270 kinfo->tc_info[i].tc, 271 kinfo->tc_info[i].tqp_count, 272 kinfo->tc_info[i].tqp_offset); 273 } 274 } 275 276 ret = netif_set_real_num_tx_queues(netdev, queue_size); 277 if (ret) { 278 netdev_err(netdev, 279 "netif_set_real_num_tx_queues fail, ret=%d!\n", ret); 280 return ret; 281 } 282 283 ret = netif_set_real_num_rx_queues(netdev, queue_size); 284 if (ret) { 285 netdev_err(netdev, 286 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); 287 return ret; 288 } 289 290 return 0; 291 } 292 293 static u16 hns3_get_max_available_channels(struct hnae3_handle *h) 294 { 295 u16 alloc_tqps, max_rss_size, rss_size; 296 297 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); 298 rss_size = alloc_tqps / h->kinfo.num_tc; 299 300 return min_t(u16, rss_size, max_rss_size); 301 } 302 303 static void hns3_tqp_enable(struct hnae3_queue *tqp) 304 { 305 u32 rcb_reg; 306 307 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 308 rcb_reg |= BIT(HNS3_RING_EN_B); 309 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 310 } 311 312 static void hns3_tqp_disable(struct hnae3_queue *tqp) 313 { 314 u32 rcb_reg; 315 316 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 317 rcb_reg &= ~BIT(HNS3_RING_EN_B); 318 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 319 } 320 321 static void hns3_free_rx_cpu_rmap(struct net_device *netdev) 322 { 323 #ifdef CONFIG_RFS_ACCEL 324 free_irq_cpu_rmap(netdev->rx_cpu_rmap); 325 netdev->rx_cpu_rmap = NULL; 326 #endif 327 } 328 329 static int hns3_set_rx_cpu_rmap(struct net_device *netdev) 330 { 331 #ifdef CONFIG_RFS_ACCEL 332 struct hns3_nic_priv *priv = netdev_priv(netdev); 333 struct hns3_enet_tqp_vector *tqp_vector; 334 int i, ret; 335 336 if (!netdev->rx_cpu_rmap) { 337 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); 338 if (!netdev->rx_cpu_rmap) 339 return -ENOMEM; 340 } 341 342 for (i = 0; i < priv->vector_num; i++) { 343 tqp_vector = &priv->tqp_vector[i]; 344 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, 345 tqp_vector->vector_irq); 346 if (ret) { 347 
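/* roll back the rmap allocated above; the caller treats aRFS rmap setup as best-effort (hns3_nic_net_up() only warns on failure) */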
hns3_free_rx_cpu_rmap(netdev); 348 return ret; 349 } 350 } 351 #endif 352 return 0; 353 } 354 355 static int hns3_nic_net_up(struct net_device *netdev) 356 { 357 struct hns3_nic_priv *priv = netdev_priv(netdev); 358 struct hnae3_handle *h = priv->ae_handle; 359 int i, j; 360 int ret; 361 362 ret = hns3_nic_reset_all_ring(h); 363 if (ret) 364 return ret; 365 366 /* the device can work without cpu rmap, only aRFS needs it */ 367 ret = hns3_set_rx_cpu_rmap(netdev); 368 if (ret) 369 netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret); 370 371 /* get irq resource for all vectors */ 372 ret = hns3_nic_init_irq(priv); 373 if (ret) { 374 netdev_err(netdev, "init irq failed! ret=%d\n", ret); 375 goto free_rmap; 376 } 377 378 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 379 380 /* enable the vectors */ 381 for (i = 0; i < priv->vector_num; i++) 382 hns3_vector_enable(&priv->tqp_vector[i]); 383 384 /* enable rcb */ 385 for (j = 0; j < h->kinfo.num_tqps; j++) 386 hns3_tqp_enable(h->kinfo.tqp[j]); 387 388 /* start the ae_dev */ 389 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; 390 if (ret) 391 goto out_start_err; 392 393 return 0; 394 395 out_start_err: 396 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 397 while (j--) 398 hns3_tqp_disable(h->kinfo.tqp[j]); 399 400 for (j = i - 1; j >= 0; j--) 401 hns3_vector_disable(&priv->tqp_vector[j]); 402 403 hns3_nic_uninit_irq(priv); 404 free_rmap: 405 hns3_free_rx_cpu_rmap(netdev); 406 return ret; 407 } 408 409 static void hns3_config_xps(struct hns3_nic_priv *priv) 410 { 411 int i; 412 413 for (i = 0; i < priv->vector_num; i++) { 414 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; 415 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; 416 417 while (ring) { 418 int ret; 419 420 ret = netif_set_xps_queue(priv->netdev, 421 &tqp_vector->affinity_mask, 422 ring->tqp->tqp_index); 423 if (ret) 424 netdev_warn(priv->netdev, 425 "set xps queue failed: %d", ret); 426 427 ring = ring->next; 428 } 429 } 430 } 431 432 static int hns3_nic_net_open(struct net_device *netdev) 433 { 434 struct hns3_nic_priv *priv = netdev_priv(netdev); 435 struct hnae3_handle *h = hns3_get_handle(netdev); 436 struct hnae3_knic_private_info *kinfo; 437 int i, ret; 438 439 if (hns3_nic_resetting(netdev)) 440 return -EBUSY; 441 442 netif_carrier_off(netdev); 443 444 ret = hns3_nic_set_real_num_queue(netdev); 445 if (ret) 446 return ret; 447 448 ret = hns3_nic_net_up(netdev); 449 if (ret) { 450 netdev_err(netdev, "net up fail, ret=%d!\n", ret); 451 return ret; 452 } 453 454 kinfo = &h->kinfo; 455 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 456 netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]); 457 458 if (h->ae_algo->ops->set_timer_task) 459 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); 460 461 hns3_config_xps(priv); 462 return 0; 463 } 464 465 static void hns3_reset_tx_queue(struct hnae3_handle *h) 466 { 467 struct net_device *ndev = h->kinfo.netdev; 468 struct hns3_nic_priv *priv = netdev_priv(ndev); 469 struct netdev_queue *dev_queue; 470 u32 i; 471 472 for (i = 0; i < h->kinfo.num_tqps; i++) { 473 dev_queue = netdev_get_tx_queue(ndev, 474 priv->ring_data[i].queue_index); 475 netdev_tx_reset_queue(dev_queue); 476 } 477 } 478 479 static void hns3_nic_net_down(struct net_device *netdev) 480 { 481 struct hns3_nic_priv *priv = netdev_priv(netdev); 482 struct hnae3_handle *h = hns3_get_handle(netdev); 483 const struct hnae3_ae_ops *ops; 484 int i; 485 486 /* disable vectors */ 487 for (i = 0; i < priv->vector_num; i++) 488 
hns3_vector_disable(&priv->tqp_vector[i]); 489 490 /* disable rcb */ 491 for (i = 0; i < h->kinfo.num_tqps; i++) 492 hns3_tqp_disable(h->kinfo.tqp[i]); 493 494 /* stop ae_dev */ 495 ops = priv->ae_handle->ae_algo->ops; 496 if (ops->stop) 497 ops->stop(priv->ae_handle); 498 499 hns3_free_rx_cpu_rmap(netdev); 500 501 /* free irq resources */ 502 hns3_nic_uninit_irq(priv); 503 504 /* delay ring buffer clearing to hns3_reset_notify_uninit_enet 505 * during reset process, because driver may not be able 506 * to disable the ring through firmware when downing the netdev. 507 */ 508 if (!hns3_nic_resetting(netdev)) 509 hns3_clear_all_ring(priv->ae_handle, false); 510 511 hns3_reset_tx_queue(priv->ae_handle); 512 } 513 514 static int hns3_nic_net_stop(struct net_device *netdev) 515 { 516 struct hns3_nic_priv *priv = netdev_priv(netdev); 517 struct hnae3_handle *h = hns3_get_handle(netdev); 518 519 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 520 return 0; 521 522 if (h->ae_algo->ops->set_timer_task) 523 h->ae_algo->ops->set_timer_task(priv->ae_handle, false); 524 525 netif_tx_stop_all_queues(netdev); 526 netif_carrier_off(netdev); 527 528 hns3_nic_net_down(netdev); 529 530 return 0; 531 } 532 533 static int hns3_nic_uc_sync(struct net_device *netdev, 534 const unsigned char *addr) 535 { 536 struct hnae3_handle *h = hns3_get_handle(netdev); 537 538 if (h->ae_algo->ops->add_uc_addr) 539 return h->ae_algo->ops->add_uc_addr(h, addr); 540 541 return 0; 542 } 543 544 static int hns3_nic_uc_unsync(struct net_device *netdev, 545 const unsigned char *addr) 546 { 547 struct hnae3_handle *h = hns3_get_handle(netdev); 548 549 if (h->ae_algo->ops->rm_uc_addr) 550 return h->ae_algo->ops->rm_uc_addr(h, addr); 551 552 return 0; 553 } 554 555 static int hns3_nic_mc_sync(struct net_device *netdev, 556 const unsigned char *addr) 557 { 558 struct hnae3_handle *h = hns3_get_handle(netdev); 559 560 if (h->ae_algo->ops->add_mc_addr) 561 return h->ae_algo->ops->add_mc_addr(h, addr); 562 563 return 0; 564 } 565 566 static int hns3_nic_mc_unsync(struct net_device *netdev, 567 const unsigned char *addr) 568 { 569 struct hnae3_handle *h = hns3_get_handle(netdev); 570 571 if (h->ae_algo->ops->rm_mc_addr) 572 return h->ae_algo->ops->rm_mc_addr(h, addr); 573 574 return 0; 575 } 576 577 static u8 hns3_get_netdev_flags(struct net_device *netdev) 578 { 579 u8 flags = 0; 580 581 if (netdev->flags & IFF_PROMISC) { 582 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; 583 } else { 584 flags |= HNAE3_VLAN_FLTR; 585 if (netdev->flags & IFF_ALLMULTI) 586 flags |= HNAE3_USER_MPE; 587 } 588 589 return flags; 590 } 591 592 static void hns3_nic_set_rx_mode(struct net_device *netdev) 593 { 594 struct hnae3_handle *h = hns3_get_handle(netdev); 595 u8 new_flags; 596 int ret; 597 598 new_flags = hns3_get_netdev_flags(netdev); 599 600 ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); 601 if (ret) { 602 netdev_err(netdev, "sync uc address fail\n"); 603 if (ret == -ENOSPC) 604 new_flags |= HNAE3_OVERFLOW_UPE; 605 } 606 607 if (netdev->flags & IFF_MULTICAST) { 608 ret = __dev_mc_sync(netdev, hns3_nic_mc_sync, 609 hns3_nic_mc_unsync); 610 if (ret) { 611 netdev_err(netdev, "sync mc address fail\n"); 612 if (ret == -ENOSPC) 613 new_flags |= HNAE3_OVERFLOW_MPE; 614 } 615 } 616 617 /* User mode Promisc mode enable and vlan filtering is disabled to 618 * let all packets in. 
MAC-VLAN Table overflow Promisc enabled and 619 * vlan fitering is enabled 620 */ 621 hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR); 622 h->netdev_flags = new_flags; 623 hns3_update_promisc_mode(netdev, new_flags); 624 } 625 626 int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) 627 { 628 struct hns3_nic_priv *priv = netdev_priv(netdev); 629 struct hnae3_handle *h = priv->ae_handle; 630 631 if (h->ae_algo->ops->set_promisc_mode) { 632 return h->ae_algo->ops->set_promisc_mode(h, 633 promisc_flags & HNAE3_UPE, 634 promisc_flags & HNAE3_MPE); 635 } 636 637 return 0; 638 } 639 640 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) 641 { 642 struct hns3_nic_priv *priv = netdev_priv(netdev); 643 struct hnae3_handle *h = priv->ae_handle; 644 bool last_state; 645 646 if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) { 647 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false; 648 if (enable != last_state) { 649 netdev_info(netdev, 650 "%s vlan filter\n", 651 enable ? "enable" : "disable"); 652 h->ae_algo->ops->enable_vlan_filter(h, enable); 653 } 654 } 655 } 656 657 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, 658 u16 *mss, u32 *type_cs_vlan_tso) 659 { 660 u32 l4_offset, hdr_len; 661 union l3_hdr_info l3; 662 union l4_hdr_info l4; 663 u32 l4_paylen; 664 int ret; 665 666 if (!skb_is_gso(skb)) 667 return 0; 668 669 ret = skb_cow_head(skb, 0); 670 if (unlikely(ret)) 671 return ret; 672 673 l3.hdr = skb_network_header(skb); 674 l4.hdr = skb_transport_header(skb); 675 676 /* Software should clear the IPv4's checksum field when tso is 677 * needed. 678 */ 679 if (l3.v4->version == 4) 680 l3.v4->check = 0; 681 682 /* tunnel packet */ 683 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 684 SKB_GSO_GRE_CSUM | 685 SKB_GSO_UDP_TUNNEL | 686 SKB_GSO_UDP_TUNNEL_CSUM)) { 687 if ((!(skb_shinfo(skb)->gso_type & 688 SKB_GSO_PARTIAL)) && 689 (skb_shinfo(skb)->gso_type & 690 SKB_GSO_UDP_TUNNEL_CSUM)) { 691 /* Software should clear the udp's checksum 692 * field when tso is needed. 693 */ 694 l4.udp->check = 0; 695 } 696 /* reset l3&l4 pointers from outer to inner headers */ 697 l3.hdr = skb_inner_network_header(skb); 698 l4.hdr = skb_inner_transport_header(skb); 699 700 /* Software should clear the IPv4's checksum field when 701 * tso is needed. 
702 */ 703 if (l3.v4->version == 4) 704 l3.v4->check = 0; 705 } 706 707 /* normal or tunnel packet */ 708 l4_offset = l4.hdr - skb->data; 709 hdr_len = (l4.tcp->doff << 2) + l4_offset; 710 711 /* remove payload length from inner pseudo checksum when tso */ 712 l4_paylen = skb->len - l4_offset; 713 csum_replace_by_diff(&l4.tcp->check, 714 (__force __wsum)htonl(l4_paylen)); 715 716 /* find the txbd field values */ 717 *paylen = skb->len - hdr_len; 718 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); 719 720 /* get MSS for TSO */ 721 *mss = skb_shinfo(skb)->gso_size; 722 723 return 0; 724 } 725 726 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, 727 u8 *il4_proto) 728 { 729 union l3_hdr_info l3; 730 unsigned char *l4_hdr; 731 unsigned char *exthdr; 732 u8 l4_proto_tmp; 733 __be16 frag_off; 734 735 /* find outer header point */ 736 l3.hdr = skb_network_header(skb); 737 l4_hdr = skb_transport_header(skb); 738 739 if (skb->protocol == htons(ETH_P_IPV6)) { 740 exthdr = l3.hdr + sizeof(*l3.v6); 741 l4_proto_tmp = l3.v6->nexthdr; 742 if (l4_hdr != exthdr) 743 ipv6_skip_exthdr(skb, exthdr - skb->data, 744 &l4_proto_tmp, &frag_off); 745 } else if (skb->protocol == htons(ETH_P_IP)) { 746 l4_proto_tmp = l3.v4->protocol; 747 } else { 748 return -EINVAL; 749 } 750 751 *ol4_proto = l4_proto_tmp; 752 753 /* tunnel packet */ 754 if (!skb->encapsulation) { 755 *il4_proto = 0; 756 return 0; 757 } 758 759 /* find inner header point */ 760 l3.hdr = skb_inner_network_header(skb); 761 l4_hdr = skb_inner_transport_header(skb); 762 763 if (l3.v6->version == 6) { 764 exthdr = l3.hdr + sizeof(*l3.v6); 765 l4_proto_tmp = l3.v6->nexthdr; 766 if (l4_hdr != exthdr) 767 ipv6_skip_exthdr(skb, exthdr - skb->data, 768 &l4_proto_tmp, &frag_off); 769 } else if (l3.v4->version == 4) { 770 l4_proto_tmp = l3.v4->protocol; 771 } 772 773 *il4_proto = l4_proto_tmp; 774 775 return 0; 776 } 777 778 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL 779 * and it is udp packet, which has a dest port as the IANA assigned. 780 * the hardware is expected to do the checksum offload, but the 781 * hardware will not do the checksum offload when udp dest port is 782 * 4789. 
783 */ 784 static bool hns3_tunnel_csum_bug(struct sk_buff *skb) 785 { 786 union l4_hdr_info l4; 787 788 l4.hdr = skb_transport_header(skb); 789 790 if (!(!skb->encapsulation && 791 l4.udp->dest == htons(IANA_VXLAN_UDP_PORT))) 792 return false; 793 794 skb_checksum_help(skb); 795 796 return true; 797 } 798 799 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 800 u32 *ol_type_vlan_len_msec) 801 { 802 u32 l2_len, l3_len, l4_len; 803 unsigned char *il2_hdr; 804 union l3_hdr_info l3; 805 union l4_hdr_info l4; 806 807 l3.hdr = skb_network_header(skb); 808 l4.hdr = skb_transport_header(skb); 809 810 /* compute OL2 header size, defined in 2 Bytes */ 811 l2_len = l3.hdr - skb->data; 812 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); 813 814 /* compute OL3 header size, defined in 4 Bytes */ 815 l3_len = l4.hdr - l3.hdr; 816 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); 817 818 il2_hdr = skb_inner_mac_header(skb); 819 /* compute OL4 header size, defined in 4 Bytes */ 820 l4_len = il2_hdr - l4.hdr; 821 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); 822 823 /* define outer network header type */ 824 if (skb->protocol == htons(ETH_P_IP)) { 825 if (skb_is_gso(skb)) 826 hns3_set_field(*ol_type_vlan_len_msec, 827 HNS3_TXD_OL3T_S, 828 HNS3_OL3T_IPV4_CSUM); 829 else 830 hns3_set_field(*ol_type_vlan_len_msec, 831 HNS3_TXD_OL3T_S, 832 HNS3_OL3T_IPV4_NO_CSUM); 833 834 } else if (skb->protocol == htons(ETH_P_IPV6)) { 835 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, 836 HNS3_OL3T_IPV6); 837 } 838 839 if (ol4_proto == IPPROTO_UDP) 840 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 841 HNS3_TUN_MAC_IN_UDP); 842 else if (ol4_proto == IPPROTO_GRE) 843 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 844 HNS3_TUN_NVGRE); 845 } 846 847 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 848 u8 il4_proto, u32 *type_cs_vlan_tso, 849 u32 *ol_type_vlan_len_msec) 850 { 851 unsigned char *l2_hdr = skb->data; 852 u32 l4_proto = ol4_proto; 853 union l4_hdr_info l4; 854 union l3_hdr_info l3; 855 u32 l2_len, l3_len; 856 857 l4.hdr = skb_transport_header(skb); 858 l3.hdr = skb_network_header(skb); 859 860 /* handle encapsulation skb */ 861 if (skb->encapsulation) { 862 /* If this is a not UDP/GRE encapsulation skb */ 863 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { 864 /* drop the skb tunnel packet if hardware don't support, 865 * because hardware can't calculate csum when TSO. 866 */ 867 if (skb_is_gso(skb)) 868 return -EDOM; 869 870 /* the stack computes the IP header already, 871 * driver calculate l4 checksum when not TSO. 872 */ 873 skb_checksum_help(skb); 874 return 0; 875 } 876 877 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); 878 879 /* switch to inner header */ 880 l2_hdr = skb_inner_mac_header(skb); 881 l3.hdr = skb_inner_network_header(skb); 882 l4.hdr = skb_inner_transport_header(skb); 883 l4_proto = il4_proto; 884 } 885 886 if (l3.v4->version == 4) { 887 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 888 HNS3_L3T_IPV4); 889 890 /* the stack computes the IP header already, the only time we 891 * need the hardware to recompute it is in the case of TSO. 
892 */ 893 if (skb_is_gso(skb)) 894 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); 895 } else if (l3.v6->version == 6) { 896 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 897 HNS3_L3T_IPV6); 898 } 899 900 /* compute inner(/normal) L2 header size, defined in 2 Bytes */ 901 l2_len = l3.hdr - l2_hdr; 902 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); 903 904 /* compute inner(/normal) L3 header size, defined in 4 Bytes */ 905 l3_len = l4.hdr - l3.hdr; 906 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); 907 908 /* compute inner(/normal) L4 header size, defined in 4 Bytes */ 909 switch (l4_proto) { 910 case IPPROTO_TCP: 911 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 912 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 913 HNS3_L4T_TCP); 914 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 915 l4.tcp->doff); 916 break; 917 case IPPROTO_UDP: 918 if (hns3_tunnel_csum_bug(skb)) 919 break; 920 921 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 922 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 923 HNS3_L4T_UDP); 924 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 925 (sizeof(struct udphdr) >> 2)); 926 break; 927 case IPPROTO_SCTP: 928 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 929 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 930 HNS3_L4T_SCTP); 931 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 932 (sizeof(struct sctphdr) >> 2)); 933 break; 934 default: 935 /* drop the skb tunnel packet if hardware don't support, 936 * because hardware can't calculate csum when TSO. 937 */ 938 if (skb_is_gso(skb)) 939 return -EDOM; 940 941 /* the stack computes the IP header already, 942 * driver calculate l4 checksum when not TSO. 943 */ 944 skb_checksum_help(skb); 945 return 0; 946 } 947 948 return 0; 949 } 950 951 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) 952 { 953 /* Config bd buffer end */ 954 hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); 955 hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); 956 } 957 958 static int hns3_fill_desc_vtags(struct sk_buff *skb, 959 struct hns3_enet_ring *tx_ring, 960 u32 *inner_vlan_flag, 961 u32 *out_vlan_flag, 962 u16 *inner_vtag, 963 u16 *out_vtag) 964 { 965 #define HNS3_TX_VLAN_PRIO_SHIFT 13 966 967 struct hnae3_handle *handle = tx_ring->tqp->handle; 968 969 /* Since HW limitation, if port based insert VLAN enabled, only one VLAN 970 * header is allowed in skb, otherwise it will cause RAS error. 971 */ 972 if (unlikely(skb_vlan_tagged_multi(skb) && 973 handle->port_base_vlan_state == 974 HNAE3_PORT_BASE_VLAN_ENABLE)) 975 return -EINVAL; 976 977 if (skb->protocol == htons(ETH_P_8021Q) && 978 !(tx_ring->tqp->handle->kinfo.netdev->features & 979 NETIF_F_HW_VLAN_CTAG_TX)) { 980 /* When HW VLAN acceleration is turned off, and the stack 981 * sets the protocol to 802.1q, the driver just need to 982 * set the protocol to the encapsulated ethertype. 983 */ 984 skb->protocol = vlan_get_protocol(skb); 985 return 0; 986 } 987 988 if (skb_vlan_tag_present(skb)) { 989 u16 vlan_tag; 990 991 vlan_tag = skb_vlan_tag_get(skb); 992 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT; 993 994 /* Based on hw strategy, use out_vtag in two layer tag case, 995 * and use inner_vtag in one tag case. 
996 */ 997 if (skb->protocol == htons(ETH_P_8021Q)) { 998 if (handle->port_base_vlan_state == 999 HNAE3_PORT_BASE_VLAN_DISABLE){ 1000 hns3_set_field(*out_vlan_flag, 1001 HNS3_TXD_OVLAN_B, 1); 1002 *out_vtag = vlan_tag; 1003 } else { 1004 hns3_set_field(*inner_vlan_flag, 1005 HNS3_TXD_VLAN_B, 1); 1006 *inner_vtag = vlan_tag; 1007 } 1008 } else { 1009 hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); 1010 *inner_vtag = vlan_tag; 1011 } 1012 } else if (skb->protocol == htons(ETH_P_8021Q)) { 1013 struct vlan_ethhdr *vhdr; 1014 int rc; 1015 1016 rc = skb_cow_head(skb, 0); 1017 if (unlikely(rc < 0)) 1018 return rc; 1019 vhdr = (struct vlan_ethhdr *)skb->data; 1020 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) 1021 << HNS3_TX_VLAN_PRIO_SHIFT); 1022 } 1023 1024 skb->protocol = vlan_get_protocol(skb); 1025 return 0; 1026 } 1027 1028 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, 1029 unsigned int size, int frag_end, 1030 enum hns_desc_type type) 1031 { 1032 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 1033 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1034 struct device *dev = ring_to_dev(ring); 1035 struct skb_frag_struct *frag; 1036 unsigned int frag_buf_num; 1037 int k, sizeoflast; 1038 dma_addr_t dma; 1039 1040 if (type == DESC_TYPE_SKB) { 1041 struct sk_buff *skb = (struct sk_buff *)priv; 1042 u32 ol_type_vlan_len_msec = 0; 1043 u32 type_cs_vlan_tso = 0; 1044 u32 paylen = skb->len; 1045 u16 inner_vtag = 0; 1046 u16 out_vtag = 0; 1047 u16 mss = 0; 1048 int ret; 1049 1050 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, 1051 &ol_type_vlan_len_msec, 1052 &inner_vtag, &out_vtag); 1053 if (unlikely(ret)) 1054 return ret; 1055 1056 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1057 u8 ol4_proto, il4_proto; 1058 1059 skb_reset_mac_len(skb); 1060 1061 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); 1062 if (unlikely(ret)) 1063 return ret; 1064 1065 ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, 1066 &type_cs_vlan_tso, 1067 &ol_type_vlan_len_msec); 1068 if (unlikely(ret)) 1069 return ret; 1070 1071 ret = hns3_set_tso(skb, &paylen, &mss, 1072 &type_cs_vlan_tso); 1073 if (unlikely(ret)) 1074 return ret; 1075 } 1076 1077 /* Set txbd */ 1078 desc->tx.ol_type_vlan_len_msec = 1079 cpu_to_le32(ol_type_vlan_len_msec); 1080 desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); 1081 desc->tx.paylen = cpu_to_le32(paylen); 1082 desc->tx.mss = cpu_to_le16(mss); 1083 desc->tx.vlan_tag = cpu_to_le16(inner_vtag); 1084 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); 1085 1086 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); 1087 } else { 1088 frag = (struct skb_frag_struct *)priv; 1089 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); 1090 } 1091 1092 if (unlikely(dma_mapping_error(dev, dma))) { 1093 ring->stats.sw_err_cnt++; 1094 return -ENOMEM; 1095 } 1096 1097 desc_cb->length = size; 1098 1099 if (likely(size <= HNS3_MAX_BD_SIZE)) { 1100 u16 bdtp_fe_sc_vld_ra_ri = 0; 1101 1102 desc_cb->priv = priv; 1103 desc_cb->dma = dma; 1104 desc_cb->type = type; 1105 desc->addr = cpu_to_le64(dma); 1106 desc->tx.send_size = cpu_to_le16(size); 1107 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); 1108 desc->tx.bdtp_fe_sc_vld_ra_ri = 1109 cpu_to_le16(bdtp_fe_sc_vld_ra_ri); 1110 1111 ring_ptr_move_fw(ring, next_to_use); 1112 return 0; 1113 } 1114 1115 frag_buf_num = hns3_tx_bd_count(size); 1116 sizeoflast = size & HNS3_TX_LAST_SIZE_M; 1117 sizeoflast = sizeoflast ? 
sizeoflast : HNS3_MAX_BD_SIZE; 1118 1119 /* When frag size is bigger than hardware limit, split this frag */ 1120 for (k = 0; k < frag_buf_num; k++) { 1121 u16 bdtp_fe_sc_vld_ra_ri = 0; 1122 1123 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ 1124 desc_cb->priv = priv; 1125 desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; 1126 desc_cb->type = (type == DESC_TYPE_SKB && !k) ? 1127 DESC_TYPE_SKB : DESC_TYPE_PAGE; 1128 1129 /* now, fill the descriptor */ 1130 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); 1131 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? 1132 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); 1133 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, 1134 frag_end && (k == frag_buf_num - 1) ? 1135 1 : 0); 1136 desc->tx.bdtp_fe_sc_vld_ra_ri = 1137 cpu_to_le16(bdtp_fe_sc_vld_ra_ri); 1138 1139 /* move ring pointer to next */ 1140 ring_ptr_move_fw(ring, next_to_use); 1141 1142 desc_cb = &ring->desc_cb[ring->next_to_use]; 1143 desc = &ring->desc[ring->next_to_use]; 1144 } 1145 1146 return 0; 1147 } 1148 1149 static int hns3_nic_bd_num(struct sk_buff *skb) 1150 { 1151 int size = skb_headlen(skb); 1152 int i, bd_num; 1153 1154 /* if the total len is within the max bd limit */ 1155 if (likely(skb->len <= HNS3_MAX_BD_SIZE)) 1156 return skb_shinfo(skb)->nr_frags + 1; 1157 1158 bd_num = hns3_tx_bd_count(size); 1159 1160 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1161 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 1162 int frag_bd_num; 1163 1164 size = skb_frag_size(frag); 1165 frag_bd_num = hns3_tx_bd_count(size); 1166 1167 if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG)) 1168 return -ENOMEM; 1169 1170 bd_num += frag_bd_num; 1171 } 1172 1173 return bd_num; 1174 } 1175 1176 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) 1177 { 1178 if (!skb->encapsulation) 1179 return skb_transport_offset(skb) + tcp_hdrlen(skb); 1180 1181 return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); 1182 } 1183 1184 /* HW need every continuous 8 buffer data to be larger than MSS, 1185 * we simplify it by ensuring skb_headlen + the first continuous 1186 * 7 frags to to be larger than gso header len + mss, and the remaining 1187 * continuous 7 frags to be larger than MSS except the last 7 frags. 1188 */ 1189 static bool hns3_skb_need_linearized(struct sk_buff *skb) 1190 { 1191 int bd_limit = HNS3_MAX_BD_PER_FRAG - 1; 1192 unsigned int tot_len = 0; 1193 int i; 1194 1195 for (i = 0; i < bd_limit; i++) 1196 tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]); 1197 1198 /* ensure headlen + the first 7 frags is greater than mss + header 1199 * and the first 7 frags is greater than mss. 
1200 */ 1201 if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size + 1202 hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size)) 1203 return true; 1204 1205 /* ensure the remaining continuous 7 buffer is greater than mss */ 1206 for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) { 1207 tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]); 1208 tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]); 1209 1210 if (tot_len < skb_shinfo(skb)->gso_size) 1211 return true; 1212 } 1213 1214 return false; 1215 } 1216 1217 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, 1218 struct sk_buff **out_skb) 1219 { 1220 struct sk_buff *skb = *out_skb; 1221 int bd_num; 1222 1223 bd_num = hns3_nic_bd_num(skb); 1224 if (bd_num < 0) 1225 return bd_num; 1226 1227 if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) { 1228 struct sk_buff *new_skb; 1229 1230 if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb)) 1231 goto out; 1232 1233 bd_num = hns3_tx_bd_count(skb->len); 1234 if (unlikely(ring_space(ring) < bd_num)) 1235 return -EBUSY; 1236 /* manual split the send packet */ 1237 new_skb = skb_copy(skb, GFP_ATOMIC); 1238 if (!new_skb) 1239 return -ENOMEM; 1240 dev_kfree_skb_any(skb); 1241 *out_skb = new_skb; 1242 1243 u64_stats_update_begin(&ring->syncp); 1244 ring->stats.tx_copy++; 1245 u64_stats_update_end(&ring->syncp); 1246 } 1247 1248 out: 1249 if (unlikely(ring_space(ring) < bd_num)) 1250 return -EBUSY; 1251 1252 return bd_num; 1253 } 1254 1255 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) 1256 { 1257 struct device *dev = ring_to_dev(ring); 1258 unsigned int i; 1259 1260 for (i = 0; i < ring->desc_num; i++) { 1261 /* check if this is where we started */ 1262 if (ring->next_to_use == next_to_use_orig) 1263 break; 1264 1265 /* rollback one */ 1266 ring_ptr_move_bw(ring, next_to_use); 1267 1268 /* unmap the descriptor dma address */ 1269 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) 1270 dma_unmap_single(dev, 1271 ring->desc_cb[ring->next_to_use].dma, 1272 ring->desc_cb[ring->next_to_use].length, 1273 DMA_TO_DEVICE); 1274 else if (ring->desc_cb[ring->next_to_use].length) 1275 dma_unmap_page(dev, 1276 ring->desc_cb[ring->next_to_use].dma, 1277 ring->desc_cb[ring->next_to_use].length, 1278 DMA_TO_DEVICE); 1279 1280 ring->desc_cb[ring->next_to_use].length = 0; 1281 ring->desc_cb[ring->next_to_use].dma = 0; 1282 } 1283 } 1284 1285 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 1286 { 1287 struct hns3_nic_priv *priv = netdev_priv(netdev); 1288 struct hns3_nic_ring_data *ring_data = 1289 &tx_ring_data(priv, skb->queue_mapping); 1290 struct hns3_enet_ring *ring = ring_data->ring; 1291 struct netdev_queue *dev_queue; 1292 struct skb_frag_struct *frag; 1293 int next_to_use_head; 1294 int buf_num; 1295 int seg_num; 1296 int size; 1297 int ret; 1298 int i; 1299 1300 /* Prefetch the data used later */ 1301 prefetch(skb->data); 1302 1303 buf_num = hns3_nic_maybe_stop_tx(ring, &skb); 1304 if (unlikely(buf_num <= 0)) { 1305 if (buf_num == -EBUSY) { 1306 u64_stats_update_begin(&ring->syncp); 1307 ring->stats.tx_busy++; 1308 u64_stats_update_end(&ring->syncp); 1309 goto out_net_tx_busy; 1310 } else if (buf_num == -ENOMEM) { 1311 u64_stats_update_begin(&ring->syncp); 1312 ring->stats.sw_err_cnt++; 1313 u64_stats_update_end(&ring->syncp); 1314 } 1315 1316 if (net_ratelimit()) 1317 netdev_err(netdev, "xmit error: %d!\n", buf_num); 1318 1319 goto out_err_tx_ok; 1320 } 1321 1322 /* No. 
of segments (plus a header) */ 1323 seg_num = skb_shinfo(skb)->nr_frags + 1; 1324 /* Fill the first part */ 1325 size = skb_headlen(skb); 1326 1327 next_to_use_head = ring->next_to_use; 1328 1329 ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, 1330 DESC_TYPE_SKB); 1331 if (unlikely(ret)) 1332 goto fill_err; 1333 1334 /* Fill the fragments */ 1335 for (i = 1; i < seg_num; i++) { 1336 frag = &skb_shinfo(skb)->frags[i - 1]; 1337 size = skb_frag_size(frag); 1338 1339 ret = hns3_fill_desc(ring, frag, size, 1340 seg_num - 1 == i ? 1 : 0, 1341 DESC_TYPE_PAGE); 1342 1343 if (unlikely(ret)) 1344 goto fill_err; 1345 } 1346 1347 /* Complete translate all packets */ 1348 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); 1349 netdev_tx_sent_queue(dev_queue, skb->len); 1350 1351 wmb(); /* Commit all data before submit */ 1352 1353 hnae3_queue_xmit(ring->tqp, buf_num); 1354 1355 return NETDEV_TX_OK; 1356 1357 fill_err: 1358 hns3_clear_desc(ring, next_to_use_head); 1359 1360 out_err_tx_ok: 1361 dev_kfree_skb_any(skb); 1362 return NETDEV_TX_OK; 1363 1364 out_net_tx_busy: 1365 netif_stop_subqueue(netdev, ring_data->queue_index); 1366 smp_mb(); /* Commit all data before submit */ 1367 1368 return NETDEV_TX_BUSY; 1369 } 1370 1371 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) 1372 { 1373 struct hnae3_handle *h = hns3_get_handle(netdev); 1374 struct sockaddr *mac_addr = p; 1375 int ret; 1376 1377 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) 1378 return -EADDRNOTAVAIL; 1379 1380 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { 1381 netdev_info(netdev, "already using mac address %pM\n", 1382 mac_addr->sa_data); 1383 return 0; 1384 } 1385 1386 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); 1387 if (ret) { 1388 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); 1389 return ret; 1390 } 1391 1392 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); 1393 1394 return 0; 1395 } 1396 1397 static int hns3_nic_do_ioctl(struct net_device *netdev, 1398 struct ifreq *ifr, int cmd) 1399 { 1400 struct hnae3_handle *h = hns3_get_handle(netdev); 1401 1402 if (!netif_running(netdev)) 1403 return -EINVAL; 1404 1405 if (!h->ae_algo->ops->do_ioctl) 1406 return -EOPNOTSUPP; 1407 1408 return h->ae_algo->ops->do_ioctl(h, ifr, cmd); 1409 } 1410 1411 static int hns3_nic_set_features(struct net_device *netdev, 1412 netdev_features_t features) 1413 { 1414 netdev_features_t changed = netdev->features ^ features; 1415 struct hns3_nic_priv *priv = netdev_priv(netdev); 1416 struct hnae3_handle *h = priv->ae_handle; 1417 bool enable; 1418 int ret; 1419 1420 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { 1421 enable = !!(features & NETIF_F_GRO_HW); 1422 ret = h->ae_algo->ops->set_gro_en(h, enable); 1423 if (ret) 1424 return ret; 1425 } 1426 1427 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && 1428 h->ae_algo->ops->enable_vlan_filter) { 1429 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); 1430 h->ae_algo->ops->enable_vlan_filter(h, enable); 1431 } 1432 1433 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && 1434 h->ae_algo->ops->enable_hw_strip_rxvtag) { 1435 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); 1436 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); 1437 if (ret) 1438 return ret; 1439 } 1440 1441 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { 1442 enable = !!(features & NETIF_F_NTUPLE); 1443 h->ae_algo->ops->enable_fd(h, enable); 1444 } 1445 1446 netdev->features = features; 
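/* cache the accepted feature set so the next ndo_set_features call computes 'changed' against what was actually applied */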
1447 return 0; 1448 } 1449 1450 static void hns3_nic_get_stats64(struct net_device *netdev, 1451 struct rtnl_link_stats64 *stats) 1452 { 1453 struct hns3_nic_priv *priv = netdev_priv(netdev); 1454 int queue_num = priv->ae_handle->kinfo.num_tqps; 1455 struct hnae3_handle *handle = priv->ae_handle; 1456 struct hns3_enet_ring *ring; 1457 u64 rx_length_errors = 0; 1458 u64 rx_crc_errors = 0; 1459 u64 rx_multicast = 0; 1460 unsigned int start; 1461 u64 tx_errors = 0; 1462 u64 rx_errors = 0; 1463 unsigned int idx; 1464 u64 tx_bytes = 0; 1465 u64 rx_bytes = 0; 1466 u64 tx_pkts = 0; 1467 u64 rx_pkts = 0; 1468 u64 tx_drop = 0; 1469 u64 rx_drop = 0; 1470 1471 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 1472 return; 1473 1474 handle->ae_algo->ops->update_stats(handle, &netdev->stats); 1475 1476 for (idx = 0; idx < queue_num; idx++) { 1477 /* fetch the tx stats */ 1478 ring = priv->ring_data[idx].ring; 1479 do { 1480 start = u64_stats_fetch_begin_irq(&ring->syncp); 1481 tx_bytes += ring->stats.tx_bytes; 1482 tx_pkts += ring->stats.tx_pkts; 1483 tx_drop += ring->stats.sw_err_cnt; 1484 tx_errors += ring->stats.sw_err_cnt; 1485 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1486 1487 /* fetch the rx stats */ 1488 ring = priv->ring_data[idx + queue_num].ring; 1489 do { 1490 start = u64_stats_fetch_begin_irq(&ring->syncp); 1491 rx_bytes += ring->stats.rx_bytes; 1492 rx_pkts += ring->stats.rx_pkts; 1493 rx_drop += ring->stats.l2_err; 1494 rx_errors += ring->stats.l2_err; 1495 rx_errors += ring->stats.l3l4_csum_err; 1496 rx_crc_errors += ring->stats.l2_err; 1497 rx_multicast += ring->stats.rx_multicast; 1498 rx_length_errors += ring->stats.err_pkt_len; 1499 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1500 } 1501 1502 stats->tx_bytes = tx_bytes; 1503 stats->tx_packets = tx_pkts; 1504 stats->rx_bytes = rx_bytes; 1505 stats->rx_packets = rx_pkts; 1506 1507 stats->rx_errors = rx_errors; 1508 stats->multicast = rx_multicast; 1509 stats->rx_length_errors = rx_length_errors; 1510 stats->rx_crc_errors = rx_crc_errors; 1511 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 1512 1513 stats->tx_errors = tx_errors; 1514 stats->rx_dropped = rx_drop; 1515 stats->tx_dropped = tx_drop; 1516 stats->collisions = netdev->stats.collisions; 1517 stats->rx_over_errors = netdev->stats.rx_over_errors; 1518 stats->rx_frame_errors = netdev->stats.rx_frame_errors; 1519 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; 1520 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; 1521 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; 1522 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; 1523 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; 1524 stats->tx_window_errors = netdev->stats.tx_window_errors; 1525 stats->rx_compressed = netdev->stats.rx_compressed; 1526 stats->tx_compressed = netdev->stats.tx_compressed; 1527 } 1528 1529 static int hns3_setup_tc(struct net_device *netdev, void *type_data) 1530 { 1531 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 1532 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; 1533 struct hnae3_knic_private_info *kinfo; 1534 u8 tc = mqprio_qopt->qopt.num_tc; 1535 u16 mode = mqprio_qopt->mode; 1536 u8 hw = mqprio_qopt->qopt.hw; 1537 struct hnae3_handle *h; 1538 1539 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && 1540 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) 1541 return -EOPNOTSUPP; 1542 1543 if (tc > HNAE3_MAX_TC) 1544 return -EINVAL; 1545 1546 if (!netdev) 1547 return -EINVAL; 1548 1549 h = hns3_get_handle(netdev); 
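/* hand the prio_tc map to the DCB component via dcb_ops; without registered dcb_ops, mqprio offload is unsupported */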
1550 kinfo = &h->kinfo; 1551 1552 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 1553 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; 1554 } 1555 1556 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 1557 void *type_data) 1558 { 1559 if (type != TC_SETUP_QDISC_MQPRIO) 1560 return -EOPNOTSUPP; 1561 1562 return hns3_setup_tc(dev, type_data); 1563 } 1564 1565 static int hns3_vlan_rx_add_vid(struct net_device *netdev, 1566 __be16 proto, u16 vid) 1567 { 1568 struct hnae3_handle *h = hns3_get_handle(netdev); 1569 int ret = -EIO; 1570 1571 if (h->ae_algo->ops->set_vlan_filter) 1572 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); 1573 1574 return ret; 1575 } 1576 1577 static int hns3_vlan_rx_kill_vid(struct net_device *netdev, 1578 __be16 proto, u16 vid) 1579 { 1580 struct hnae3_handle *h = hns3_get_handle(netdev); 1581 int ret = -EIO; 1582 1583 if (h->ae_algo->ops->set_vlan_filter) 1584 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); 1585 1586 return ret; 1587 } 1588 1589 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 1590 u8 qos, __be16 vlan_proto) 1591 { 1592 struct hnae3_handle *h = hns3_get_handle(netdev); 1593 int ret = -EIO; 1594 1595 if (h->ae_algo->ops->set_vf_vlan_filter) 1596 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, 1597 qos, vlan_proto); 1598 1599 return ret; 1600 } 1601 1602 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) 1603 { 1604 struct hnae3_handle *h = hns3_get_handle(netdev); 1605 int ret; 1606 1607 if (hns3_nic_resetting(netdev)) 1608 return -EBUSY; 1609 1610 if (!h->ae_algo->ops->set_mtu) 1611 return -EOPNOTSUPP; 1612 1613 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 1614 if (ret) 1615 netdev_err(netdev, "failed to change MTU in hardware %d\n", 1616 ret); 1617 else 1618 netdev->mtu = new_mtu; 1619 1620 return ret; 1621 } 1622 1623 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) 1624 { 1625 struct hns3_nic_priv *priv = netdev_priv(ndev); 1626 struct hnae3_handle *h = hns3_get_handle(ndev); 1627 struct hns3_enet_ring *tx_ring = NULL; 1628 struct napi_struct *napi; 1629 int timeout_queue = 0; 1630 int hw_head, hw_tail; 1631 int fbd_num, fbd_oft; 1632 int ebd_num, ebd_oft; 1633 int bd_num, bd_err; 1634 int ring_en, tc; 1635 int i; 1636 1637 /* Find the stopped queue the same way the stack does */ 1638 for (i = 0; i < ndev->num_tx_queues; i++) { 1639 struct netdev_queue *q; 1640 unsigned long trans_start; 1641 1642 q = netdev_get_tx_queue(ndev, i); 1643 trans_start = q->trans_start; 1644 if (netif_xmit_stopped(q) && 1645 time_after(jiffies, 1646 (trans_start + ndev->watchdog_timeo))) { 1647 timeout_queue = i; 1648 break; 1649 } 1650 } 1651 1652 if (i == ndev->num_tx_queues) { 1653 netdev_info(ndev, 1654 "no netdev TX timeout queue found, timeout count: %llu\n", 1655 priv->tx_timeout_count); 1656 return false; 1657 } 1658 1659 priv->tx_timeout_count++; 1660 1661 tx_ring = priv->ring_data[timeout_queue].ring; 1662 napi = &tx_ring->tqp_vector->napi; 1663 1664 netdev_info(ndev, 1665 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", 1666 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, 1667 tx_ring->next_to_clean, napi->state); 1668 1669 netdev_info(ndev, 1670 "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n", 1671 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, 1672 tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt); 1673 1674 netdev_info(ndev, 1675 
"seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n", 1676 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt, 1677 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); 1678 1679 /* When mac received many pause frames continuous, it's unable to send 1680 * packets, which may cause tx timeout 1681 */ 1682 if (h->ae_algo->ops->update_stats && 1683 h->ae_algo->ops->get_mac_pause_stats) { 1684 u64 tx_pause_cnt, rx_pause_cnt; 1685 1686 h->ae_algo->ops->update_stats(h, &ndev->stats); 1687 h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt, 1688 &rx_pause_cnt); 1689 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", 1690 tx_pause_cnt, rx_pause_cnt); 1691 } 1692 1693 hw_head = readl_relaxed(tx_ring->tqp->io_base + 1694 HNS3_RING_TX_RING_HEAD_REG); 1695 hw_tail = readl_relaxed(tx_ring->tqp->io_base + 1696 HNS3_RING_TX_RING_TAIL_REG); 1697 fbd_num = readl_relaxed(tx_ring->tqp->io_base + 1698 HNS3_RING_TX_RING_FBDNUM_REG); 1699 fbd_oft = readl_relaxed(tx_ring->tqp->io_base + 1700 HNS3_RING_TX_RING_OFFSET_REG); 1701 ebd_num = readl_relaxed(tx_ring->tqp->io_base + 1702 HNS3_RING_TX_RING_EBDNUM_REG); 1703 ebd_oft = readl_relaxed(tx_ring->tqp->io_base + 1704 HNS3_RING_TX_RING_EBD_OFFSET_REG); 1705 bd_num = readl_relaxed(tx_ring->tqp->io_base + 1706 HNS3_RING_TX_RING_BD_NUM_REG); 1707 bd_err = readl_relaxed(tx_ring->tqp->io_base + 1708 HNS3_RING_TX_RING_BD_ERR_REG); 1709 ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); 1710 tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); 1711 1712 netdev_info(ndev, 1713 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", 1714 bd_num, hw_head, hw_tail, bd_err, 1715 readl(tx_ring->tqp_vector->mask_addr)); 1716 netdev_info(ndev, 1717 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", 1718 ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); 1719 1720 return true; 1721 } 1722 1723 static void hns3_nic_net_timeout(struct net_device *ndev) 1724 { 1725 struct hns3_nic_priv *priv = netdev_priv(ndev); 1726 struct hnae3_handle *h = priv->ae_handle; 1727 1728 if (!hns3_get_tx_timeo_queue_info(ndev)) 1729 return; 1730 1731 /* request the reset, and let the hclge to determine 1732 * which reset level should be done 1733 */ 1734 if (h->ae_algo->ops->reset_event) 1735 h->ae_algo->ops->reset_event(h->pdev, h); 1736 } 1737 1738 #ifdef CONFIG_RFS_ACCEL 1739 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 1740 u16 rxq_index, u32 flow_id) 1741 { 1742 struct hnae3_handle *h = hns3_get_handle(dev); 1743 struct flow_keys fkeys; 1744 1745 if (!h->ae_algo->ops->add_arfs_entry) 1746 return -EOPNOTSUPP; 1747 1748 if (skb->encapsulation) 1749 return -EPROTONOSUPPORT; 1750 1751 if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) 1752 return -EPROTONOSUPPORT; 1753 1754 if ((fkeys.basic.n_proto != htons(ETH_P_IP) && 1755 fkeys.basic.n_proto != htons(ETH_P_IPV6)) || 1756 (fkeys.basic.ip_proto != IPPROTO_TCP && 1757 fkeys.basic.ip_proto != IPPROTO_UDP)) 1758 return -EPROTONOSUPPORT; 1759 1760 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); 1761 } 1762 #endif 1763 1764 static const struct net_device_ops hns3_nic_netdev_ops = { 1765 .ndo_open = hns3_nic_net_open, 1766 .ndo_stop = hns3_nic_net_stop, 1767 .ndo_start_xmit = hns3_nic_net_xmit, 1768 .ndo_tx_timeout = hns3_nic_net_timeout, 1769 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 1770 .ndo_do_ioctl = hns3_nic_do_ioctl, 1771 .ndo_change_mtu = 
hns3_nic_change_mtu, 1772 .ndo_set_features = hns3_nic_set_features, 1773 .ndo_get_stats64 = hns3_nic_get_stats64, 1774 .ndo_setup_tc = hns3_nic_setup_tc, 1775 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 1776 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 1777 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 1778 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 1779 #ifdef CONFIG_RFS_ACCEL 1780 .ndo_rx_flow_steer = hns3_rx_flow_steer, 1781 #endif 1782 1783 }; 1784 1785 bool hns3_is_phys_func(struct pci_dev *pdev) 1786 { 1787 u32 dev_id = pdev->device; 1788 1789 switch (dev_id) { 1790 case HNAE3_DEV_ID_GE: 1791 case HNAE3_DEV_ID_25GE: 1792 case HNAE3_DEV_ID_25GE_RDMA: 1793 case HNAE3_DEV_ID_25GE_RDMA_MACSEC: 1794 case HNAE3_DEV_ID_50GE_RDMA: 1795 case HNAE3_DEV_ID_50GE_RDMA_MACSEC: 1796 case HNAE3_DEV_ID_100G_RDMA_MACSEC: 1797 return true; 1798 case HNAE3_DEV_ID_100G_VF: 1799 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: 1800 return false; 1801 default: 1802 dev_warn(&pdev->dev, "un-recognized pci device-id %d", 1803 dev_id); 1804 } 1805 1806 return false; 1807 } 1808 1809 static void hns3_disable_sriov(struct pci_dev *pdev) 1810 { 1811 /* If our VFs are assigned we cannot shut down SR-IOV 1812 * without causing issues, so just leave the hardware 1813 * available but disabled 1814 */ 1815 if (pci_vfs_assigned(pdev)) { 1816 dev_warn(&pdev->dev, 1817 "disabling driver while VFs are assigned\n"); 1818 return; 1819 } 1820 1821 pci_disable_sriov(pdev); 1822 } 1823 1824 static void hns3_get_dev_capability(struct pci_dev *pdev, 1825 struct hnae3_ae_dev *ae_dev) 1826 { 1827 if (pdev->revision >= 0x21) { 1828 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1); 1829 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1); 1830 } 1831 } 1832 1833 /* hns3_probe - Device initialization routine 1834 * @pdev: PCI device information struct 1835 * @ent: entry in hns3_pci_tbl 1836 * 1837 * hns3_probe initializes a PF identified by a pci_dev structure. 1838 * The OS initialization, configuring of the PF private structure, 1839 * and a hardware reset occur. 1840 * 1841 * Returns 0 on success, negative on failure 1842 */ 1843 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1844 { 1845 struct hnae3_ae_dev *ae_dev; 1846 int ret; 1847 1848 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); 1849 if (!ae_dev) { 1850 ret = -ENOMEM; 1851 return ret; 1852 } 1853 1854 ae_dev->pdev = pdev; 1855 ae_dev->flag = ent->driver_data; 1856 ae_dev->reset_type = HNAE3_NONE_RESET; 1857 hns3_get_dev_capability(pdev, ae_dev); 1858 pci_set_drvdata(pdev, ae_dev); 1859 1860 ret = hnae3_register_ae_dev(ae_dev); 1861 if (ret) { 1862 devm_kfree(&pdev->dev, ae_dev); 1863 pci_set_drvdata(pdev, NULL); 1864 } 1865 1866 return ret; 1867 } 1868 1869 /* hns3_remove - Device removal routine 1870 * @pdev: PCI device information struct 1871 */ 1872 static void hns3_remove(struct pci_dev *pdev) 1873 { 1874 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1875 1876 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) 1877 hns3_disable_sriov(pdev); 1878 1879 hnae3_unregister_ae_dev(ae_dev); 1880 pci_set_drvdata(pdev, NULL); 1881 } 1882 1883 /** 1884 * hns3_pci_sriov_configure 1885 * @pdev: pointer to a pci_dev structure 1886 * @num_vfs: number of VFs to allocate 1887 * 1888 * Enable or change the number of VFs. Called when the user updates the number 1889 * of VFs in sysfs. 
1890 **/ 1891 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 1892 { 1893 int ret; 1894 1895 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { 1896 dev_warn(&pdev->dev, "Can not config SRIOV\n"); 1897 return -EINVAL; 1898 } 1899 1900 if (num_vfs) { 1901 ret = pci_enable_sriov(pdev, num_vfs); 1902 if (ret) 1903 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); 1904 else 1905 return num_vfs; 1906 } else if (!pci_vfs_assigned(pdev)) { 1907 pci_disable_sriov(pdev); 1908 } else { 1909 dev_warn(&pdev->dev, 1910 "Unable to free VFs because some are assigned to VMs.\n"); 1911 } 1912 1913 return 0; 1914 } 1915 1916 static void hns3_shutdown(struct pci_dev *pdev) 1917 { 1918 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1919 1920 hnae3_unregister_ae_dev(ae_dev); 1921 devm_kfree(&pdev->dev, ae_dev); 1922 pci_set_drvdata(pdev, NULL); 1923 1924 if (system_state == SYSTEM_POWER_OFF) 1925 pci_set_power_state(pdev, PCI_D3hot); 1926 } 1927 1928 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, 1929 pci_channel_state_t state) 1930 { 1931 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1932 pci_ers_result_t ret; 1933 1934 dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); 1935 1936 if (state == pci_channel_io_perm_failure) 1937 return PCI_ERS_RESULT_DISCONNECT; 1938 1939 if (!ae_dev || !ae_dev->ops) { 1940 dev_err(&pdev->dev, 1941 "Can't recover - error happened before device initialized\n"); 1942 return PCI_ERS_RESULT_NONE; 1943 } 1944 1945 if (ae_dev->ops->handle_hw_ras_error) 1946 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); 1947 else 1948 return PCI_ERS_RESULT_NONE; 1949 1950 return ret; 1951 } 1952 1953 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) 1954 { 1955 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1956 const struct hnae3_ae_ops *ops; 1957 enum hnae3_reset_type reset_type; 1958 struct device *dev = &pdev->dev; 1959 1960 if (!ae_dev || !ae_dev->ops) 1961 return PCI_ERS_RESULT_NONE; 1962 1963 ops = ae_dev->ops; 1964 /* request the reset */ 1965 if (ops->reset_event) { 1966 if (ae_dev->hw_err_reset_req) { 1967 reset_type = ops->get_reset_level(ae_dev, 1968 &ae_dev->hw_err_reset_req); 1969 ops->set_default_reset_request(ae_dev, reset_type); 1970 dev_info(dev, "requesting reset due to PCI error\n"); 1971 ops->reset_event(pdev, NULL); 1972 } 1973 1974 return PCI_ERS_RESULT_RECOVERED; 1975 } 1976 1977 return PCI_ERS_RESULT_DISCONNECT; 1978 } 1979 1980 static void hns3_reset_prepare(struct pci_dev *pdev) 1981 { 1982 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1983 1984 dev_info(&pdev->dev, "hns3 flr prepare\n"); 1985 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) 1986 ae_dev->ops->flr_prepare(ae_dev); 1987 } 1988 1989 static void hns3_reset_done(struct pci_dev *pdev) 1990 { 1991 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1992 1993 dev_info(&pdev->dev, "hns3 flr done\n"); 1994 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) 1995 ae_dev->ops->flr_done(ae_dev); 1996 } 1997 1998 static const struct pci_error_handlers hns3_err_handler = { 1999 .error_detected = hns3_error_detected, 2000 .slot_reset = hns3_slot_reset, 2001 .reset_prepare = hns3_reset_prepare, 2002 .reset_done = hns3_reset_done, 2003 }; 2004 2005 static struct pci_driver hns3_driver = { 2006 .name = hns3_driver_name, 2007 .id_table = hns3_pci_tbl, 2008 .probe = hns3_probe, 2009 .remove = hns3_remove, 2010 .shutdown = hns3_shutdown, 2011 .sriov_configure = hns3_pci_sriov_configure, 2012 .err_handler = 
&hns3_err_handler, 2013 }; 2014 2015 /* set default feature to hns3 */ 2016 static void hns3_set_default_feature(struct net_device *netdev) 2017 { 2018 struct hnae3_handle *h = hns3_get_handle(netdev); 2019 struct pci_dev *pdev = h->pdev; 2020 2021 netdev->priv_flags |= IFF_UNICAST_FLT; 2022 2023 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2024 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2025 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2026 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2027 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; 2028 2029 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 2030 2031 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 2032 2033 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2034 NETIF_F_HW_VLAN_CTAG_FILTER | 2035 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2036 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2037 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2038 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2039 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; 2040 2041 netdev->vlan_features |= 2042 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | 2043 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | 2044 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2045 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2046 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; 2047 2048 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2049 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2050 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2051 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2052 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2053 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; 2054 2055 if (pdev->revision >= 0x21) { 2056 netdev->hw_features |= NETIF_F_GRO_HW; 2057 netdev->features |= NETIF_F_GRO_HW; 2058 2059 if (!(h->flags & HNAE3_SUPPORT_VF)) { 2060 netdev->hw_features |= NETIF_F_NTUPLE; 2061 netdev->features |= NETIF_F_NTUPLE; 2062 } 2063 } 2064 } 2065 2066 static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 2067 struct hns3_desc_cb *cb) 2068 { 2069 unsigned int order = hnae3_page_order(ring); 2070 struct page *p; 2071 2072 p = dev_alloc_pages(order); 2073 if (!p) 2074 return -ENOMEM; 2075 2076 cb->priv = p; 2077 cb->page_offset = 0; 2078 cb->reuse_flag = 0; 2079 cb->buf = page_address(p); 2080 cb->length = hnae3_page_size(ring); 2081 cb->type = DESC_TYPE_PAGE; 2082 2083 return 0; 2084 } 2085 2086 static void hns3_free_buffer(struct hns3_enet_ring *ring, 2087 struct hns3_desc_cb *cb) 2088 { 2089 if (cb->type == DESC_TYPE_SKB) 2090 dev_kfree_skb_any((struct sk_buff *)cb->priv); 2091 else if (!HNAE3_IS_TX_RING(ring)) 2092 put_page((struct page *)cb->priv); 2093 memset(cb, 0, sizeof(*cb)); 2094 } 2095 2096 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) 2097 { 2098 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 2099 cb->length, ring_to_dma_dir(ring)); 2100 2101 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) 2102 return -EIO; 2103 2104 return 0; 2105 } 2106 2107 static void hns3_unmap_buffer(struct hns3_enet_ring *ring, 2108 struct hns3_desc_cb *cb) 2109 { 2110 if (cb->type == DESC_TYPE_SKB) 2111 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 2112 ring_to_dma_dir(ring)); 2113 else if (cb->length) 2114 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 2115 ring_to_dma_dir(ring)); 2116 } 2117 2118 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) 2119 { 2120 
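	/* A "detach" only breaks the descriptor/buffer association: the DMA
	 * mapping is torn down and the BD address cleared, while the page or
	 * skb itself stays owned by desc_cb until hns3_free_buffer() releases
	 * it (hns3_free_buffer_detach() below does both steps).
	 */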
hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2121 ring->desc[i].addr = 0; 2122 } 2123 2124 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) 2125 { 2126 struct hns3_desc_cb *cb = &ring->desc_cb[i]; 2127 2128 if (!ring->desc_cb[i].dma) 2129 return; 2130 2131 hns3_buffer_detach(ring, i); 2132 hns3_free_buffer(ring, cb); 2133 } 2134 2135 static void hns3_free_buffers(struct hns3_enet_ring *ring) 2136 { 2137 int i; 2138 2139 for (i = 0; i < ring->desc_num; i++) 2140 hns3_free_buffer_detach(ring, i); 2141 } 2142 2143 /* free desc along with its attached buffer */ 2144 static void hns3_free_desc(struct hns3_enet_ring *ring) 2145 { 2146 int size = ring->desc_num * sizeof(ring->desc[0]); 2147 2148 hns3_free_buffers(ring); 2149 2150 if (ring->desc) { 2151 dma_free_coherent(ring_to_dev(ring), size, 2152 ring->desc, ring->desc_dma_addr); 2153 ring->desc = NULL; 2154 } 2155 } 2156 2157 static int hns3_alloc_desc(struct hns3_enet_ring *ring) 2158 { 2159 int size = ring->desc_num * sizeof(ring->desc[0]); 2160 2161 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, 2162 &ring->desc_dma_addr, GFP_KERNEL); 2163 if (!ring->desc) 2164 return -ENOMEM; 2165 2166 return 0; 2167 } 2168 2169 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, 2170 struct hns3_desc_cb *cb) 2171 { 2172 int ret; 2173 2174 ret = hns3_alloc_buffer(ring, cb); 2175 if (ret) 2176 goto out; 2177 2178 ret = hns3_map_buffer(ring, cb); 2179 if (ret) 2180 goto out_with_buf; 2181 2182 return 0; 2183 2184 out_with_buf: 2185 hns3_free_buffer(ring, cb); 2186 out: 2187 return ret; 2188 } 2189 2190 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) 2191 { 2192 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); 2193 2194 if (ret) 2195 return ret; 2196 2197 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2198 2199 return 0; 2200 } 2201 2202 /* Allocate memory for raw pkg, and map with dma */ 2203 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) 2204 { 2205 int i, j, ret; 2206 2207 for (i = 0; i < ring->desc_num; i++) { 2208 ret = hns3_alloc_buffer_attach(ring, i); 2209 if (ret) 2210 goto out_buffer_fail; 2211 } 2212 2213 return 0; 2214 2215 out_buffer_fail: 2216 for (j = i - 1; j >= 0; j--) 2217 hns3_free_buffer_detach(ring, j); 2218 return ret; 2219 } 2220 2221 /* detach a in-used buffer and replace with a reserved one */ 2222 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, 2223 struct hns3_desc_cb *res_cb) 2224 { 2225 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2226 ring->desc_cb[i] = *res_cb; 2227 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2228 ring->desc[i].rx.bd_base_info = 0; 2229 } 2230 2231 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) 2232 { 2233 ring->desc_cb[i].reuse_flag = 0; 2234 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + 2235 ring->desc_cb[i].page_offset); 2236 ring->desc[i].rx.bd_base_info = 0; 2237 } 2238 2239 static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head, 2240 int *bytes, int *pkts) 2241 { 2242 int ntc = ring->next_to_clean; 2243 struct hns3_desc_cb *desc_cb; 2244 2245 while (head != ntc) { 2246 desc_cb = &ring->desc_cb[ntc]; 2247 (*pkts) += (desc_cb->type == DESC_TYPE_SKB); 2248 (*bytes) += desc_cb->length; 2249 /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ 2250 hns3_free_buffer_detach(ring, ntc); 2251 2252 if (++ntc == ring->desc_num) 2253 ntc = 0; 2254 2255 /* Issue prefetch for next Tx descriptor */ 2256 
prefetch(&ring->desc_cb[ntc]); 2257 } 2258 2259 /* This smp_store_release() pairs with smp_load_acquire() in 2260 * ring_space called by hns3_nic_net_xmit. 2261 */ 2262 smp_store_release(&ring->next_to_clean, ntc); 2263 } 2264 2265 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) 2266 { 2267 int u = ring->next_to_use; 2268 int c = ring->next_to_clean; 2269 2270 if (unlikely(h > ring->desc_num)) 2271 return 0; 2272 2273 return u > c ? (h > c && h <= u) : (h > c || h <= u); 2274 } 2275 2276 void hns3_clean_tx_ring(struct hns3_enet_ring *ring) 2277 { 2278 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2279 struct hns3_nic_priv *priv = netdev_priv(netdev); 2280 struct netdev_queue *dev_queue; 2281 int bytes, pkts; 2282 int head; 2283 2284 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); 2285 rmb(); /* Make sure head is ready before touch any data */ 2286 2287 if (is_ring_empty(ring) || head == ring->next_to_clean) 2288 return; /* no data to poll */ 2289 2290 if (unlikely(!is_valid_clean_head(ring, head))) { 2291 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, 2292 ring->next_to_use, ring->next_to_clean); 2293 2294 u64_stats_update_begin(&ring->syncp); 2295 ring->stats.io_err_cnt++; 2296 u64_stats_update_end(&ring->syncp); 2297 return; 2298 } 2299 2300 bytes = 0; 2301 pkts = 0; 2302 hns3_nic_reclaim_desc(ring, head, &bytes, &pkts); 2303 2304 ring->tqp_vector->tx_group.total_bytes += bytes; 2305 ring->tqp_vector->tx_group.total_packets += pkts; 2306 2307 u64_stats_update_begin(&ring->syncp); 2308 ring->stats.tx_bytes += bytes; 2309 ring->stats.tx_pkts += pkts; 2310 u64_stats_update_end(&ring->syncp); 2311 2312 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 2313 netdev_tx_completed_queue(dev_queue, pkts, bytes); 2314 2315 if (unlikely(pkts && netif_carrier_ok(netdev) && 2316 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { 2317 /* Make sure that anybody stopping the queue after this 2318 * sees the new next_to_clean. 2319 */ 2320 smp_mb(); 2321 if (netif_tx_queue_stopped(dev_queue) && 2322 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 2323 netif_tx_wake_queue(dev_queue); 2324 ring->stats.restart_queue++; 2325 } 2326 } 2327 } 2328 2329 static int hns3_desc_unused(struct hns3_enet_ring *ring) 2330 { 2331 int ntc = ring->next_to_clean; 2332 int ntu = ring->next_to_use; 2333 2334 return ((ntc >= ntu) ? 
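	/* Illustrative numbers only: with desc_num = 512, next_to_use = 500
	 * and next_to_clean = 10 the ring has wrapped, so the unused BD count
	 * is 512 + 10 - 500 = 22; without a wrap (ntc >= ntu) it is simply
	 * ntc - ntu.
	 */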
0 : ring->desc_num) + ntc - ntu; 2335 } 2336 2337 static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, 2338 int cleand_count) 2339 { 2340 struct hns3_desc_cb *desc_cb; 2341 struct hns3_desc_cb res_cbs; 2342 int i, ret; 2343 2344 for (i = 0; i < cleand_count; i++) { 2345 desc_cb = &ring->desc_cb[ring->next_to_use]; 2346 if (desc_cb->reuse_flag) { 2347 u64_stats_update_begin(&ring->syncp); 2348 ring->stats.reuse_pg_cnt++; 2349 u64_stats_update_end(&ring->syncp); 2350 2351 hns3_reuse_buffer(ring, ring->next_to_use); 2352 } else { 2353 ret = hns3_reserve_buffer_map(ring, &res_cbs); 2354 if (ret) { 2355 u64_stats_update_begin(&ring->syncp); 2356 ring->stats.sw_err_cnt++; 2357 u64_stats_update_end(&ring->syncp); 2358 2359 netdev_err(ring->tqp->handle->kinfo.netdev, 2360 "hnae reserve buffer map failed.\n"); 2361 break; 2362 } 2363 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 2364 2365 u64_stats_update_begin(&ring->syncp); 2366 ring->stats.non_reuse_pg++; 2367 u64_stats_update_end(&ring->syncp); 2368 } 2369 2370 ring_ptr_move_fw(ring, next_to_use); 2371 } 2372 2373 wmb(); /* Make all data has been write before submit */ 2374 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 2375 } 2376 2377 static void hns3_nic_reuse_page(struct sk_buff *skb, int i, 2378 struct hns3_enet_ring *ring, int pull_len, 2379 struct hns3_desc_cb *desc_cb) 2380 { 2381 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 2382 int size = le16_to_cpu(desc->rx.size); 2383 u32 truesize = hnae3_buf_size(ring); 2384 2385 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 2386 size - pull_len, truesize); 2387 2388 /* Avoid re-using remote pages, or the stack is still using the page 2389 * when page_offset rollback to zero, flag default unreuse 2390 */ 2391 if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) || 2392 (!desc_cb->page_offset && page_count(desc_cb->priv) > 1)) 2393 return; 2394 2395 /* Move offset up to the next cache line */ 2396 desc_cb->page_offset += truesize; 2397 2398 if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) { 2399 desc_cb->reuse_flag = 1; 2400 /* Bump ref count on page before it is given */ 2401 get_page(desc_cb->priv); 2402 } else if (page_count(desc_cb->priv) == 1) { 2403 desc_cb->reuse_flag = 1; 2404 desc_cb->page_offset = 0; 2405 get_page(desc_cb->priv); 2406 } 2407 } 2408 2409 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) 2410 { 2411 __be16 type = skb->protocol; 2412 struct tcphdr *th; 2413 int depth = 0; 2414 2415 while (eth_type_vlan(type)) { 2416 struct vlan_hdr *vh; 2417 2418 if ((depth + VLAN_HLEN) > skb_headlen(skb)) 2419 return -EFAULT; 2420 2421 vh = (struct vlan_hdr *)(skb->data + depth); 2422 type = vh->h_vlan_encapsulated_proto; 2423 depth += VLAN_HLEN; 2424 } 2425 2426 skb_set_network_header(skb, depth); 2427 2428 if (type == htons(ETH_P_IP)) { 2429 const struct iphdr *iph = ip_hdr(skb); 2430 2431 depth += sizeof(struct iphdr); 2432 skb_set_transport_header(skb, depth); 2433 th = tcp_hdr(skb); 2434 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, 2435 iph->daddr, 0); 2436 } else if (type == htons(ETH_P_IPV6)) { 2437 const struct ipv6hdr *iph = ipv6_hdr(skb); 2438 2439 depth += sizeof(struct ipv6hdr); 2440 skb_set_transport_header(skb, depth); 2441 th = tcp_hdr(skb); 2442 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, 2443 &iph->daddr, 0); 2444 } else { 2445 netdev_err(skb->dev, 2446 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", 2447 
be16_to_cpu(type), depth); 2448 return -EFAULT; 2449 } 2450 2451 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 2452 if (th->cwr) 2453 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 2454 2455 if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) 2456 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; 2457 2458 skb->csum_start = (unsigned char *)th - skb->head; 2459 skb->csum_offset = offsetof(struct tcphdr, check); 2460 skb->ip_summed = CHECKSUM_PARTIAL; 2461 return 0; 2462 } 2463 2464 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, 2465 u32 l234info, u32 bd_base_info, u32 ol_info) 2466 { 2467 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2468 int l3_type, l4_type; 2469 int ol4_type; 2470 2471 skb->ip_summed = CHECKSUM_NONE; 2472 2473 skb_checksum_none_assert(skb); 2474 2475 if (!(netdev->features & NETIF_F_RXCSUM)) 2476 return; 2477 2478 /* check if hardware has done checksum */ 2479 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) 2480 return; 2481 2482 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | 2483 BIT(HNS3_RXD_OL3E_B) | 2484 BIT(HNS3_RXD_OL4E_B)))) { 2485 u64_stats_update_begin(&ring->syncp); 2486 ring->stats.l3l4_csum_err++; 2487 u64_stats_update_end(&ring->syncp); 2488 2489 return; 2490 } 2491 2492 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, 2493 HNS3_RXD_OL4ID_S); 2494 switch (ol4_type) { 2495 case HNS3_OL4_TYPE_MAC_IN_UDP: 2496 case HNS3_OL4_TYPE_NVGRE: 2497 skb->csum_level = 1; 2498 /* fall through */ 2499 case HNS3_OL4_TYPE_NO_TUN: 2500 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, 2501 HNS3_RXD_L3ID_S); 2502 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, 2503 HNS3_RXD_L4ID_S); 2504 2505 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ 2506 if ((l3_type == HNS3_L3_TYPE_IPV4 || 2507 l3_type == HNS3_L3_TYPE_IPV6) && 2508 (l4_type == HNS3_L4_TYPE_UDP || 2509 l4_type == HNS3_L4_TYPE_TCP || 2510 l4_type == HNS3_L4_TYPE_SCTP)) 2511 skb->ip_summed = CHECKSUM_UNNECESSARY; 2512 break; 2513 default: 2514 break; 2515 } 2516 } 2517 2518 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) 2519 { 2520 if (skb_has_frag_list(skb)) 2521 napi_gro_flush(&ring->tqp_vector->napi, false); 2522 2523 napi_gro_receive(&ring->tqp_vector->napi, skb); 2524 } 2525 2526 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, 2527 struct hns3_desc *desc, u32 l234info, 2528 u16 *vlan_tag) 2529 { 2530 struct hnae3_handle *handle = ring->tqp->handle; 2531 struct pci_dev *pdev = ring->tqp->handle->pdev; 2532 2533 if (pdev->revision == 0x20) { 2534 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2535 if (!(*vlan_tag & VLAN_VID_MASK)) 2536 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2537 2538 return (*vlan_tag != 0); 2539 } 2540 2541 #define HNS3_STRP_OUTER_VLAN 0x1 2542 #define HNS3_STRP_INNER_VLAN 0x2 2543 #define HNS3_STRP_BOTH 0x3 2544 2545 /* Hardware always insert VLAN tag into RX descriptor when 2546 * remove the tag from packet, driver needs to determine 2547 * reporting which tag to stack. 
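 *
 * In short (see the switch below): with no port based VLAN configured,
 * a single stripped tag (outer or inner) is reported as-is and the
 * outer tag wins when both were stripped; with a port based VLAN in
 * place, a lone stripped tag is not passed up at all and only the
 * inner tag of a double-tagged frame reaches the stack.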
2548 */ 2549 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, 2550 HNS3_RXD_STRP_TAGP_S)) { 2551 case HNS3_STRP_OUTER_VLAN: 2552 if (handle->port_base_vlan_state != 2553 HNAE3_PORT_BASE_VLAN_DISABLE) 2554 return false; 2555 2556 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2557 return true; 2558 case HNS3_STRP_INNER_VLAN: 2559 if (handle->port_base_vlan_state != 2560 HNAE3_PORT_BASE_VLAN_DISABLE) 2561 return false; 2562 2563 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2564 return true; 2565 case HNS3_STRP_BOTH: 2566 if (handle->port_base_vlan_state == 2567 HNAE3_PORT_BASE_VLAN_DISABLE) 2568 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2569 else 2570 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2571 2572 return true; 2573 default: 2574 return false; 2575 } 2576 } 2577 2578 static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length, 2579 unsigned char *va) 2580 { 2581 #define HNS3_NEED_ADD_FRAG 1 2582 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; 2583 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2584 struct sk_buff *skb; 2585 2586 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); 2587 skb = ring->skb; 2588 if (unlikely(!skb)) { 2589 netdev_err(netdev, "alloc rx skb fail\n"); 2590 2591 u64_stats_update_begin(&ring->syncp); 2592 ring->stats.sw_err_cnt++; 2593 u64_stats_update_end(&ring->syncp); 2594 2595 return -ENOMEM; 2596 } 2597 2598 prefetchw(skb->data); 2599 2600 ring->pending_buf = 1; 2601 ring->frag_num = 0; 2602 ring->tail_skb = NULL; 2603 if (length <= HNS3_RX_HEAD_SIZE) { 2604 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 2605 2606 /* We can reuse buffer as-is, just make sure it is local */ 2607 if (likely(page_to_nid(desc_cb->priv) == numa_mem_id())) 2608 desc_cb->reuse_flag = 1; 2609 else /* This page cannot be reused so discard it */ 2610 put_page(desc_cb->priv); 2611 2612 ring_ptr_move_fw(ring, next_to_clean); 2613 return 0; 2614 } 2615 u64_stats_update_begin(&ring->syncp); 2616 ring->stats.seg_pkt_cnt++; 2617 u64_stats_update_end(&ring->syncp); 2618 2619 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); 2620 __skb_put(skb, ring->pull_len); 2621 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, 2622 desc_cb); 2623 ring_ptr_move_fw(ring, next_to_clean); 2624 2625 return HNS3_NEED_ADD_FRAG; 2626 } 2627 2628 static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, 2629 struct sk_buff **out_skb, bool pending) 2630 { 2631 struct sk_buff *skb = *out_skb; 2632 struct sk_buff *head_skb = *out_skb; 2633 struct sk_buff *new_skb; 2634 struct hns3_desc_cb *desc_cb; 2635 struct hns3_desc *pre_desc; 2636 u32 bd_base_info; 2637 int pre_bd; 2638 2639 /* if there is pending bd, the SW param next_to_clean has moved 2640 * to next and the next is NULL 2641 */ 2642 if (pending) { 2643 pre_bd = (ring->next_to_clean - 1 + ring->desc_num) % 2644 ring->desc_num; 2645 pre_desc = &ring->desc[pre_bd]; 2646 bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info); 2647 } else { 2648 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2649 } 2650 2651 while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { 2652 desc = &ring->desc[ring->next_to_clean]; 2653 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2654 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2655 /* make sure HW write desc complete */ 2656 dma_rmb(); 2657 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) 2658 return -ENXIO; 2659 2660 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { 2661 new_skb = 
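			/* the head skb has no free frag slots left, so the
			 * packet is extended with a fresh skb chained via
			 * frag_list (see the tail_skb handling just below)
			 */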
napi_alloc_skb(&ring->tqp_vector->napi, 2662 HNS3_RX_HEAD_SIZE); 2663 if (unlikely(!new_skb)) { 2664 netdev_err(ring->tqp->handle->kinfo.netdev, 2665 "alloc rx skb frag fail\n"); 2666 return -ENXIO; 2667 } 2668 ring->frag_num = 0; 2669 2670 if (ring->tail_skb) { 2671 ring->tail_skb->next = new_skb; 2672 ring->tail_skb = new_skb; 2673 } else { 2674 skb_shinfo(skb)->frag_list = new_skb; 2675 ring->tail_skb = new_skb; 2676 } 2677 } 2678 2679 if (ring->tail_skb) { 2680 head_skb->truesize += hnae3_buf_size(ring); 2681 head_skb->data_len += le16_to_cpu(desc->rx.size); 2682 head_skb->len += le16_to_cpu(desc->rx.size); 2683 skb = ring->tail_skb; 2684 } 2685 2686 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); 2687 ring_ptr_move_fw(ring, next_to_clean); 2688 ring->pending_buf++; 2689 } 2690 2691 return 0; 2692 } 2693 2694 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, 2695 struct sk_buff *skb, u32 l234info, 2696 u32 bd_base_info, u32 ol_info) 2697 { 2698 u32 l3_type; 2699 2700 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, 2701 HNS3_RXD_GRO_SIZE_M, 2702 HNS3_RXD_GRO_SIZE_S); 2703 /* if there is no HW GRO, do not set gro params */ 2704 if (!skb_shinfo(skb)->gso_size) { 2705 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info); 2706 return 0; 2707 } 2708 2709 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, 2710 HNS3_RXD_GRO_COUNT_M, 2711 HNS3_RXD_GRO_COUNT_S); 2712 2713 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); 2714 if (l3_type == HNS3_L3_TYPE_IPV4) 2715 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 2716 else if (l3_type == HNS3_L3_TYPE_IPV6) 2717 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 2718 else 2719 return -EFAULT; 2720 2721 return hns3_gro_complete(skb, l234info); 2722 } 2723 2724 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, 2725 struct sk_buff *skb, u32 rss_hash) 2726 { 2727 struct hnae3_handle *handle = ring->tqp->handle; 2728 enum pkt_hash_types rss_type; 2729 2730 if (rss_hash) 2731 rss_type = handle->kinfo.rss_type; 2732 else 2733 rss_type = PKT_HASH_TYPE_NONE; 2734 2735 skb_set_hash(skb, rss_hash, rss_type); 2736 } 2737 2738 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) 2739 { 2740 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2741 enum hns3_pkt_l2t_type l2_frame_type; 2742 u32 bd_base_info, l234info, ol_info; 2743 struct hns3_desc *desc; 2744 unsigned int len; 2745 int pre_ntc, ret; 2746 2747 /* bdinfo handled below is only valid on the last BD of the 2748 * current packet, and ring->next_to_clean indicates the first 2749 * descriptor of next packet, so need - 1 below. 2750 */ 2751 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : 2752 (ring->desc_num - 1); 2753 desc = &ring->desc[pre_ntc]; 2754 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2755 l234info = le32_to_cpu(desc->rx.l234_info); 2756 ol_info = le32_to_cpu(desc->rx.ol_info); 2757 2758 /* Based on hw strategy, the tag offloaded will be stored at 2759 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 2760 * in one layer tag case. 
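 * hns3_parse_vlan_tag() sorts that out and only reports a tag when the
 * stack should actually see one.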
2761 */ 2762 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 2763 u16 vlan_tag; 2764 2765 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) 2766 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 2767 vlan_tag); 2768 } 2769 2770 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | 2771 BIT(HNS3_RXD_L2E_B))))) { 2772 u64_stats_update_begin(&ring->syncp); 2773 if (l234info & BIT(HNS3_RXD_L2E_B)) 2774 ring->stats.l2_err++; 2775 else 2776 ring->stats.err_pkt_len++; 2777 u64_stats_update_end(&ring->syncp); 2778 2779 return -EFAULT; 2780 } 2781 2782 len = skb->len; 2783 2784 /* Do update ip stack process */ 2785 skb->protocol = eth_type_trans(skb, netdev); 2786 2787 /* This is needed in order to enable forwarding support */ 2788 ret = hns3_set_gro_and_checksum(ring, skb, l234info, 2789 bd_base_info, ol_info); 2790 if (unlikely(ret)) { 2791 u64_stats_update_begin(&ring->syncp); 2792 ring->stats.rx_err_cnt++; 2793 u64_stats_update_end(&ring->syncp); 2794 return ret; 2795 } 2796 2797 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, 2798 HNS3_RXD_DMAC_S); 2799 2800 u64_stats_update_begin(&ring->syncp); 2801 ring->stats.rx_pkts++; 2802 ring->stats.rx_bytes += len; 2803 2804 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) 2805 ring->stats.rx_multicast++; 2806 2807 u64_stats_update_end(&ring->syncp); 2808 2809 ring->tqp_vector->rx_group.total_bytes += len; 2810 2811 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); 2812 return 0; 2813 } 2814 2815 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, 2816 struct sk_buff **out_skb) 2817 { 2818 struct sk_buff *skb = ring->skb; 2819 struct hns3_desc_cb *desc_cb; 2820 struct hns3_desc *desc; 2821 u32 bd_base_info; 2822 int length; 2823 int ret; 2824 2825 desc = &ring->desc[ring->next_to_clean]; 2826 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2827 2828 prefetch(desc); 2829 2830 length = le16_to_cpu(desc->rx.size); 2831 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2832 2833 /* Check valid BD */ 2834 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) 2835 return -ENXIO; 2836 2837 if (!skb) 2838 ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; 2839 2840 /* Prefetch first cache line of first page 2841 * Idea is to cache few bytes of the header of the packet. Our L1 Cache 2842 * line size is 64B so need to prefetch twice to make it 128B. But in 2843 * actual we can have greater size of caches with 128B Level 1 cache 2844 * lines. In such a case, single fetch would suffice to cache in the 2845 * relevant part of the header. 
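 * That is why the second prefetch below is wrapped in
 * "#if L1_CACHE_BYTES < 128" and is compiled out on 128-byte cache lines.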
2846 */ 2847 prefetch(ring->va); 2848 #if L1_CACHE_BYTES < 128 2849 prefetch(ring->va + L1_CACHE_BYTES); 2850 #endif 2851 2852 if (!skb) { 2853 ret = hns3_alloc_skb(ring, length, ring->va); 2854 *out_skb = skb = ring->skb; 2855 2856 if (ret < 0) /* alloc buffer fail */ 2857 return ret; 2858 if (ret > 0) { /* need add frag */ 2859 ret = hns3_add_frag(ring, desc, &skb, false); 2860 if (ret) 2861 return ret; 2862 2863 /* As the head data may be changed when GRO enable, copy 2864 * the head data in after other data rx completed 2865 */ 2866 memcpy(skb->data, ring->va, 2867 ALIGN(ring->pull_len, sizeof(long))); 2868 } 2869 } else { 2870 ret = hns3_add_frag(ring, desc, &skb, true); 2871 if (ret) 2872 return ret; 2873 2874 /* As the head data may be changed when GRO enable, copy 2875 * the head data in after other data rx completed 2876 */ 2877 memcpy(skb->data, ring->va, 2878 ALIGN(ring->pull_len, sizeof(long))); 2879 } 2880 2881 ret = hns3_handle_bdinfo(ring, skb); 2882 if (unlikely(ret)) { 2883 dev_kfree_skb_any(skb); 2884 return ret; 2885 } 2886 2887 skb_record_rx_queue(skb, ring->tqp->tqp_index); 2888 *out_skb = skb; 2889 2890 return 0; 2891 } 2892 2893 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, 2894 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) 2895 { 2896 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 2897 int recv_pkts, recv_bds, clean_count, err; 2898 int unused_count = hns3_desc_unused(ring); 2899 struct sk_buff *skb = ring->skb; 2900 int num; 2901 2902 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); 2903 rmb(); /* Make sure num taken effect before the other data is touched */ 2904 2905 recv_pkts = 0, recv_bds = 0, clean_count = 0; 2906 num -= unused_count; 2907 unused_count -= ring->pending_buf; 2908 2909 while (recv_pkts < budget && recv_bds < num) { 2910 /* Reuse or realloc buffers */ 2911 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { 2912 hns3_nic_alloc_rx_buffers(ring, 2913 clean_count + unused_count); 2914 clean_count = 0; 2915 unused_count = hns3_desc_unused(ring) - 2916 ring->pending_buf; 2917 } 2918 2919 /* Poll one pkt */ 2920 err = hns3_handle_rx_bd(ring, &skb); 2921 if (unlikely(!skb)) /* This fault cannot be repaired */ 2922 goto out; 2923 2924 if (err == -ENXIO) { /* Do not get FE for the packet */ 2925 goto out; 2926 } else if (unlikely(err)) { /* Do jump the err */ 2927 recv_bds += ring->pending_buf; 2928 clean_count += ring->pending_buf; 2929 ring->skb = NULL; 2930 ring->pending_buf = 0; 2931 continue; 2932 } 2933 2934 rx_fn(ring, skb); 2935 recv_bds += ring->pending_buf; 2936 clean_count += ring->pending_buf; 2937 ring->skb = NULL; 2938 ring->pending_buf = 0; 2939 2940 recv_pkts++; 2941 } 2942 2943 out: 2944 /* Make all data has been write before submit */ 2945 if (clean_count + unused_count > 0) 2946 hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count); 2947 2948 return recv_pkts; 2949 } 2950 2951 static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group) 2952 { 2953 #define HNS3_RX_LOW_BYTE_RATE 10000 2954 #define HNS3_RX_MID_BYTE_RATE 20000 2955 #define HNS3_RX_ULTRA_PACKET_RATE 40 2956 2957 enum hns3_flow_level_range new_flow_level; 2958 struct hns3_enet_tqp_vector *tqp_vector; 2959 int packets_per_msecs, bytes_per_msecs; 2960 u32 time_passed_ms; 2961 2962 tqp_vector = ring_group->ring->tqp_vector; 2963 time_passed_ms = 2964 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); 2965 if (!time_passed_ms) 2966 return false; 2967 2968 do_div(ring_group->total_packets, 
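	/* do_div() divides the u64 counter in place, so from here on
	 * total_packets and total_bytes hold per-millisecond rates and the
	 * thresholds below are per-ms too: 10000 bytes/ms is roughly the
	 * 10 MB/s boundary between the LOW and MID flow levels.
	 */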
time_passed_ms); 2969 packets_per_msecs = ring_group->total_packets; 2970 2971 do_div(ring_group->total_bytes, time_passed_ms); 2972 bytes_per_msecs = ring_group->total_bytes; 2973 2974 new_flow_level = ring_group->coal.flow_level; 2975 2976 /* Simple throttlerate management 2977 * 0-10MB/s lower (50000 ints/s) 2978 * 10-20MB/s middle (20000 ints/s) 2979 * 20-1249MB/s high (18000 ints/s) 2980 * > 40000pps ultra (8000 ints/s) 2981 */ 2982 switch (new_flow_level) { 2983 case HNS3_FLOW_LOW: 2984 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) 2985 new_flow_level = HNS3_FLOW_MID; 2986 break; 2987 case HNS3_FLOW_MID: 2988 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) 2989 new_flow_level = HNS3_FLOW_HIGH; 2990 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) 2991 new_flow_level = HNS3_FLOW_LOW; 2992 break; 2993 case HNS3_FLOW_HIGH: 2994 case HNS3_FLOW_ULTRA: 2995 default: 2996 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) 2997 new_flow_level = HNS3_FLOW_MID; 2998 break; 2999 } 3000 3001 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && 3002 &tqp_vector->rx_group == ring_group) 3003 new_flow_level = HNS3_FLOW_ULTRA; 3004 3005 ring_group->total_bytes = 0; 3006 ring_group->total_packets = 0; 3007 ring_group->coal.flow_level = new_flow_level; 3008 3009 return true; 3010 } 3011 3012 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) 3013 { 3014 struct hns3_enet_tqp_vector *tqp_vector; 3015 u16 new_int_gl; 3016 3017 if (!ring_group->ring) 3018 return false; 3019 3020 tqp_vector = ring_group->ring->tqp_vector; 3021 if (!tqp_vector->last_jiffies) 3022 return false; 3023 3024 if (ring_group->total_packets == 0) { 3025 ring_group->coal.int_gl = HNS3_INT_GL_50K; 3026 ring_group->coal.flow_level = HNS3_FLOW_LOW; 3027 return true; 3028 } 3029 3030 if (!hns3_get_new_flow_lvl(ring_group)) 3031 return false; 3032 3033 new_int_gl = ring_group->coal.int_gl; 3034 switch (ring_group->coal.flow_level) { 3035 case HNS3_FLOW_LOW: 3036 new_int_gl = HNS3_INT_GL_50K; 3037 break; 3038 case HNS3_FLOW_MID: 3039 new_int_gl = HNS3_INT_GL_20K; 3040 break; 3041 case HNS3_FLOW_HIGH: 3042 new_int_gl = HNS3_INT_GL_18K; 3043 break; 3044 case HNS3_FLOW_ULTRA: 3045 new_int_gl = HNS3_INT_GL_8K; 3046 break; 3047 default: 3048 break; 3049 } 3050 3051 if (new_int_gl != ring_group->coal.int_gl) { 3052 ring_group->coal.int_gl = new_int_gl; 3053 return true; 3054 } 3055 return false; 3056 } 3057 3058 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) 3059 { 3060 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 3061 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 3062 bool rx_update, tx_update; 3063 3064 /* update param every 1000ms */ 3065 if (time_before(jiffies, 3066 tqp_vector->last_jiffies + msecs_to_jiffies(1000))) 3067 return; 3068 3069 if (rx_group->coal.gl_adapt_enable) { 3070 rx_update = hns3_get_new_int_gl(rx_group); 3071 if (rx_update) 3072 hns3_set_vector_coalesce_rx_gl(tqp_vector, 3073 rx_group->coal.int_gl); 3074 } 3075 3076 if (tx_group->coal.gl_adapt_enable) { 3077 tx_update = hns3_get_new_int_gl(tx_group); 3078 if (tx_update) 3079 hns3_set_vector_coalesce_tx_gl(tqp_vector, 3080 tx_group->coal.int_gl); 3081 } 3082 3083 tqp_vector->last_jiffies = jiffies; 3084 } 3085 3086 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 3087 { 3088 struct hns3_nic_priv *priv = netdev_priv(napi->dev); 3089 struct hns3_enet_ring *ring; 3090 int rx_pkt_total = 0; 3091 3092 struct hns3_enet_tqp_vector *tqp_vector = 3093 container_of(napi, struct 
hns3_enet_tqp_vector, napi); 3094 bool clean_complete = true; 3095 int rx_budget = budget; 3096 3097 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3098 napi_complete(napi); 3099 return 0; 3100 } 3101 3102 /* Since the actual Tx work is minimal, we can give the Tx a larger 3103 * budget and be more aggressive about cleaning up the Tx descriptors. 3104 */ 3105 hns3_for_each_ring(ring, tqp_vector->tx_group) 3106 hns3_clean_tx_ring(ring); 3107 3108 /* make sure rx ring budget not smaller than 1 */ 3109 if (tqp_vector->num_tqps > 1) 3110 rx_budget = max(budget / tqp_vector->num_tqps, 1); 3111 3112 hns3_for_each_ring(ring, tqp_vector->rx_group) { 3113 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 3114 hns3_rx_skb); 3115 3116 if (rx_cleaned >= rx_budget) 3117 clean_complete = false; 3118 3119 rx_pkt_total += rx_cleaned; 3120 } 3121 3122 tqp_vector->rx_group.total_packets += rx_pkt_total; 3123 3124 if (!clean_complete) 3125 return budget; 3126 3127 if (napi_complete(napi) && 3128 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3129 hns3_update_new_int_gl(tqp_vector); 3130 hns3_mask_vector_irq(tqp_vector, 1); 3131 } 3132 3133 return rx_pkt_total; 3134 } 3135 3136 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3137 struct hnae3_ring_chain_node *head) 3138 { 3139 struct pci_dev *pdev = tqp_vector->handle->pdev; 3140 struct hnae3_ring_chain_node *cur_chain = head; 3141 struct hnae3_ring_chain_node *chain; 3142 struct hns3_enet_ring *tx_ring; 3143 struct hns3_enet_ring *rx_ring; 3144 3145 tx_ring = tqp_vector->tx_group.ring; 3146 if (tx_ring) { 3147 cur_chain->tqp_index = tx_ring->tqp->tqp_index; 3148 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3149 HNAE3_RING_TYPE_TX); 3150 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3151 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); 3152 3153 cur_chain->next = NULL; 3154 3155 while (tx_ring->next) { 3156 tx_ring = tx_ring->next; 3157 3158 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), 3159 GFP_KERNEL); 3160 if (!chain) 3161 goto err_free_chain; 3162 3163 cur_chain->next = chain; 3164 chain->tqp_index = tx_ring->tqp->tqp_index; 3165 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3166 HNAE3_RING_TYPE_TX); 3167 hnae3_set_field(chain->int_gl_idx, 3168 HNAE3_RING_GL_IDX_M, 3169 HNAE3_RING_GL_IDX_S, 3170 HNAE3_RING_GL_TX); 3171 3172 cur_chain = chain; 3173 } 3174 } 3175 3176 rx_ring = tqp_vector->rx_group.ring; 3177 if (!tx_ring && rx_ring) { 3178 cur_chain->next = NULL; 3179 cur_chain->tqp_index = rx_ring->tqp->tqp_index; 3180 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3181 HNAE3_RING_TYPE_RX); 3182 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3183 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3184 3185 rx_ring = rx_ring->next; 3186 } 3187 3188 while (rx_ring) { 3189 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 3190 if (!chain) 3191 goto err_free_chain; 3192 3193 cur_chain->next = chain; 3194 chain->tqp_index = rx_ring->tqp->tqp_index; 3195 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3196 HNAE3_RING_TYPE_RX); 3197 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3198 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3199 3200 cur_chain = chain; 3201 3202 rx_ring = rx_ring->next; 3203 } 3204 3205 return 0; 3206 3207 err_free_chain: 3208 cur_chain = head->next; 3209 while (cur_chain) { 3210 chain = cur_chain->next; 3211 devm_kfree(&pdev->dev, cur_chain); 3212 cur_chain = chain; 3213 } 3214 head->next = NULL; 3215 3216 return -ENOMEM; 3217 } 3218 3219 static void 
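/* The chain handled here (and built by hns3_get_vector_ring_chain() above)
 * is a singly linked list of hnae3_ring_chain_node entries, one per TX ring
 * followed by one per RX ring of the vector; each node carries the TQP
 * index, the ring type bit and the GL index used when rings are mapped to
 * or unmapped from the vector's interrupt.
 */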
hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3220 struct hnae3_ring_chain_node *head) 3221 { 3222 struct pci_dev *pdev = tqp_vector->handle->pdev; 3223 struct hnae3_ring_chain_node *chain_tmp, *chain; 3224 3225 chain = head->next; 3226 3227 while (chain) { 3228 chain_tmp = chain->next; 3229 devm_kfree(&pdev->dev, chain); 3230 chain = chain_tmp; 3231 } 3232 } 3233 3234 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 3235 struct hns3_enet_ring *ring) 3236 { 3237 ring->next = group->ring; 3238 group->ring = ring; 3239 3240 group->count++; 3241 } 3242 3243 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) 3244 { 3245 struct pci_dev *pdev = priv->ae_handle->pdev; 3246 struct hns3_enet_tqp_vector *tqp_vector; 3247 int num_vectors = priv->vector_num; 3248 int numa_node; 3249 int vector_i; 3250 3251 numa_node = dev_to_node(&pdev->dev); 3252 3253 for (vector_i = 0; vector_i < num_vectors; vector_i++) { 3254 tqp_vector = &priv->tqp_vector[vector_i]; 3255 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), 3256 &tqp_vector->affinity_mask); 3257 } 3258 } 3259 3260 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 3261 { 3262 struct hnae3_ring_chain_node vector_ring_chain; 3263 struct hnae3_handle *h = priv->ae_handle; 3264 struct hns3_enet_tqp_vector *tqp_vector; 3265 int ret = 0; 3266 int i; 3267 3268 hns3_nic_set_cpumask(priv); 3269 3270 for (i = 0; i < priv->vector_num; i++) { 3271 tqp_vector = &priv->tqp_vector[i]; 3272 hns3_vector_gl_rl_init_hw(tqp_vector, priv); 3273 tqp_vector->num_tqps = 0; 3274 } 3275 3276 for (i = 0; i < h->kinfo.num_tqps; i++) { 3277 u16 vector_i = i % priv->vector_num; 3278 u16 tqp_num = h->kinfo.num_tqps; 3279 3280 tqp_vector = &priv->tqp_vector[vector_i]; 3281 3282 hns3_add_ring_to_group(&tqp_vector->tx_group, 3283 priv->ring_data[i].ring); 3284 3285 hns3_add_ring_to_group(&tqp_vector->rx_group, 3286 priv->ring_data[i + tqp_num].ring); 3287 3288 priv->ring_data[i].ring->tqp_vector = tqp_vector; 3289 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; 3290 tqp_vector->num_tqps++; 3291 } 3292 3293 for (i = 0; i < priv->vector_num; i++) { 3294 tqp_vector = &priv->tqp_vector[i]; 3295 3296 tqp_vector->rx_group.total_bytes = 0; 3297 tqp_vector->rx_group.total_packets = 0; 3298 tqp_vector->tx_group.total_bytes = 0; 3299 tqp_vector->tx_group.total_packets = 0; 3300 tqp_vector->handle = h; 3301 3302 ret = hns3_get_vector_ring_chain(tqp_vector, 3303 &vector_ring_chain); 3304 if (ret) 3305 goto map_ring_fail; 3306 3307 ret = h->ae_algo->ops->map_ring_to_vector(h, 3308 tqp_vector->vector_irq, &vector_ring_chain); 3309 3310 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3311 3312 if (ret) 3313 goto map_ring_fail; 3314 3315 netif_napi_add(priv->netdev, &tqp_vector->napi, 3316 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 3317 } 3318 3319 return 0; 3320 3321 map_ring_fail: 3322 while (i--) 3323 netif_napi_del(&priv->tqp_vector[i].napi); 3324 3325 return ret; 3326 } 3327 3328 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 3329 { 3330 #define HNS3_VECTOR_PF_MAX_NUM 64 3331 3332 struct hnae3_handle *h = priv->ae_handle; 3333 struct hns3_enet_tqp_vector *tqp_vector; 3334 struct hnae3_vector_info *vector; 3335 struct pci_dev *pdev = h->pdev; 3336 u16 tqp_num = h->kinfo.num_tqps; 3337 u16 vector_num; 3338 int ret = 0; 3339 u16 i; 3340 3341 /* RSS size, cpu online and vector_num should be the same */ 3342 /* Should consider 2p/4p later */ 3343 vector_num = min_t(u16, 
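	/* Illustrative example (numbers are made up): 8 TQPs on a box with
	 * 16 online CPUs gives vector_num = 8, well under the 64-vector PF
	 * cap, so after get_vector() each vector typically ends up serving
	 * one TX and one RX ring via the i % vector_num mapping in
	 * hns3_nic_init_vector_data().
	 */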
num_online_cpus(), tqp_num); 3344 vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM); 3345 3346 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), 3347 GFP_KERNEL); 3348 if (!vector) 3349 return -ENOMEM; 3350 3351 /* save the actual available vector number */ 3352 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 3353 3354 priv->vector_num = vector_num; 3355 priv->tqp_vector = (struct hns3_enet_tqp_vector *) 3356 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 3357 GFP_KERNEL); 3358 if (!priv->tqp_vector) { 3359 ret = -ENOMEM; 3360 goto out; 3361 } 3362 3363 for (i = 0; i < priv->vector_num; i++) { 3364 tqp_vector = &priv->tqp_vector[i]; 3365 tqp_vector->idx = i; 3366 tqp_vector->mask_addr = vector[i].io_addr; 3367 tqp_vector->vector_irq = vector[i].vector; 3368 hns3_vector_gl_rl_init(tqp_vector, priv); 3369 } 3370 3371 out: 3372 devm_kfree(&pdev->dev, vector); 3373 return ret; 3374 } 3375 3376 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) 3377 { 3378 group->ring = NULL; 3379 group->count = 0; 3380 } 3381 3382 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 3383 { 3384 struct hnae3_ring_chain_node vector_ring_chain; 3385 struct hnae3_handle *h = priv->ae_handle; 3386 struct hns3_enet_tqp_vector *tqp_vector; 3387 int i; 3388 3389 for (i = 0; i < priv->vector_num; i++) { 3390 tqp_vector = &priv->tqp_vector[i]; 3391 3392 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) 3393 continue; 3394 3395 hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain); 3396 3397 h->ae_algo->ops->unmap_ring_from_vector(h, 3398 tqp_vector->vector_irq, &vector_ring_chain); 3399 3400 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3401 3402 if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) { 3403 irq_set_affinity_hint(tqp_vector->vector_irq, NULL); 3404 free_irq(tqp_vector->vector_irq, tqp_vector); 3405 tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED; 3406 } 3407 3408 hns3_clear_ring_group(&tqp_vector->rx_group); 3409 hns3_clear_ring_group(&tqp_vector->tx_group); 3410 netif_napi_del(&priv->tqp_vector[i].napi); 3411 } 3412 } 3413 3414 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 3415 { 3416 struct hnae3_handle *h = priv->ae_handle; 3417 struct pci_dev *pdev = h->pdev; 3418 int i, ret; 3419 3420 for (i = 0; i < priv->vector_num; i++) { 3421 struct hns3_enet_tqp_vector *tqp_vector; 3422 3423 tqp_vector = &priv->tqp_vector[i]; 3424 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 3425 if (ret) 3426 return ret; 3427 } 3428 3429 devm_kfree(&pdev->dev, priv->tqp_vector); 3430 return 0; 3431 } 3432 3433 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 3434 unsigned int ring_type) 3435 { 3436 struct hns3_nic_ring_data *ring_data = priv->ring_data; 3437 int queue_num = priv->ae_handle->kinfo.num_tqps; 3438 struct pci_dev *pdev = priv->ae_handle->pdev; 3439 struct hns3_enet_ring *ring; 3440 int desc_num; 3441 3442 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); 3443 if (!ring) 3444 return -ENOMEM; 3445 3446 if (ring_type == HNAE3_RING_TYPE_TX) { 3447 desc_num = priv->ae_handle->kinfo.num_tx_desc; 3448 ring_data[q->tqp_index].ring = ring; 3449 ring_data[q->tqp_index].queue_index = q->tqp_index; 3450 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; 3451 } else { 3452 desc_num = priv->ae_handle->kinfo.num_rx_desc; 3453 ring_data[q->tqp_index + queue_num].ring = ring; 3454 ring_data[q->tqp_index + 
queue_num].queue_index = q->tqp_index; 3455 ring->io_base = q->io_base; 3456 } 3457 3458 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 3459 3460 ring->tqp = q; 3461 ring->desc = NULL; 3462 ring->desc_cb = NULL; 3463 ring->dev = priv->dev; 3464 ring->desc_dma_addr = 0; 3465 ring->buf_size = q->buf_size; 3466 ring->desc_num = desc_num; 3467 ring->next_to_use = 0; 3468 ring->next_to_clean = 0; 3469 3470 return 0; 3471 } 3472 3473 static int hns3_queue_to_ring(struct hnae3_queue *tqp, 3474 struct hns3_nic_priv *priv) 3475 { 3476 int ret; 3477 3478 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 3479 if (ret) 3480 return ret; 3481 3482 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 3483 if (ret) { 3484 devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring); 3485 return ret; 3486 } 3487 3488 return 0; 3489 } 3490 3491 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 3492 { 3493 struct hnae3_handle *h = priv->ae_handle; 3494 struct pci_dev *pdev = h->pdev; 3495 int i, ret; 3496 3497 priv->ring_data = devm_kzalloc(&pdev->dev, 3498 array3_size(h->kinfo.num_tqps, 3499 sizeof(*priv->ring_data), 3500 2), 3501 GFP_KERNEL); 3502 if (!priv->ring_data) 3503 return -ENOMEM; 3504 3505 for (i = 0; i < h->kinfo.num_tqps; i++) { 3506 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); 3507 if (ret) 3508 goto err; 3509 } 3510 3511 return 0; 3512 err: 3513 while (i--) { 3514 devm_kfree(priv->dev, priv->ring_data[i].ring); 3515 devm_kfree(priv->dev, 3516 priv->ring_data[i + h->kinfo.num_tqps].ring); 3517 } 3518 3519 devm_kfree(&pdev->dev, priv->ring_data); 3520 priv->ring_data = NULL; 3521 return ret; 3522 } 3523 3524 static void hns3_put_ring_config(struct hns3_nic_priv *priv) 3525 { 3526 struct hnae3_handle *h = priv->ae_handle; 3527 int i; 3528 3529 if (!priv->ring_data) 3530 return; 3531 3532 for (i = 0; i < h->kinfo.num_tqps; i++) { 3533 devm_kfree(priv->dev, priv->ring_data[i].ring); 3534 devm_kfree(priv->dev, 3535 priv->ring_data[i + h->kinfo.num_tqps].ring); 3536 } 3537 devm_kfree(priv->dev, priv->ring_data); 3538 priv->ring_data = NULL; 3539 } 3540 3541 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) 3542 { 3543 int ret; 3544 3545 if (ring->desc_num <= 0 || ring->buf_size <= 0) 3546 return -EINVAL; 3547 3548 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, 3549 sizeof(ring->desc_cb[0]), GFP_KERNEL); 3550 if (!ring->desc_cb) { 3551 ret = -ENOMEM; 3552 goto out; 3553 } 3554 3555 ret = hns3_alloc_desc(ring); 3556 if (ret) 3557 goto out_with_desc_cb; 3558 3559 if (!HNAE3_IS_TX_RING(ring)) { 3560 ret = hns3_alloc_ring_buffers(ring); 3561 if (ret) 3562 goto out_with_desc; 3563 } 3564 3565 return 0; 3566 3567 out_with_desc: 3568 hns3_free_desc(ring); 3569 out_with_desc_cb: 3570 devm_kfree(ring_to_dev(ring), ring->desc_cb); 3571 ring->desc_cb = NULL; 3572 out: 3573 return ret; 3574 } 3575 3576 static void hns3_fini_ring(struct hns3_enet_ring *ring) 3577 { 3578 hns3_free_desc(ring); 3579 devm_kfree(ring_to_dev(ring), ring->desc_cb); 3580 ring->desc_cb = NULL; 3581 ring->next_to_clean = 0; 3582 ring->next_to_use = 0; 3583 ring->pending_buf = 0; 3584 if (ring->skb) { 3585 dev_kfree_skb_any(ring->skb); 3586 ring->skb = NULL; 3587 } 3588 } 3589 3590 static int hns3_buf_size2type(u32 buf_size) 3591 { 3592 int bd_size_type; 3593 3594 switch (buf_size) { 3595 case 512: 3596 bd_size_type = HNS3_BD_SIZE_512_TYPE; 3597 break; 3598 case 1024: 3599 bd_size_type = HNS3_BD_SIZE_1024_TYPE; 3600 break; 3601 case 2048: 3602 bd_size_type = 
HNS3_BD_SIZE_2048_TYPE; 3603 break; 3604 case 4096: 3605 bd_size_type = HNS3_BD_SIZE_4096_TYPE; 3606 break; 3607 default: 3608 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 3609 } 3610 3611 return bd_size_type; 3612 } 3613 3614 static void hns3_init_ring_hw(struct hns3_enet_ring *ring) 3615 { 3616 dma_addr_t dma = ring->desc_dma_addr; 3617 struct hnae3_queue *q = ring->tqp; 3618 3619 if (!HNAE3_IS_TX_RING(ring)) { 3620 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma); 3621 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, 3622 (u32)((dma >> 31) >> 1)); 3623 3624 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, 3625 hns3_buf_size2type(ring->buf_size)); 3626 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, 3627 ring->desc_num / 8 - 1); 3628 3629 } else { 3630 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, 3631 (u32)dma); 3632 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 3633 (u32)((dma >> 31) >> 1)); 3634 3635 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 3636 ring->desc_num / 8 - 1); 3637 } 3638 } 3639 3640 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) 3641 { 3642 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 3643 int i; 3644 3645 for (i = 0; i < HNAE3_MAX_TC; i++) { 3646 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; 3647 int j; 3648 3649 if (!tc_info->enable) 3650 continue; 3651 3652 for (j = 0; j < tc_info->tqp_count; j++) { 3653 struct hnae3_queue *q; 3654 3655 q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp; 3656 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, 3657 tc_info->tc); 3658 } 3659 } 3660 } 3661 3662 int hns3_init_all_ring(struct hns3_nic_priv *priv) 3663 { 3664 struct hnae3_handle *h = priv->ae_handle; 3665 int ring_num = h->kinfo.num_tqps * 2; 3666 int i, j; 3667 int ret; 3668 3669 for (i = 0; i < ring_num; i++) { 3670 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); 3671 if (ret) { 3672 dev_err(priv->dev, 3673 "Alloc ring memory fail! ret=%d\n", ret); 3674 goto out_when_alloc_ring_memory; 3675 } 3676 3677 u64_stats_init(&priv->ring_data[i].ring->syncp); 3678 } 3679 3680 return 0; 3681 3682 out_when_alloc_ring_memory: 3683 for (j = i - 1; j >= 0; j--) 3684 hns3_fini_ring(priv->ring_data[j].ring); 3685 3686 return -ENOMEM; 3687 } 3688 3689 int hns3_uninit_all_ring(struct hns3_nic_priv *priv) 3690 { 3691 struct hnae3_handle *h = priv->ae_handle; 3692 int i; 3693 3694 for (i = 0; i < h->kinfo.num_tqps; i++) { 3695 hns3_fini_ring(priv->ring_data[i].ring); 3696 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); 3697 } 3698 return 0; 3699 } 3700 3701 /* Set mac addr if it is configured. 
or leave it to the AE driver */ 3702 static int hns3_init_mac_addr(struct net_device *netdev, bool init) 3703 { 3704 struct hns3_nic_priv *priv = netdev_priv(netdev); 3705 struct hnae3_handle *h = priv->ae_handle; 3706 u8 mac_addr_temp[ETH_ALEN]; 3707 int ret = 0; 3708 3709 if (h->ae_algo->ops->get_mac_addr && init) { 3710 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 3711 ether_addr_copy(netdev->dev_addr, mac_addr_temp); 3712 } 3713 3714 /* Check if the MAC address is valid, if not get a random one */ 3715 if (!is_valid_ether_addr(netdev->dev_addr)) { 3716 eth_hw_addr_random(netdev); 3717 dev_warn(priv->dev, "using random MAC address %pM\n", 3718 netdev->dev_addr); 3719 } 3720 3721 if (h->ae_algo->ops->set_mac_addr) 3722 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 3723 3724 return ret; 3725 } 3726 3727 static int hns3_init_phy(struct net_device *netdev) 3728 { 3729 struct hnae3_handle *h = hns3_get_handle(netdev); 3730 int ret = 0; 3731 3732 if (h->ae_algo->ops->mac_connect_phy) 3733 ret = h->ae_algo->ops->mac_connect_phy(h); 3734 3735 return ret; 3736 } 3737 3738 static void hns3_uninit_phy(struct net_device *netdev) 3739 { 3740 struct hnae3_handle *h = hns3_get_handle(netdev); 3741 3742 if (h->ae_algo->ops->mac_disconnect_phy) 3743 h->ae_algo->ops->mac_disconnect_phy(h); 3744 } 3745 3746 static int hns3_restore_fd_rules(struct net_device *netdev) 3747 { 3748 struct hnae3_handle *h = hns3_get_handle(netdev); 3749 int ret = 0; 3750 3751 if (h->ae_algo->ops->restore_fd_rules) 3752 ret = h->ae_algo->ops->restore_fd_rules(h); 3753 3754 return ret; 3755 } 3756 3757 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) 3758 { 3759 struct hnae3_handle *h = hns3_get_handle(netdev); 3760 3761 if (h->ae_algo->ops->del_all_fd_entries) 3762 h->ae_algo->ops->del_all_fd_entries(h, clear_list); 3763 } 3764 3765 static int hns3_client_start(struct hnae3_handle *handle) 3766 { 3767 if (!handle->ae_algo->ops->client_start) 3768 return 0; 3769 3770 return handle->ae_algo->ops->client_start(handle); 3771 } 3772 3773 static void hns3_client_stop(struct hnae3_handle *handle) 3774 { 3775 if (!handle->ae_algo->ops->client_stop) 3776 return; 3777 3778 handle->ae_algo->ops->client_stop(handle); 3779 } 3780 3781 static void hns3_info_show(struct hns3_nic_priv *priv) 3782 { 3783 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 3784 3785 dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); 3786 dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps); 3787 dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size); 3788 dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size); 3789 dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len); 3790 dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc); 3791 dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc); 3792 dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc); 3793 dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu); 3794 } 3795 3796 static int hns3_client_init(struct hnae3_handle *handle) 3797 { 3798 struct pci_dev *pdev = handle->pdev; 3799 u16 alloc_tqps, max_rss_size; 3800 struct hns3_nic_priv *priv; 3801 struct net_device *netdev; 3802 int ret; 3803 3804 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, 3805 &max_rss_size); 3806 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); 3807 if (!netdev) 3808 return -ENOMEM; 3809 3810 priv = netdev_priv(netdev); 
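	/* Rough map of what follows: fill in priv and netdev defaults, then
	 * ring/vector setup (hns3_get_ring_config, hns3_nic_alloc_vector_data,
	 * hns3_nic_init_vector_data, hns3_init_all_ring), PHY connect,
	 * register_netdev(), client start and debugfs init; the error labels
	 * at the end unwind in the reverse order.
	 */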
3811 priv->dev = &pdev->dev; 3812 priv->netdev = netdev; 3813 priv->ae_handle = handle; 3814 priv->tx_timeout_count = 0; 3815 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 3816 3817 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); 3818 3819 handle->kinfo.netdev = netdev; 3820 handle->priv = (void *)priv; 3821 3822 hns3_init_mac_addr(netdev, true); 3823 3824 hns3_set_default_feature(netdev); 3825 3826 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; 3827 netdev->priv_flags |= IFF_UNICAST_FLT; 3828 netdev->netdev_ops = &hns3_nic_netdev_ops; 3829 SET_NETDEV_DEV(netdev, &pdev->dev); 3830 hns3_ethtool_set_ops(netdev); 3831 3832 /* Carrier off reporting is important to ethtool even BEFORE open */ 3833 netif_carrier_off(netdev); 3834 3835 ret = hns3_get_ring_config(priv); 3836 if (ret) { 3837 ret = -ENOMEM; 3838 goto out_get_ring_cfg; 3839 } 3840 3841 ret = hns3_nic_alloc_vector_data(priv); 3842 if (ret) { 3843 ret = -ENOMEM; 3844 goto out_alloc_vector_data; 3845 } 3846 3847 ret = hns3_nic_init_vector_data(priv); 3848 if (ret) { 3849 ret = -ENOMEM; 3850 goto out_init_vector_data; 3851 } 3852 3853 ret = hns3_init_all_ring(priv); 3854 if (ret) { 3855 ret = -ENOMEM; 3856 goto out_init_ring_data; 3857 } 3858 3859 ret = hns3_init_phy(netdev); 3860 if (ret) 3861 goto out_init_phy; 3862 3863 ret = register_netdev(netdev); 3864 if (ret) { 3865 dev_err(priv->dev, "probe register netdev fail!\n"); 3866 goto out_reg_netdev_fail; 3867 } 3868 3869 ret = hns3_client_start(handle); 3870 if (ret) { 3871 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); 3872 goto out_client_start; 3873 } 3874 3875 hns3_dcbnl_setup(handle); 3876 3877 hns3_dbg_init(handle); 3878 3879 /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */ 3880 netdev->max_mtu = HNS3_MAX_MTU; 3881 3882 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 3883 3884 if (netif_msg_drv(handle)) 3885 hns3_info_show(priv); 3886 3887 return ret; 3888 3889 out_client_start: 3890 unregister_netdev(netdev); 3891 out_reg_netdev_fail: 3892 hns3_uninit_phy(netdev); 3893 out_init_phy: 3894 hns3_uninit_all_ring(priv); 3895 out_init_ring_data: 3896 hns3_nic_uninit_vector_data(priv); 3897 out_init_vector_data: 3898 hns3_nic_dealloc_vector_data(priv); 3899 out_alloc_vector_data: 3900 priv->ring_data = NULL; 3901 out_get_ring_cfg: 3902 priv->ae_handle = NULL; 3903 free_netdev(netdev); 3904 return ret; 3905 } 3906 3907 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) 3908 { 3909 struct net_device *netdev = handle->kinfo.netdev; 3910 struct hns3_nic_priv *priv = netdev_priv(netdev); 3911 int ret; 3912 3913 hns3_remove_hw_addr(netdev); 3914 3915 if (netdev->reg_state != NETREG_UNINITIALIZED) 3916 unregister_netdev(netdev); 3917 3918 hns3_client_stop(handle); 3919 3920 hns3_uninit_phy(netdev); 3921 3922 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 3923 netdev_warn(netdev, "already uninitialized\n"); 3924 goto out_netdev_free; 3925 } 3926 3927 hns3_del_all_fd_rules(netdev, true); 3928 3929 hns3_clear_all_ring(handle, true); 3930 3931 hns3_nic_uninit_vector_data(priv); 3932 3933 ret = hns3_nic_dealloc_vector_data(priv); 3934 if (ret) 3935 netdev_err(netdev, "dealloc vector error\n"); 3936 3937 ret = hns3_uninit_all_ring(priv); 3938 if (ret) 3939 netdev_err(netdev, "uninit ring error\n"); 3940 3941 hns3_put_ring_config(priv); 3942 3943 hns3_dbg_uninit(handle); 3944 3945 out_netdev_free: 3946 free_netdev(netdev); 3947 } 3948 3949 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) 3950 { 3951 struct 
net_device *netdev = handle->kinfo.netdev; 3952 3953 if (!netdev) 3954 return; 3955 3956 if (linkup) { 3957 netif_carrier_on(netdev); 3958 netif_tx_wake_all_queues(netdev); 3959 if (netif_msg_link(handle)) 3960 netdev_info(netdev, "link up\n"); 3961 } else { 3962 netif_carrier_off(netdev); 3963 netif_tx_stop_all_queues(netdev); 3964 if (netif_msg_link(handle)) 3965 netdev_info(netdev, "link down\n"); 3966 } 3967 } 3968 3969 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) 3970 { 3971 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3972 struct net_device *ndev = kinfo->netdev; 3973 3974 if (tc > HNAE3_MAX_TC) 3975 return -EINVAL; 3976 3977 if (!ndev) 3978 return -ENODEV; 3979 3980 return hns3_nic_set_real_num_queue(ndev); 3981 } 3982 3983 static int hns3_recover_hw_addr(struct net_device *ndev) 3984 { 3985 struct netdev_hw_addr_list *list; 3986 struct netdev_hw_addr *ha, *tmp; 3987 int ret = 0; 3988 3989 netif_addr_lock_bh(ndev); 3990 /* go through and sync uc_addr entries to the device */ 3991 list = &ndev->uc; 3992 list_for_each_entry_safe(ha, tmp, &list->list, list) { 3993 ret = hns3_nic_uc_sync(ndev, ha->addr); 3994 if (ret) 3995 goto out; 3996 } 3997 3998 /* go through and sync mc_addr entries to the device */ 3999 list = &ndev->mc; 4000 list_for_each_entry_safe(ha, tmp, &list->list, list) { 4001 ret = hns3_nic_mc_sync(ndev, ha->addr); 4002 if (ret) 4003 goto out; 4004 } 4005 4006 out: 4007 netif_addr_unlock_bh(ndev); 4008 return ret; 4009 } 4010 4011 static void hns3_remove_hw_addr(struct net_device *netdev) 4012 { 4013 struct netdev_hw_addr_list *list; 4014 struct netdev_hw_addr *ha, *tmp; 4015 4016 hns3_nic_uc_unsync(netdev, netdev->dev_addr); 4017 4018 netif_addr_lock_bh(netdev); 4019 /* go through and unsync uc_addr entries to the device */ 4020 list = &netdev->uc; 4021 list_for_each_entry_safe(ha, tmp, &list->list, list) 4022 hns3_nic_uc_unsync(netdev, ha->addr); 4023 4024 /* go through and unsync mc_addr entries to the device */ 4025 list = &netdev->mc; 4026 list_for_each_entry_safe(ha, tmp, &list->list, list) 4027 if (ha->refcount > 1) 4028 hns3_nic_mc_unsync(netdev, ha->addr); 4029 4030 netif_addr_unlock_bh(netdev); 4031 } 4032 4033 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) 4034 { 4035 while (ring->next_to_clean != ring->next_to_use) { 4036 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; 4037 hns3_free_buffer_detach(ring, ring->next_to_clean); 4038 ring_ptr_move_fw(ring, next_to_clean); 4039 } 4040 } 4041 4042 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) 4043 { 4044 struct hns3_desc_cb res_cbs; 4045 int ret; 4046 4047 while (ring->next_to_use != ring->next_to_clean) { 4048 /* When a buffer is not reused, it's memory has been 4049 * freed in hns3_handle_rx_bd or will be freed by 4050 * stack, so we need to replace the buffer here. 4051 */ 4052 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 4053 ret = hns3_reserve_buffer_map(ring, &res_cbs); 4054 if (ret) { 4055 u64_stats_update_begin(&ring->syncp); 4056 ring->stats.sw_err_cnt++; 4057 u64_stats_update_end(&ring->syncp); 4058 /* if alloc new buffer fail, exit directly 4059 * and reclear in up flow. 
static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	struct hns3_desc_cb res_cbs;
	int ret;

	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by the
		 * stack, so we need to replace the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);
				/* if allocating a new buffer fails, exit
				 * directly and re-clear in the up flow.
				 */
				netdev_warn(ring->tqp->handle->kinfo.netdev,
					    "reserve buffer map failed, ret = %d\n",
					    ret);
				return ret;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}
		ring_ptr_move_fw(ring, next_to_use);
	}

	/* Free the pending skb in rx ring */
	if (ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
		ring->pending_buf = 0;
	}

	return 0;
}

static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by the
		 * stack, so we only need to unmap the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}

static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clear_tx_ring(ring);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		if (force)
			hns3_force_clear_rx_ring(ring);
		else
			hns3_clear_rx_ring(ring);
	}
}
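/* hns3_nic_reset_all_ring - reinitialize every ring after a queue reset
 *
 * Descriptive note added here: each tqp is reset through the ae_algo ops,
 * the ring registers are reprogrammed, the tx rings are drained, the rx
 * rings are refilled, and the software ring pointers are rewound so that
 * hardware and software both restart from index 0.
 */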
int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *rx_ring;
	int i, j;
	int ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = h->ae_algo->ops->reset_queue(h, i);
		if (ret)
			return ret;

		hns3_init_ring_hw(priv->ring_data[i].ring);

		/* We need to clear the tx ring here because the self test
		 * will use the ring and does not bring the interface down
		 * before bringing it back up.
		 */
		hns3_clear_tx_ring(priv->ring_data[i].ring);
		priv->ring_data[i].ring->next_to_clean = 0;
		priv->ring_data[i].ring->next_to_use = 0;

		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_init_ring_hw(rx_ring);
		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
			return ret;

		/* We cannot know the hardware head and tail when this
		 * function is called in the reset flow, so we reuse all
		 * descriptors.
		 */
		for (j = 0; j < rx_ring->desc_num; j++)
			hns3_reuse_buffer(rx_ring, j);

		rx_ring->next_to_clean = 0;
		rx_ring->next_to_use = 0;
	}

	hns3_init_tx_ring_tc(priv);

	return 0;
}

static void hns3_store_coal(struct hns3_nic_priv *priv)
{
	/* ethtool only supports setting and querying one coalesce
	 * configuration for now, so save vector 0's coalesce
	 * configuration here in order to restore it.
	 */
	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
}

static void hns3_restore_coal(struct hns3_nic_priv *priv)
{
	u16 vector_num = priv->vector_num;
	int i;

	for (i = 0; i < vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
		       sizeof(struct hns3_enet_coalesce));
	}
}

static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
		return 0;

	/* It is cumbersome for the hardware to pick-and-choose entries for
	 * deletion from its table space. Hence, for a function reset,
	 * software intervention is required to delete the entries.
	 */
	if (hns3_dev_ongoing_func_reset(ae_dev)) {
		hns3_remove_hw_addr(ndev);
		hns3_del_all_fd_rules(ndev, false);
	}

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
	int ret = 0;

	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_open(kinfo->netdev);
		if (ret) {
			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
			netdev_err(kinfo->netdev,
				   "net up fail, ret=%d!\n", ret);
			return ret;
		}
	}

	return ret;
}
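/* hns3_reset_notify_init_enet - rebuild the enet client after a reset
 *
 * Descriptive note added here: this mirrors the uninit path further below.
 * Ring configuration, vector data and rings are re-allocated, the saved
 * coalesce settings are restored and the client is started again; on
 * failure everything allocated so far is torn down in reverse order.
 */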
static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_put_ring;

	hns3_restore_coal(priv);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_dealloc_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_uninit_vector;

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto err_uninit_ring;
	}

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;

err_uninit_ring:
	hns3_uninit_all_ring(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_dealloc_vector:
	hns3_nic_dealloc_vector_data(priv);
err_put_ring:
	hns3_put_ring_config(priv);

	return ret;
}

static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	bool vlan_filter_enable;
	int ret;

	ret = hns3_init_mac_addr(netdev, false);
	if (ret)
		return ret;

	ret = hns3_recover_hw_addr(netdev);
	if (ret)
		return ret;

	ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
	if (ret)
		return ret;

	vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
	hns3_enable_vlan_filter(netdev, vlan_filter_enable);

	if (handle->ae_algo->ops->restore_vlan_table)
		handle->ae_algo->ops->restore_vlan_table(handle);

	return hns3_restore_fd_rules(netdev);
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		return 0;
	}

	hns3_clear_all_ring(handle, true);
	hns3_reset_tx_queue(priv->ae_handle);

	hns3_nic_uninit_vector_data(priv);

	hns3_store_coal(priv);

	ret = hns3_nic_dealloc_vector_data(priv);
	if (ret)
		netdev_err(netdev, "dealloc vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	return ret;
}

static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	case HNAE3_RESTORE_CLIENT:
		ret = hns3_reset_notify_restore_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}
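/* hns3_set_channels - change the number of combined channels
 *
 * Descriptive note added here: the new queue count is applied by driving
 * the same notification sequence used in the reset flow, i.e.
 * DOWN -> UNINIT -> (set_channels in the ae_algo) -> INIT -> UP.
 * As an illustrative example only (interface name and count are
 * hypothetical), a request such as
 *
 *	ethtool -L eth0 combined 8
 *
 * is expected to reach this handler through the ethtool operations
 * registered by hns3_ethtool_set_ops().
 */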
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	bool rxfh_configured = netif_is_rxfh_configured(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (ch->rx_count || ch->tx_count)
		return -EINVAL;

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < 1) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from 1 to %d",
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	if (kinfo->rss_size == new_tqp_num)
		return 0;

	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	org_tqp_num = h->kinfo.num_tqps;
	ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured);
	if (ret) {
		ret = h->ae_algo->ops->set_channels(h, org_tqp_num,
						    rxfh_configured);
		if (ret) {
			/* If reverting to the old tqp number also failed,
			 * a fatal error has occurred.
			 */
			dev_err(&netdev->dev,
				"Revert to old tqp num fail, ret=%d", ret);
			return ret;
		}
		dev_info(&netdev->dev,
			 "Change tqp num fail, Revert to old tqp num");
	}
	ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hns3_reset_notify(h, HNAE3_UP_CLIENT);
}

static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	hns3_dbg_register_debugfs(hns3_driver_name);

	ret = hnae3_register_client(&client);
	if (ret)
		goto err_reg_client;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		goto err_reg_driver;

	return ret;

err_reg_driver:
	hnae3_unregister_client(&client);
err_reg_client:
	hns3_dbg_unregister_debugfs();
	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
	hns3_dbg_unregister_debugfs();
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
MODULE_VERSION(HNS3_MOD_VERSION);