// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"

static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
static void hns3_remove_hw_addr(struct net_device *netdev);

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}

/* This callback function is used to set affinity changes to the irq affinity
 * masks when the irq_set_affinity_notifier function is used.
 */
static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
					 const cpumask_t *mask)
{
	struct hns3_enet_tqp_vector *tqp_vectors =
		container_of(notify, struct hns3_enet_tqp_vector,
			     affinity_notify);

	tqp_vectors->affinity_mask = *mask;
}

static void hns3_nic_irq_affinity_release(struct kref *ref)
{
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* clear the affinity notifier and affinity mask */
		irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
			txrx_int_idx++;
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->affinity_notify.notify =
					hns3_nic_irq_affinity_notify;
		tqp_vectors->affinity_notify.release =
					hns3_nic_irq_affinity_release;
		irq_set_affinity_notifier(tqp_vectors->vector_irq,
					  &tqp_vectors->affinity_notify);
		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second. GL and RL (Rate Limiter) are two ways to achieve
	 * interrupt coalescing.
	 */

	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
				   struct hns3_nic_priv *priv)
{
	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing self-adaptive and GL */
	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
	tqp_vector->rx_group.coal.gl_adapt_enable = 1;

	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}

static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector,
				       tqp_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector,
				       tqp_vector->rx_group.coal.int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
}

static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int i, ret;

	if (kinfo->num_tc <= 1) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, kinfo->num_tc);
		if (ret) {
			netdev_err(netdev,
				   "netdev_set_num_tc fail, ret=%d!\n", ret);
			return ret;
		}

		for (i = 0; i < HNAE3_MAX_TC; i++) {
			if (!kinfo->tc_info[i].enable)
				continue;

			netdev_set_tc_queue(netdev,
					    kinfo->tc_info[i].tc,
					    kinfo->tc_info[i].tqp_count,
					    kinfo->tc_info[i].tqp_offset);
		}
	}

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}
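
/* The number of usable channels is bounded both by the TQPs allocated to
 * this handle (which are split across the enabled TCs) and by the maximum
 * RSS size reported by the hardware.
 */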
static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 alloc_tqps, max_rss_size, rss_size;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
	rss_size = alloc_tqps / h->kinfo.num_tc;

	return min_t(u16, rss_size, max_rss_size);
}

static void hns3_tqp_enable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg |= BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_tqp_disable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg &= ~BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* enable rcb */
	for (j = 0; j < h->kinfo.num_tqps; j++)
		hns3_tqp_enable(h->kinfo.tqp[j]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	return 0;

out_start_err:
	while (j--)
		hns3_tqp_disable(h->kinfo.tqp[j]);

	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo;
	int i, ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev, "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	kinfo = &h->kinfo;
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
	}

	return 0;
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* disable rcb */
	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_tqp_disable(h->kinfo.tqp[i]);

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);

	hns3_clear_all_ring(priv->ae_handle);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
	u8 flags = 0;

	if (netdev->flags & IFF_PROMISC) {
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
	} else {
		flags |= HNAE3_VLAN_FLTR;
		if (netdev->flags & IFF_ALLMULTI)
			flags |= HNAE3_USER_MPE;
	}

	return flags;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 new_flags;
	int ret;

	new_flags = hns3_get_netdev_flags(netdev);

	ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
	if (ret) {
		netdev_err(netdev, "sync uc address fail\n");
		if (ret == -ENOSPC)
			new_flags |= HNAE3_OVERFLOW_UPE;
	}

	if (netdev->flags & IFF_MULTICAST) {
		ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
				    hns3_nic_mc_unsync);
		if (ret) {
			netdev_err(netdev, "sync mc address fail\n");
			if (ret == -ENOSPC)
				new_flags |= HNAE3_OVERFLOW_MPE;
		}
	}

	hns3_update_promisc_mode(netdev, new_flags);
	/* In user-requested promiscuous mode, VLAN filtering is disabled so
	 * that all packets are let in. In MAC-VLAN table overflow promiscuous
	 * mode, VLAN filtering remains enabled.
	 */
	hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
	h->netdev_flags = new_flags;
}

int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->set_promisc_mode) {
		return h->ae_algo->ops->set_promisc_mode(h,
						promisc_flags & HNAE3_UPE,
						promisc_flags & HNAE3_MPE);
	}

	return 0;
}

void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	bool last_state;

	if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
		last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
		if (enable != last_state) {
			netdev_info(netdev, "%s vlan filter\n",
				    enable ? "enable" : "disable");
			h->ae_algo->ops->enable_vlan_filter(h, enable);
		}
	}
}

static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			/* Software should clear the udp's checksum
			 * field when tso is needed.
			 */
			l4.udp->check = 0;
		}
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;
	hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check,
			     (__force __wsum)htonl(l4_paylen));

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	return 0;
}

static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}

static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
				u8 il4_proto, u32 *type_cs_vlan_tso,
				u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;
	unsigned char *l2_hdr;
	u8 l4_proto = ol4_proto;
	u32 ol2_len;
	u32 ol3_len;
	u32 ol4_len;
	u32 l2_len;
	u32 l3_len;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute L2 header size for normal packet, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
			HNS3_TXD_L2LEN_S, l2_len >> 1);
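
	/* tunnel packet: for an encapsulated frame the BD also carries the
	 * outer header lengths (OL2/OL3/OL4); the inner L2/L3/L4 lengths are
	 * then recomputed below from the inner headers.
	 */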
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hnae3_set_field(*ol_type_vlan_len_msec,
				HNS3_TXD_L2LEN_M,
				HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
				HNS3_TXD_L3LEN_S, ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558) */
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch MAC header ptr from outer to inner header */
			l2_hdr = skb_inner_mac_header(skb);

			/* compute OL4 header size, defined in 4 Bytes */
			ol4_len = l2_hdr - l4.hdr;
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
					ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes */
			l2_len = l3.hdr - l2_hdr;
			hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
					HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* for packet types not supported by the hardware,
			 * the txbd len field is left unfilled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
			HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S, l4.tcp->doff);
		break;
	case IPPROTO_SCTP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct sctphdr) >> 2));
		break;
	case IPPROTO_UDP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct udphdr) >> 2));
		break;
	default:
		/* for packet types not supported by the hardware,
		 * the txbd len field is left unfilled.
		 */
		return;
	}
}

/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and the
 * packet is UDP with a destination port matching the IANA-assigned VXLAN
 * port, the hardware is expected to do the checksum offload, but it will
 * not do so when the UDP destination port is 4789.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT 4789
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
		return false;

	skb_checksum_help(skb);

	return true;
}

static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
				   u8 il4_proto, u32 *type_cs_vlan_tso,
				   u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	u32 l4_proto = ol4_proto;

	l3.hdr = skb_network_header(skb);

	/* define OL3 type and tunnel type(OL4) */
	if (skb->encapsulation) {
		/* define outer network header type */
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_CSUM);
			else
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_NO_CSUM);

		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
					HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4) */
		switch (l4_proto) {
		case IPPROTO_UDP:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_NVGRE);
			break;
		default:
			/* drop the skb for tunnel types the hardware does
			 * not support, because it cannot calculate the csum
			 * when doing TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already, the
			 * driver calculates the l4 checksum when not doing
			 * TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
	} else if (l3.v6->version == 6) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_TCP);
		break;
	case IPPROTO_UDP:
		if (hns3_tunnel_csum_bug(skb))
			break;

		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_SCTP);
		break;
	default:
		/* drop the skb for tunnel types the hardware does not
		 * support, because it cannot calculate the csum when
		 * doing TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already, the driver
		 * calculates the l4 checksum when not doing TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}

static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
			HNS3_TXD_BDTYPE_S, 0);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}

static int hns3_fill_desc_vtags(struct sk_buff *skb,
				struct hns3_enet_ring *tx_ring,
				u32 *inner_vlan_flag,
				u32 *out_vlan_flag,
				u16 *inner_vtag,
				u16 *out_vtag)
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->tqp->handle->kinfo.netdev->features &
	    NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1q, the driver just needs to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	if (skb_vlan_tag_present(skb)) {
		u16 vlan_tag;

		vlan_tag = skb_vlan_tag_get(skb);
		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;

		/* Based on hw strategy, use out_vtag in two layer tag case,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q)) {
			hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
			*out_vtag = vlan_tag;
		} else {
			hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
			*inner_vtag = vlan_tag;
		}
	} else if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vhdr;
		int rc;

		rc = skb_cow_head(skb, 0);
		if (rc < 0)
			return rc;
		vhdr = (struct vlan_ethhdr *)skb->data;
		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
					<< HNS3_TX_VLAN_PRIO_SHIFT);
	}

	skb->protocol = vlan_get_protocol(skb);
	return 0;
}

static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
			  int size, int frag_end, enum hns_desc_type type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	struct device *dev = ring_to_dev(ring);
	u32 ol_type_vlan_len_msec = 0;
	u16 bdtp_fe_sc_vld_ra_ri = 0;
	struct skb_frag_struct *frag;
	unsigned int frag_buf_num;
	u32 type_cs_vlan_tso = 0;
	struct sk_buff *skb;
	u16 inner_vtag = 0;
	u16 out_vtag = 0;
	unsigned int k;
	int sizeoflast;
	u32 paylen = 0;
	dma_addr_t dma;
	u16 mss = 0;
	u8 ol4_proto;
	u8 il4_proto;
	int ret;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;
		paylen = skb->len;

		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
					   &ol_type_vlan_len_msec,
					   &inner_vtag, &out_vtag);
		if (unlikely(ret))
			return ret;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);

			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
			if (ret)
				return ret;
			hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
					    &type_cs_vlan_tso,
					    &ol_type_vlan_len_msec);
			ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
						      &type_cs_vlan_tso,
						      &ol_type_vlan_len_msec);
			if (ret)
				return ret;

			ret = hns3_set_tso(skb, &paylen, &mss,
					   &type_cs_vlan_tso);
			if (ret)
				return ret;
		}
		/* Set txbd */
		desc->tx.ol_type_vlan_len_msec =
			cpu_to_le32(ol_type_vlan_len_msec);
		desc->tx.type_cs_vlan_tso_len =
			cpu_to_le32(type_cs_vlan_tso);
		desc->tx.paylen = cpu_to_le32(paylen);
		desc->tx.mss = cpu_to_le16(mss);
		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);

		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	} else {
		frag = (struct skb_frag_struct *)priv;
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
	}

	if (dma_mapping_error(ring->dev, dma)) {
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	desc_cb->length = size;

	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When frag size is bigger than hardware limit, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
		desc_cb->priv = priv;
		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
				DESC_TYPE_SKB : DESC_TYPE_PAGE;

		/* now, fill the descriptor */
		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
				(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
				       frag_end && (k == frag_buf_num - 1) ?
						1 : 0);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
				cpu_to_le16(bdtp_fe_sc_vld_ra_ri);

		/* move ring pointer to next */
		ring_ptr_move_fw(ring, next_to_use);

		desc_cb = &ring->desc_cb[ring->next_to_use];
		desc = &ring->desc[ring->next_to_use];
	}

	return 0;
}

static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
				   struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct skb_frag_struct *frag;
	int bdnum_for_frag;
	int frag_num;
	int buf_num;
	int size;
	int i;

	size = skb_headlen(skb);
	buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		bdnum_for_frag =
			(size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
		if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
			return -ENOMEM;

		buf_num += bdnum_for_frag;
	}

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;
	return 0;
}
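
/* Path used when TSO is disabled: one BD for the linear head plus one per
 * page fragment, so only the total ring space needs to be checked.
 */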
static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
				  struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	int buf_num;

	/* No. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(ring_space(ring) < buf_num))
		return -EBUSY;

	*bnum = buf_num;

	return 0;
}

static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* unmap the descriptor dma address */
		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
			dma_unmap_single(dev,
					 ring->desc_cb[ring->next_to_use].dma,
					 ring->desc_cb[ring->next_to_use].length,
					 DMA_TO_DEVICE);
		else if (ring->desc_cb[ring->next_to_use].length)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);

		ring->desc_cb[ring->next_to_use].length = 0;

		/* rollback one */
		ring_ptr_move_bw(ring, next_to_use);
	}
}

netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_nic_ring_data *ring_data =
		&tx_ring_data(priv, skb->queue_mapping);
	struct hns3_enet_ring *ring = ring_data->ring;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int next_to_use_head;
	int next_to_use_frag;
	int buf_num;
	int seg_num;
	int size;
	int ret;
	int i;

	/* Prefetch the data used later */
	prefetch(skb->data);

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_busy++;
		u64_stats_update_end(&ring->syncp);

		goto out_net_tx_busy;
	case -ENOMEM:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		netdev_err(netdev, "no memory to xmit!\n");

		goto out_err_tx_ok;
	default:
		break;
	}

	/* No. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	/* Fill the first part */
	size = skb_headlen(skb);

	next_to_use_head = ring->next_to_use;

	ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
				  DESC_TYPE_SKB);
	if (ret)
		goto head_fill_err;
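
	/* Remember where the fragment BDs start so a failure while filling
	 * them can be unwound separately from the head BD.
	 */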
	next_to_use_frag = ring->next_to_use;
	/* Fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);

		ret = priv->ops.fill_desc(ring, frag, size,
					  seg_num - 1 == i ? 1 : 0,
					  DESC_TYPE_PAGE);

		if (ret)
			goto frag_fill_err;
	}

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* Commit all data before submit */

	hnae3_queue_xmit(ring->tqp, buf_num);

	return NETDEV_TX_OK;

frag_fill_err:
	hns3_clear_desc(ring, next_to_use_frag);

head_fill_err:
	hns3_clear_desc(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:
	netif_stop_subqueue(netdev, ring_data->queue_index);
	smp_mb(); /* Commit all data before submit */

	return NETDEV_TX_BUSY;
}

static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    mac_addr->sa_data);
		return 0;
	}

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}

static int hns3_nic_do_ioctl(struct net_device *netdev,
			     struct ifreq *ifr, int cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!h->ae_algo->ops->do_ioctl)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
}

static int hns3_nic_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int ret;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
		else
			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    h->ae_algo->ops->enable_vlan_filter) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			h->ae_algo->ops->enable_vlan_filter(h, true);
		else
			h->ae_algo->ops->enable_vlan_filter(h, false);
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
		else
			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);

		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
		if (features & NETIF_F_NTUPLE)
			h->ae_algo->ops->enable_fd(h, true);
		else
			h->ae_algo->ops->enable_fd(h, false);
	}

	netdev->features = features;
	return 0;
}
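
/* Aggregate the per-ring software counters (read under each ring's
 * u64_stats syncp) on top of the device statistics that update_stats()
 * has refreshed into netdev->stats.
 */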
static void hns3_nic_get_stats64(struct net_device *netdev,
				 struct rtnl_link_stats64 *stats)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hnae3_handle *handle = priv->ae_handle;
	struct hns3_enet_ring *ring;
	unsigned int start;
	unsigned int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	u64 tx_drop = 0;
	u64 rx_drop = 0;

	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	handle->ae_algo->ops->update_stats(handle, &netdev->stats);

	for (idx = 0; idx < queue_num; idx++) {
		/* fetch the tx stats */
		ring = priv->ring_data[idx].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			tx_bytes += ring->stats.tx_bytes;
			tx_pkts += ring->stats.tx_pkts;
			tx_drop += ring->stats.tx_busy;
			tx_drop += ring->stats.sw_err_cnt;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		/* fetch the rx stats */
		ring = priv->ring_data[idx + queue_num].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			rx_bytes += ring->stats.rx_bytes;
			rx_pkts += ring->stats.rx_pkts;
			rx_drop += ring->stats.non_vld_descs;
			rx_drop += ring->stats.err_pkt_len;
			rx_drop += ring->stats.l2_err;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = netdev->stats.rx_errors;
	stats->multicast = netdev->stats.multicast;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	stats->tx_errors = netdev->stats.tx_errors;
	stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
	stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
	stats->collisions = netdev->stats.collisions;
	stats->rx_over_errors = netdev->stats.rx_over_errors;
	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = netdev->stats.tx_window_errors;
	stats->rx_compressed = netdev->stats.rx_compressed;
	stats->tx_compressed = netdev->stats.tx_compressed;
}

static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
	u8 tc = mqprio_qopt->qopt.num_tc;
	u16 mode = mqprio_qopt->mode;
	u8 hw = mqprio_qopt->qopt.hw;
	bool if_running;
	int ret;

	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
		return -EOPNOTSUPP;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!netdev)
		return -EINVAL;

	if_running = netif_running(netdev);
	if (if_running) {
		hns3_nic_net_stop(netdev);
		msleep(100);
	}
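
	/* Program the new TC/priority mapping through the DCB ops of the ae
	 * layer while the interface is stopped, then refresh the real queue
	 * count before bringing it back up.
	 */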
	ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
		kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
	if (ret)
		goto out;

	ret = hns3_nic_set_real_num_queue(netdev);

out:
	if (if_running)
		hns3_nic_net_open(netdev);

	return ret;
}

static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	return hns3_setup_tc(dev, type_data);
}

static int hns3_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);

	if (!ret)
		set_bit(vid, priv->active_vlans);

	return ret;
}

static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);

	if (!ret)
		clear_bit(vid, priv->active_vlans);

	return ret;
}

static int hns3_restore_vlan(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = 0;
	u16 vid;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
		if (ret) {
			netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
				   vid, ret);
			return ret;
		}
	}

	return ret;
}

static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
				u8 qos, __be16 vlan_proto)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vf_vlan_filter)
		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
							  qos, vlan_proto);

	return ret;
}

static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	bool if_running = netif_running(netdev);
	int ret;

	if (!h->ae_algo->ops->set_mtu)
		return -EOPNOTSUPP;

	/* if this was called with netdev up then bring netdevice down */
	if (if_running) {
		(void)hns3_nic_net_stop(netdev);
		msleep(100);
	}

	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
	if (ret)
		netdev_err(netdev, "failed to change MTU in hardware %d\n",
			   ret);
	else
		netdev->mtu = new_mtu;

	/* if the netdev was running earlier, bring it up again */
	if (if_running && hns3_nic_net_open(netdev))
		ret = -EINVAL;

	return ret;
}
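
/* Locate the stopped TX queue that triggered the watchdog and dump its
 * software and hardware ring pointers to help debug the stall.
 */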
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *tx_ring = NULL;
	int timeout_queue = 0;
	int hw_head, hw_tail;
	int i;

	/* Find the stopped queue the same way the stack does */
	for (i = 0; i < ndev->real_num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(ndev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + ndev->watchdog_timeo))) {
			timeout_queue = i;
			break;
		}
	}

	if (i == ndev->num_tx_queues) {
		netdev_info(ndev,
			    "no netdev TX timeout queue found, timeout count: %llu\n",
			    priv->tx_timeout_count);
		return false;
	}

	tx_ring = priv->ring_data[timeout_queue].ring;

	hw_head = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_HEAD_REG);
	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_TAIL_REG);
	netdev_info(ndev,
		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
		    priv->tx_timeout_count,
		    timeout_queue,
		    tx_ring->next_to_use,
		    tx_ring->next_to_clean,
		    hw_head,
		    hw_tail,
		    readl(tx_ring->tqp_vector->mask_addr));

	return true;
}

static void hns3_nic_net_timeout(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;

	if (!hns3_get_tx_timeo_queue_info(ndev))
		return;

	priv->tx_timeout_count++;

	/* request the reset, and let the hclge determine
	 * which reset level should be done
	 */
	if (h->ae_algo->ops->reset_event)
		h->ae_algo->ops->reset_event(h->pdev, h);
}

static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_tx_timeout		= hns3_nic_net_timeout,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_do_ioctl		= hns3_nic_do_ioctl,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
};

static bool hns3_is_phys_func(struct pci_dev *pdev)
{
	u32 dev_id = pdev->device;

	switch (dev_id) {
	case HNAE3_DEV_ID_GE:
	case HNAE3_DEV_ID_25GE:
	case HNAE3_DEV_ID_25GE_RDMA:
	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_50GE_RDMA:
	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
		return true;
	case HNAE3_DEV_ID_100G_VF:
	case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
		return false;
	default:
		dev_warn(&pdev->dev, "un-recognized pci device-id %d",
			 dev_id);
	}

	return false;
}

static void hns3_disable_sriov(struct pci_dev *pdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(pdev);
}

static void hns3_get_dev_capability(struct pci_dev *pdev,
				    struct hnae3_ae_dev *ae_dev)
{
	if (pdev->revision >= 0x21)
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
}

/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;
	int ret;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
			      GFP_KERNEL);
	if (!ae_dev) {
		ret = -ENOMEM;
		return ret;
	}

	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	ae_dev->dev_type = HNAE3_DEV_KNIC;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	hns3_get_dev_capability(pdev, ae_dev);
	pci_set_drvdata(pdev, ae_dev);

	hnae3_register_ae_dev(ae_dev);

	return 0;
}

/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
		hns3_disable_sriov(pdev);

	hnae3_unregister_ae_dev(ae_dev);
}

/**
 * hns3_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
		dev_warn(&pdev->dev, "Can not config SRIOV\n");
		return -EINVAL;
	}

	if (num_vfs) {
		ret = pci_enable_sriov(pdev, num_vfs);
		if (ret)
			dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
		else
			return num_vfs;
	} else if (!pci_vfs_assigned(pdev)) {
		pci_disable_sriov(pdev);
	} else {
		dev_warn(&pdev->dev,
			 "Unable to free VFs because some are assigned to VMs.\n");
	}

	return 0;
}

static void hns3_shutdown(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	hnae3_unregister_ae_dev(ae_dev);
	devm_kfree(&pdev->dev, ae_dev);
	pci_set_drvdata(pdev, NULL);

	if (system_state == SYSTEM_POWER_OFF)
		pci_set_power_state(pdev, PCI_D3hot);
}

static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	pci_ers_result_t ret;

	dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (!ae_dev) {
		dev_err(&pdev->dev,
			"Can't recover - error happened during device init\n");
		return PCI_ERS_RESULT_NONE;
	}

	if (ae_dev->ops->process_hw_error)
		ret = ae_dev->ops->process_hw_error(ae_dev);
	else
		return PCI_ERS_RESULT_NONE;

	return ret;
}

static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	dev_info(dev, "requesting reset due to PCI error\n");

	/* request the reset */
	if (ae_dev->ops->reset_event) {
		ae_dev->ops->reset_event(pdev, NULL);
		return PCI_ERS_RESULT_RECOVERED;
	}

	return PCI_ERS_RESULT_DISCONNECT;
}

static const struct pci_error_handlers hns3_err_handler = {
	.error_detected = hns3_error_detected,
	.slot_reset     = hns3_slot_reset,
};

static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
	.shutdown = hns3_shutdown,
	.sriov_configure = hns3_pci_sriov_configure,
	.err_handler    = &hns3_err_handler,
};

/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct pci_dev *pdev = h->pdev;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

	netdev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

	if (pdev->revision >= 0x21) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		if (!(h->flags & HNAE3_SUPPORT_VF)) {
			netdev->hw_features |= NETIF_F_NTUPLE;
			netdev->features |= NETIF_F_NTUPLE;
		}
	}
}

static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	unsigned int order = hnae3_page_order(ring);
	struct page *p;

	p = dev_alloc_pages(order);
	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae3_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

static void hns3_free_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (!HNAE3_IS_TX_RING(ring))
		put_page((struct page *)cb->priv);
	memset(cb, 0, sizeof(*cb));
}

static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
		return -EIO;

	return 0;
}
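
/* SKB heads are mapped with dma_map_single() in hns3_fill_desc(), while page
 * fragments and RX buffers are mapped with dma_map_page(), so the unmap call
 * must match the buffer type recorded in the desc_cb.
 */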
static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
			      struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else if (cb->length)
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc[i].addr = 0;
}

static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	struct hns3_desc_cb *cb = &ring->desc_cb[i];

	if (!ring->desc_cb[i].dma)
		return;

	hns3_buffer_detach(ring, i);
	hns3_free_buffer(ring, cb);
}

static void hns3_free_buffers(struct hns3_enet_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hns3_free_buffer_detach(ring, i);
}

/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	hns3_free_buffers(ring);

	if (ring->desc) {
		dma_free_coherent(ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
					 &ring->desc_dma_addr,
					 GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
				   struct hns3_desc_cb *cb)
{
	int ret;

	ret = hns3_alloc_buffer(ring, cb);
	if (ret)
		goto out;

	ret = hns3_map_buffer(ring, cb);
	if (ret)
		goto out_with_buf;

	return 0;

out_with_buf:
	hns3_free_buffer(ring, cb);
out:
	return ret;
}

static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
{
	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);

	if (ret)
		return ret;

	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);

	return 0;
}

/* Allocate memory for raw pkg, and map with dma */
static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hns3_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hns3_free_buffer_detach(ring, j);
	return ret;
}

/* detach an in-use buffer and replace it with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
				struct hns3_desc_cb *res_cb)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc_cb[i] = *res_cb;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
	ring->desc[i].rx.bd_base_info = 0;
}

static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
					 + ring->desc_cb[i].page_offset);
	ring->desc[i].rx.bd_base_info = 0;
}
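
/* Reclaim one completed TX descriptor: account it towards the byte/packet
 * totals and free (or unmap) whatever buffer was attached to it.
 */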
desc_cb->length; 2098 /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/ 2099 hns3_free_buffer_detach(ring, ring->next_to_clean); 2100 2101 ring_ptr_move_fw(ring, next_to_clean); 2102 } 2103 2104 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) 2105 { 2106 int u = ring->next_to_use; 2107 int c = ring->next_to_clean; 2108 2109 if (unlikely(h > ring->desc_num)) 2110 return 0; 2111 2112 return u > c ? (h > c && h <= u) : (h > c || h <= u); 2113 } 2114 2115 void hns3_clean_tx_ring(struct hns3_enet_ring *ring) 2116 { 2117 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2118 struct hns3_nic_priv *priv = netdev_priv(netdev); 2119 struct netdev_queue *dev_queue; 2120 int bytes, pkts; 2121 int head; 2122 2123 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); 2124 rmb(); /* Make sure head is ready before touch any data */ 2125 2126 if (is_ring_empty(ring) || head == ring->next_to_clean) 2127 return; /* no data to poll */ 2128 2129 if (unlikely(!is_valid_clean_head(ring, head))) { 2130 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, 2131 ring->next_to_use, ring->next_to_clean); 2132 2133 u64_stats_update_begin(&ring->syncp); 2134 ring->stats.io_err_cnt++; 2135 u64_stats_update_end(&ring->syncp); 2136 return; 2137 } 2138 2139 bytes = 0; 2140 pkts = 0; 2141 while (head != ring->next_to_clean) { 2142 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); 2143 /* Issue prefetch for next Tx descriptor */ 2144 prefetch(&ring->desc_cb[ring->next_to_clean]); 2145 } 2146 2147 ring->tqp_vector->tx_group.total_bytes += bytes; 2148 ring->tqp_vector->tx_group.total_packets += pkts; 2149 2150 u64_stats_update_begin(&ring->syncp); 2151 ring->stats.tx_bytes += bytes; 2152 ring->stats.tx_pkts += pkts; 2153 u64_stats_update_end(&ring->syncp); 2154 2155 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 2156 netdev_tx_completed_queue(dev_queue, pkts, bytes); 2157 2158 if (unlikely(pkts && netif_carrier_ok(netdev) && 2159 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { 2160 /* Make sure that anybody stopping the queue after this 2161 * sees the new next_to_clean. 2162 */ 2163 smp_mb(); 2164 if (netif_tx_queue_stopped(dev_queue) && 2165 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 2166 netif_tx_wake_queue(dev_queue); 2167 ring->stats.restart_queue++; 2168 } 2169 } 2170 } 2171 2172 static int hns3_desc_unused(struct hns3_enet_ring *ring) 2173 { 2174 int ntc = ring->next_to_clean; 2175 int ntu = ring->next_to_use; 2176 2177 return ((ntc >= ntu) ? 
0 : ring->desc_num) + ntc - ntu; 2178 } 2179 2180 static void 2181 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count) 2182 { 2183 struct hns3_desc_cb *desc_cb; 2184 struct hns3_desc_cb res_cbs; 2185 int i, ret; 2186 2187 for (i = 0; i < cleaned_count; i++) { 2188 desc_cb = &ring->desc_cb[ring->next_to_use]; 2189 if (desc_cb->reuse_flag) { 2190 u64_stats_update_begin(&ring->syncp); 2191 ring->stats.reuse_pg_cnt++; 2192 u64_stats_update_end(&ring->syncp); 2193 2194 hns3_reuse_buffer(ring, ring->next_to_use); 2195 } else { 2196 ret = hns3_reserve_buffer_map(ring, &res_cbs); 2197 if (ret) { 2198 u64_stats_update_begin(&ring->syncp); 2199 ring->stats.sw_err_cnt++; 2200 u64_stats_update_end(&ring->syncp); 2201 2202 netdev_err(ring->tqp->handle->kinfo.netdev, 2203 "hnae reserve buffer map failed.\n"); 2204 break; 2205 } 2206 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 2207 } 2208 2209 ring_ptr_move_fw(ring, next_to_use); 2210 } 2211 2212 wmb(); /* Make sure all descriptor writes are visible before updating the head register */ 2213 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 2214 } 2215 2216 static void hns3_nic_reuse_page(struct sk_buff *skb, int i, 2217 struct hns3_enet_ring *ring, int pull_len, 2218 struct hns3_desc_cb *desc_cb) 2219 { 2220 struct hns3_desc *desc; 2221 u32 truesize; 2222 int size; 2223 int last_offset; 2224 bool twobufs; 2225 2226 twobufs = ((PAGE_SIZE < 8192) && 2227 hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048); 2228 2229 desc = &ring->desc[ring->next_to_clean]; 2230 size = le16_to_cpu(desc->rx.size); 2231 2232 truesize = hnae3_buf_size(ring); 2233 2234 if (!twobufs) 2235 last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring); 2236 2237 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 2238 size - pull_len, truesize); 2239 2240 /* Avoid re-using remote pages; the flag defaults to no reuse */ 2241 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) 2242 return; 2243 2244 if (twobufs) { 2245 /* If we are the only owner of the page we can reuse it */ 2246 if (likely(page_count(desc_cb->priv) == 1)) { 2247 /* Flip page offset to the other buffer */ 2248 desc_cb->page_offset ^= truesize; 2249 2250 desc_cb->reuse_flag = 1; 2251 /* bump ref count on page before it is given */ 2252 get_page(desc_cb->priv); 2253 } 2254 return; 2255 } 2256 2257 /* Move the offset up to the next buffer in the page */ 2258 desc_cb->page_offset += truesize; 2259 2260 if (desc_cb->page_offset <= last_offset) { 2261 desc_cb->reuse_flag = 1; 2262 /* Bump ref count on page before it is given */ 2263 get_page(desc_cb->priv); 2264 } 2265 } 2266 2267 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, 2268 struct hns3_desc *desc) 2269 { 2270 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2271 int l3_type, l4_type; 2272 u32 bd_base_info; 2273 int ol4_type; 2274 u32 l234info; 2275 2276 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2277 l234info = le32_to_cpu(desc->rx.l234_info); 2278 2279 skb->ip_summed = CHECKSUM_NONE; 2280 2281 skb_checksum_none_assert(skb); 2282 2283 if (!(netdev->features & NETIF_F_RXCSUM)) 2284 return; 2285 2286 /* check if hardware has done checksum */ 2287 if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) 2288 return; 2289 2290 if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) || 2291 hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || 2292 hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || 2293 hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { 2294 u64_stats_update_begin(&ring->syncp); 2295 ring->stats.l3l4_csum_err++; 2296
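/* The hardware reported an inner or outer L3/L4 checksum error for this
 * descriptor, so skb->ip_summed is left as CHECKSUM_NONE and the stack
 * will verify the checksum in software.
 */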
u64_stats_update_end(&ring->syncp); 2297 2298 return; 2299 } 2300 2301 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, 2302 HNS3_RXD_L3ID_S); 2303 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, 2304 HNS3_RXD_L4ID_S); 2305 2306 ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, 2307 HNS3_RXD_OL4ID_S); 2308 switch (ol4_type) { 2309 case HNS3_OL4_TYPE_MAC_IN_UDP: 2310 case HNS3_OL4_TYPE_NVGRE: 2311 skb->csum_level = 1; 2312 /* fall through */ 2313 case HNS3_OL4_TYPE_NO_TUN: 2314 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ 2315 if ((l3_type == HNS3_L3_TYPE_IPV4 || 2316 l3_type == HNS3_L3_TYPE_IPV6) && 2317 (l4_type == HNS3_L4_TYPE_UDP || 2318 l4_type == HNS3_L4_TYPE_TCP || 2319 l4_type == HNS3_L4_TYPE_SCTP)) 2320 skb->ip_summed = CHECKSUM_UNNECESSARY; 2321 break; 2322 default: 2323 break; 2324 } 2325 } 2326 2327 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) 2328 { 2329 napi_gro_receive(&ring->tqp_vector->napi, skb); 2330 } 2331 2332 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, 2333 struct hns3_desc *desc, u32 l234info, 2334 u16 *vlan_tag) 2335 { 2336 struct pci_dev *pdev = ring->tqp->handle->pdev; 2337 2338 if (pdev->revision == 0x20) { 2339 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2340 if (!(*vlan_tag & VLAN_VID_MASK)) 2341 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2342 2343 return (*vlan_tag != 0); 2344 } 2345 2346 #define HNS3_STRP_OUTER_VLAN 0x1 2347 #define HNS3_STRP_INNER_VLAN 0x2 2348 2349 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, 2350 HNS3_RXD_STRP_TAGP_S)) { 2351 case HNS3_STRP_OUTER_VLAN: 2352 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2353 return true; 2354 case HNS3_STRP_INNER_VLAN: 2355 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2356 return true; 2357 default: 2358 return false; 2359 } 2360 } 2361 2362 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, 2363 struct sk_buff *skb) 2364 { 2365 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 2366 struct hnae3_handle *handle = ring->tqp->handle; 2367 enum pkt_hash_types rss_type; 2368 2369 if (le32_to_cpu(desc->rx.rss_hash)) 2370 rss_type = handle->kinfo.rss_type; 2371 else 2372 rss_type = PKT_HASH_TYPE_NONE; 2373 2374 skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type); 2375 } 2376 2377 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, 2378 struct sk_buff **out_skb, int *out_bnum) 2379 { 2380 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2381 struct hns3_desc_cb *desc_cb; 2382 struct hns3_desc *desc; 2383 struct sk_buff *skb; 2384 unsigned char *va; 2385 u32 bd_base_info; 2386 int pull_len; 2387 u32 l234info; 2388 int length; 2389 int bnum; 2390 2391 desc = &ring->desc[ring->next_to_clean]; 2392 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2393 2394 prefetch(desc); 2395 2396 length = le16_to_cpu(desc->rx.size); 2397 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2398 2399 /* Check valid BD */ 2400 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) 2401 return -EFAULT; 2402 2403 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; 2404 2405 /* Prefetch first cache line of first page 2406 * Idea is to cache few bytes of the header of the packet. Our L1 Cache 2407 * line size is 64B so need to prefetch twice to make it 128B. But in 2408 * actual we can have greater size of caches with 128B Level 1 cache 2409 * lines. In such a case, single fetch would suffice to cache in the 2410 * relevant part of the header. 
2411 */ 2412 prefetch(va); 2413 #if L1_CACHE_BYTES < 128 2414 prefetch(va + L1_CACHE_BYTES); 2415 #endif 2416 2417 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, 2418 HNS3_RX_HEAD_SIZE); 2419 if (unlikely(!skb)) { 2420 netdev_err(netdev, "alloc rx skb fail\n"); 2421 2422 u64_stats_update_begin(&ring->syncp); 2423 ring->stats.sw_err_cnt++; 2424 u64_stats_update_end(&ring->syncp); 2425 2426 return -ENOMEM; 2427 } 2428 2429 prefetchw(skb->data); 2430 2431 bnum = 1; 2432 if (length <= HNS3_RX_HEAD_SIZE) { 2433 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 2434 2435 /* We can reuse buffer as-is, just make sure it is local */ 2436 if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) 2437 desc_cb->reuse_flag = 1; 2438 else /* This page cannot be reused so discard it */ 2439 put_page(desc_cb->priv); 2440 2441 ring_ptr_move_fw(ring, next_to_clean); 2442 } else { 2443 u64_stats_update_begin(&ring->syncp); 2444 ring->stats.seg_pkt_cnt++; 2445 u64_stats_update_end(&ring->syncp); 2446 2447 pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); 2448 2449 memcpy(__skb_put(skb, pull_len), va, 2450 ALIGN(pull_len, sizeof(long))); 2451 2452 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); 2453 ring_ptr_move_fw(ring, next_to_clean); 2454 2455 while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { 2456 desc = &ring->desc[ring->next_to_clean]; 2457 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2458 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2459 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); 2460 ring_ptr_move_fw(ring, next_to_clean); 2461 bnum++; 2462 } 2463 } 2464 2465 *out_bnum = bnum; 2466 2467 l234info = le32_to_cpu(desc->rx.l234_info); 2468 2469 /* Based on hw strategy, the tag offloaded will be stored at 2470 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 2471 * in one layer tag case. 
2472 */ 2473 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 2474 u16 vlan_tag; 2475 2476 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) 2477 __vlan_hwaccel_put_tag(skb, 2478 htons(ETH_P_8021Q), 2479 vlan_tag); 2480 } 2481 2482 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { 2483 u64_stats_update_begin(&ring->syncp); 2484 ring->stats.non_vld_descs++; 2485 u64_stats_update_end(&ring->syncp); 2486 2487 dev_kfree_skb_any(skb); 2488 return -EINVAL; 2489 } 2490 2491 if (unlikely((!desc->rx.pkt_len) || 2492 hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { 2493 u64_stats_update_begin(&ring->syncp); 2494 ring->stats.err_pkt_len++; 2495 u64_stats_update_end(&ring->syncp); 2496 2497 dev_kfree_skb_any(skb); 2498 return -EFAULT; 2499 } 2500 2501 if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { 2502 u64_stats_update_begin(&ring->syncp); 2503 ring->stats.l2_err++; 2504 u64_stats_update_end(&ring->syncp); 2505 2506 dev_kfree_skb_any(skb); 2507 return -EFAULT; 2508 } 2509 2510 u64_stats_update_begin(&ring->syncp); 2511 ring->stats.rx_pkts++; 2512 ring->stats.rx_bytes += skb->len; 2513 u64_stats_update_end(&ring->syncp); 2514 2515 ring->tqp_vector->rx_group.total_bytes += skb->len; 2516 2517 hns3_rx_checksum(ring, skb, desc); 2518 hns3_set_rx_skb_rss_type(ring, skb); 2519 2520 return 0; 2521 } 2522 2523 int hns3_clean_rx_ring( 2524 struct hns3_enet_ring *ring, int budget, 2525 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) 2526 { 2527 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 2528 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2529 int recv_pkts, recv_bds, clean_count, err; 2530 int unused_count = hns3_desc_unused(ring); 2531 struct sk_buff *skb = NULL; 2532 int num, bnum = 0; 2533 2534 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); 2535 rmb(); /* Make sure num has taken effect before the other data is touched */ 2536 2537 recv_pkts = 0, recv_bds = 0, clean_count = 0; 2538 num -= unused_count; 2539 2540 while (recv_pkts < budget && recv_bds < num) { 2541 /* Reuse or realloc buffers */ 2542 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { 2543 hns3_nic_alloc_rx_buffers(ring, 2544 clean_count + unused_count); 2545 clean_count = 0; 2546 unused_count = hns3_desc_unused(ring); 2547 } 2548 2549 /* Poll one pkt */ 2550 err = hns3_handle_rx_bd(ring, &skb, &bnum); 2551 if (unlikely(!skb)) /* This fault cannot be repaired */ 2552 goto out; 2553 2554 recv_bds += bnum; 2555 clean_count += bnum; 2556 if (unlikely(err)) { /* Skip over the erroneous packet */ 2557 recv_pkts++; 2558 continue; 2559 } 2560 2561 /* Pass the packet up to the IP stack */ 2562 skb->protocol = eth_type_trans(skb, netdev); 2563 rx_fn(ring, skb); 2564 2565 recv_pkts++; 2566 } 2567 2568 out: 2569 /* Refill any remaining unused descriptors before returning */ 2570 if (clean_count + unused_count > 0) 2571 hns3_nic_alloc_rx_buffers(ring, 2572 clean_count + unused_count); 2573 2574 return recv_pkts; 2575 } 2576 2577 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) 2578 { 2579 struct hns3_enet_tqp_vector *tqp_vector = 2580 ring_group->ring->tqp_vector; 2581 enum hns3_flow_level_range new_flow_level; 2582 int packets_per_msecs; 2583 int bytes_per_msecs; 2584 u32 time_passed_ms; 2585 u16 new_int_gl; 2586 2587 if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies) 2588 return false; 2589 2590 if (ring_group->total_packets == 0) { 2591 ring_group->coal.int_gl = HNS3_INT_GL_50K; 2592 ring_group->coal.flow_level = HNS3_FLOW_LOW; 2593 return true; 2594 } 2595 2596 /* Simple
throttlerate management 2597 * 0-10MB/s lower (50000 ints/s) 2598 * 10-20MB/s middle (20000 ints/s) 2599 * 20-1249MB/s high (18000 ints/s) 2600 * > 40000pps ultra (8000 ints/s) 2601 */ 2602 new_flow_level = ring_group->coal.flow_level; 2603 new_int_gl = ring_group->coal.int_gl; 2604 time_passed_ms = 2605 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); 2606 2607 if (!time_passed_ms) 2608 return false; 2609 2610 do_div(ring_group->total_packets, time_passed_ms); 2611 packets_per_msecs = ring_group->total_packets; 2612 2613 do_div(ring_group->total_bytes, time_passed_ms); 2614 bytes_per_msecs = ring_group->total_bytes; 2615 2616 #define HNS3_RX_LOW_BYTE_RATE 10000 2617 #define HNS3_RX_MID_BYTE_RATE 20000 2618 2619 switch (new_flow_level) { 2620 case HNS3_FLOW_LOW: 2621 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) 2622 new_flow_level = HNS3_FLOW_MID; 2623 break; 2624 case HNS3_FLOW_MID: 2625 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) 2626 new_flow_level = HNS3_FLOW_HIGH; 2627 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) 2628 new_flow_level = HNS3_FLOW_LOW; 2629 break; 2630 case HNS3_FLOW_HIGH: 2631 case HNS3_FLOW_ULTRA: 2632 default: 2633 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) 2634 new_flow_level = HNS3_FLOW_MID; 2635 break; 2636 } 2637 2638 #define HNS3_RX_ULTRA_PACKET_RATE 40 2639 2640 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && 2641 &tqp_vector->rx_group == ring_group) 2642 new_flow_level = HNS3_FLOW_ULTRA; 2643 2644 switch (new_flow_level) { 2645 case HNS3_FLOW_LOW: 2646 new_int_gl = HNS3_INT_GL_50K; 2647 break; 2648 case HNS3_FLOW_MID: 2649 new_int_gl = HNS3_INT_GL_20K; 2650 break; 2651 case HNS3_FLOW_HIGH: 2652 new_int_gl = HNS3_INT_GL_18K; 2653 break; 2654 case HNS3_FLOW_ULTRA: 2655 new_int_gl = HNS3_INT_GL_8K; 2656 break; 2657 default: 2658 break; 2659 } 2660 2661 ring_group->total_bytes = 0; 2662 ring_group->total_packets = 0; 2663 ring_group->coal.flow_level = new_flow_level; 2664 if (new_int_gl != ring_group->coal.int_gl) { 2665 ring_group->coal.int_gl = new_int_gl; 2666 return true; 2667 } 2668 return false; 2669 } 2670 2671 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) 2672 { 2673 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 2674 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 2675 bool rx_update, tx_update; 2676 2677 if (tqp_vector->int_adapt_down > 0) { 2678 tqp_vector->int_adapt_down--; 2679 return; 2680 } 2681 2682 if (rx_group->coal.gl_adapt_enable) { 2683 rx_update = hns3_get_new_int_gl(rx_group); 2684 if (rx_update) 2685 hns3_set_vector_coalesce_rx_gl(tqp_vector, 2686 rx_group->coal.int_gl); 2687 } 2688 2689 if (tx_group->coal.gl_adapt_enable) { 2690 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group); 2691 if (tx_update) 2692 hns3_set_vector_coalesce_tx_gl(tqp_vector, 2693 tx_group->coal.int_gl); 2694 } 2695 2696 tqp_vector->last_jiffies = jiffies; 2697 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; 2698 } 2699 2700 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 2701 { 2702 struct hns3_enet_ring *ring; 2703 int rx_pkt_total = 0; 2704 2705 struct hns3_enet_tqp_vector *tqp_vector = 2706 container_of(napi, struct hns3_enet_tqp_vector, napi); 2707 bool clean_complete = true; 2708 int rx_budget; 2709 2710 /* Since the actual Tx work is minimal, we can give the Tx a larger 2711 * budget and be more aggressive about cleaning up the Tx descriptors. 
2712 */ 2713 hns3_for_each_ring(ring, tqp_vector->tx_group) 2714 hns3_clean_tx_ring(ring); 2715 2716 /* make sure rx ring budget not smaller than 1 */ 2717 rx_budget = max(budget / tqp_vector->num_tqps, 1); 2718 2719 hns3_for_each_ring(ring, tqp_vector->rx_group) { 2720 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 2721 hns3_rx_skb); 2722 2723 if (rx_cleaned >= rx_budget) 2724 clean_complete = false; 2725 2726 rx_pkt_total += rx_cleaned; 2727 } 2728 2729 tqp_vector->rx_group.total_packets += rx_pkt_total; 2730 2731 if (!clean_complete) 2732 return budget; 2733 2734 napi_complete(napi); 2735 hns3_update_new_int_gl(tqp_vector); 2736 hns3_mask_vector_irq(tqp_vector, 1); 2737 2738 return rx_pkt_total; 2739 } 2740 2741 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 2742 struct hnae3_ring_chain_node *head) 2743 { 2744 struct pci_dev *pdev = tqp_vector->handle->pdev; 2745 struct hnae3_ring_chain_node *cur_chain = head; 2746 struct hnae3_ring_chain_node *chain; 2747 struct hns3_enet_ring *tx_ring; 2748 struct hns3_enet_ring *rx_ring; 2749 2750 tx_ring = tqp_vector->tx_group.ring; 2751 if (tx_ring) { 2752 cur_chain->tqp_index = tx_ring->tqp->tqp_index; 2753 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 2754 HNAE3_RING_TYPE_TX); 2755 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2756 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); 2757 2758 cur_chain->next = NULL; 2759 2760 while (tx_ring->next) { 2761 tx_ring = tx_ring->next; 2762 2763 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), 2764 GFP_KERNEL); 2765 if (!chain) 2766 goto err_free_chain; 2767 2768 cur_chain->next = chain; 2769 chain->tqp_index = tx_ring->tqp->tqp_index; 2770 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 2771 HNAE3_RING_TYPE_TX); 2772 hnae3_set_field(chain->int_gl_idx, 2773 HNAE3_RING_GL_IDX_M, 2774 HNAE3_RING_GL_IDX_S, 2775 HNAE3_RING_GL_TX); 2776 2777 cur_chain = chain; 2778 } 2779 } 2780 2781 rx_ring = tqp_vector->rx_group.ring; 2782 if (!tx_ring && rx_ring) { 2783 cur_chain->next = NULL; 2784 cur_chain->tqp_index = rx_ring->tqp->tqp_index; 2785 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 2786 HNAE3_RING_TYPE_RX); 2787 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2788 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 2789 2790 rx_ring = rx_ring->next; 2791 } 2792 2793 while (rx_ring) { 2794 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 2795 if (!chain) 2796 goto err_free_chain; 2797 2798 cur_chain->next = chain; 2799 chain->tqp_index = rx_ring->tqp->tqp_index; 2800 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 2801 HNAE3_RING_TYPE_RX); 2802 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2803 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 2804 2805 cur_chain = chain; 2806 2807 rx_ring = rx_ring->next; 2808 } 2809 2810 return 0; 2811 2812 err_free_chain: 2813 cur_chain = head->next; 2814 while (cur_chain) { 2815 chain = cur_chain->next; 2816 devm_kfree(&pdev->dev, chain); 2817 cur_chain = chain; 2818 } 2819 2820 return -ENOMEM; 2821 } 2822 2823 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 2824 struct hnae3_ring_chain_node *head) 2825 { 2826 struct pci_dev *pdev = tqp_vector->handle->pdev; 2827 struct hnae3_ring_chain_node *chain_tmp, *chain; 2828 2829 chain = head->next; 2830 2831 while (chain) { 2832 chain_tmp = chain->next; 2833 devm_kfree(&pdev->dev, chain); 2834 chain = chain_tmp; 2835 } 2836 } 2837 2838 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 2839 struct hns3_enet_ring *ring) 
2840 { 2841 ring->next = group->ring; 2842 group->ring = ring; 2843 2844 group->count++; 2845 } 2846 2847 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) 2848 { 2849 struct pci_dev *pdev = priv->ae_handle->pdev; 2850 struct hns3_enet_tqp_vector *tqp_vector; 2851 int num_vectors = priv->vector_num; 2852 int numa_node; 2853 int vector_i; 2854 2855 numa_node = dev_to_node(&pdev->dev); 2856 2857 for (vector_i = 0; vector_i < num_vectors; vector_i++) { 2858 tqp_vector = &priv->tqp_vector[vector_i]; 2859 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), 2860 &tqp_vector->affinity_mask); 2861 } 2862 } 2863 2864 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 2865 { 2866 struct hnae3_ring_chain_node vector_ring_chain; 2867 struct hnae3_handle *h = priv->ae_handle; 2868 struct hns3_enet_tqp_vector *tqp_vector; 2869 int ret = 0; 2870 int i; 2871 2872 hns3_nic_set_cpumask(priv); 2873 2874 for (i = 0; i < priv->vector_num; i++) { 2875 tqp_vector = &priv->tqp_vector[i]; 2876 hns3_vector_gl_rl_init_hw(tqp_vector, priv); 2877 tqp_vector->num_tqps = 0; 2878 } 2879 2880 for (i = 0; i < h->kinfo.num_tqps; i++) { 2881 u16 vector_i = i % priv->vector_num; 2882 u16 tqp_num = h->kinfo.num_tqps; 2883 2884 tqp_vector = &priv->tqp_vector[vector_i]; 2885 2886 hns3_add_ring_to_group(&tqp_vector->tx_group, 2887 priv->ring_data[i].ring); 2888 2889 hns3_add_ring_to_group(&tqp_vector->rx_group, 2890 priv->ring_data[i + tqp_num].ring); 2891 2892 priv->ring_data[i].ring->tqp_vector = tqp_vector; 2893 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; 2894 tqp_vector->num_tqps++; 2895 } 2896 2897 for (i = 0; i < priv->vector_num; i++) { 2898 tqp_vector = &priv->tqp_vector[i]; 2899 2900 tqp_vector->rx_group.total_bytes = 0; 2901 tqp_vector->rx_group.total_packets = 0; 2902 tqp_vector->tx_group.total_bytes = 0; 2903 tqp_vector->tx_group.total_packets = 0; 2904 tqp_vector->handle = h; 2905 2906 ret = hns3_get_vector_ring_chain(tqp_vector, 2907 &vector_ring_chain); 2908 if (ret) 2909 return ret; 2910 2911 ret = h->ae_algo->ops->map_ring_to_vector(h, 2912 tqp_vector->vector_irq, &vector_ring_chain); 2913 2914 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 2915 2916 if (ret) 2917 goto map_ring_fail; 2918 2919 netif_napi_add(priv->netdev, &tqp_vector->napi, 2920 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 2921 } 2922 2923 return 0; 2924 2925 map_ring_fail: 2926 while (i--) 2927 netif_napi_del(&priv->tqp_vector[i].napi); 2928 2929 return ret; 2930 } 2931 2932 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 2933 { 2934 struct hnae3_handle *h = priv->ae_handle; 2935 struct hns3_enet_tqp_vector *tqp_vector; 2936 struct hnae3_vector_info *vector; 2937 struct pci_dev *pdev = h->pdev; 2938 u16 tqp_num = h->kinfo.num_tqps; 2939 u16 vector_num; 2940 int ret = 0; 2941 u16 i; 2942 2943 /* RSS size, cpu online and vector_num should be the same */ 2944 /* Should consider 2p/4p later */ 2945 vector_num = min_t(u16, num_online_cpus(), tqp_num); 2946 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), 2947 GFP_KERNEL); 2948 if (!vector) 2949 return -ENOMEM; 2950 2951 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 2952 2953 priv->vector_num = vector_num; 2954 priv->tqp_vector = (struct hns3_enet_tqp_vector *) 2955 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 2956 GFP_KERNEL); 2957 if (!priv->tqp_vector) { 2958 ret = -ENOMEM; 2959 goto out; 2960 } 2961 2962 for (i = 0; i < priv->vector_num; i++) { 2963 tqp_vector = 
&priv->tqp_vector[i]; 2964 tqp_vector->idx = i; 2965 tqp_vector->mask_addr = vector[i].io_addr; 2966 tqp_vector->vector_irq = vector[i].vector; 2967 hns3_vector_gl_rl_init(tqp_vector, priv); 2968 } 2969 2970 out: 2971 devm_kfree(&pdev->dev, vector); 2972 return ret; 2973 } 2974 2975 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) 2976 { 2977 group->ring = NULL; 2978 group->count = 0; 2979 } 2980 2981 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 2982 { 2983 struct hnae3_ring_chain_node vector_ring_chain; 2984 struct hnae3_handle *h = priv->ae_handle; 2985 struct hns3_enet_tqp_vector *tqp_vector; 2986 int i, ret; 2987 2988 for (i = 0; i < priv->vector_num; i++) { 2989 tqp_vector = &priv->tqp_vector[i]; 2990 2991 ret = hns3_get_vector_ring_chain(tqp_vector, 2992 &vector_ring_chain); 2993 if (ret) 2994 return ret; 2995 2996 ret = h->ae_algo->ops->unmap_ring_from_vector(h, 2997 tqp_vector->vector_irq, &vector_ring_chain); 2998 if (ret) 2999 return ret; 3000 3001 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3002 3003 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { 3004 (void)irq_set_affinity_hint( 3005 priv->tqp_vector[i].vector_irq, 3006 NULL); 3007 free_irq(priv->tqp_vector[i].vector_irq, 3008 &priv->tqp_vector[i]); 3009 } 3010 3011 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; 3012 hns3_clear_ring_group(&tqp_vector->rx_group); 3013 hns3_clear_ring_group(&tqp_vector->tx_group); 3014 netif_napi_del(&priv->tqp_vector[i].napi); 3015 } 3016 3017 return 0; 3018 } 3019 3020 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 3021 { 3022 struct hnae3_handle *h = priv->ae_handle; 3023 struct pci_dev *pdev = h->pdev; 3024 int i, ret; 3025 3026 for (i = 0; i < priv->vector_num; i++) { 3027 struct hns3_enet_tqp_vector *tqp_vector; 3028 3029 tqp_vector = &priv->tqp_vector[i]; 3030 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 3031 if (ret) 3032 return ret; 3033 } 3034 3035 devm_kfree(&pdev->dev, priv->tqp_vector); 3036 return 0; 3037 } 3038 3039 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 3040 int ring_type) 3041 { 3042 struct hns3_nic_ring_data *ring_data = priv->ring_data; 3043 int queue_num = priv->ae_handle->kinfo.num_tqps; 3044 struct pci_dev *pdev = priv->ae_handle->pdev; 3045 struct hns3_enet_ring *ring; 3046 3047 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); 3048 if (!ring) 3049 return -ENOMEM; 3050 3051 if (ring_type == HNAE3_RING_TYPE_TX) { 3052 ring_data[q->tqp_index].ring = ring; 3053 ring_data[q->tqp_index].queue_index = q->tqp_index; 3054 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; 3055 } else { 3056 ring_data[q->tqp_index + queue_num].ring = ring; 3057 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; 3058 ring->io_base = q->io_base; 3059 } 3060 3061 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 3062 3063 ring->tqp = q; 3064 ring->desc = NULL; 3065 ring->desc_cb = NULL; 3066 ring->dev = priv->dev; 3067 ring->desc_dma_addr = 0; 3068 ring->buf_size = q->buf_size; 3069 ring->desc_num = q->desc_num; 3070 ring->next_to_use = 0; 3071 ring->next_to_clean = 0; 3072 3073 return 0; 3074 } 3075 3076 static int hns3_queue_to_ring(struct hnae3_queue *tqp, 3077 struct hns3_nic_priv *priv) 3078 { 3079 int ret; 3080 3081 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 3082 if (ret) 3083 return ret; 3084 3085 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 3086 if (ret) { 
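/* Setting up the RX ring failed, so free the TX ring that was just
 * allocated for this queue to avoid leaving a half-initialized pair.
 */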
3087 devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring); 3088 return ret; 3089 } 3090 3091 return 0; 3092 } 3093 3094 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 3095 { 3096 struct hnae3_handle *h = priv->ae_handle; 3097 struct pci_dev *pdev = h->pdev; 3098 int i, ret; 3099 3100 priv->ring_data = devm_kzalloc(&pdev->dev, 3101 array3_size(h->kinfo.num_tqps, 3102 sizeof(*priv->ring_data), 3103 2), 3104 GFP_KERNEL); 3105 if (!priv->ring_data) 3106 return -ENOMEM; 3107 3108 for (i = 0; i < h->kinfo.num_tqps; i++) { 3109 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); 3110 if (ret) 3111 goto err; 3112 } 3113 3114 return 0; 3115 err: 3116 while (i--) { 3117 devm_kfree(priv->dev, priv->ring_data[i].ring); 3118 devm_kfree(priv->dev, 3119 priv->ring_data[i + h->kinfo.num_tqps].ring); 3120 } 3121 3122 devm_kfree(&pdev->dev, priv->ring_data); 3123 return ret; 3124 } 3125 3126 static void hns3_put_ring_config(struct hns3_nic_priv *priv) 3127 { 3128 struct hnae3_handle *h = priv->ae_handle; 3129 int i; 3130 3131 for (i = 0; i < h->kinfo.num_tqps; i++) { 3132 devm_kfree(priv->dev, priv->ring_data[i].ring); 3133 devm_kfree(priv->dev, 3134 priv->ring_data[i + h->kinfo.num_tqps].ring); 3135 } 3136 devm_kfree(priv->dev, priv->ring_data); 3137 } 3138 3139 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) 3140 { 3141 int ret; 3142 3143 if (ring->desc_num <= 0 || ring->buf_size <= 0) 3144 return -EINVAL; 3145 3146 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), 3147 GFP_KERNEL); 3148 if (!ring->desc_cb) { 3149 ret = -ENOMEM; 3150 goto out; 3151 } 3152 3153 ret = hns3_alloc_desc(ring); 3154 if (ret) 3155 goto out_with_desc_cb; 3156 3157 if (!HNAE3_IS_TX_RING(ring)) { 3158 ret = hns3_alloc_ring_buffers(ring); 3159 if (ret) 3160 goto out_with_desc; 3161 } 3162 3163 return 0; 3164 3165 out_with_desc: 3166 hns3_free_desc(ring); 3167 out_with_desc_cb: 3168 kfree(ring->desc_cb); 3169 ring->desc_cb = NULL; 3170 out: 3171 return ret; 3172 } 3173 3174 static void hns3_fini_ring(struct hns3_enet_ring *ring) 3175 { 3176 hns3_free_desc(ring); 3177 kfree(ring->desc_cb); 3178 ring->desc_cb = NULL; 3179 ring->next_to_clean = 0; 3180 ring->next_to_use = 0; 3181 } 3182 3183 static int hns3_buf_size2type(u32 buf_size) 3184 { 3185 int bd_size_type; 3186 3187 switch (buf_size) { 3188 case 512: 3189 bd_size_type = HNS3_BD_SIZE_512_TYPE; 3190 break; 3191 case 1024: 3192 bd_size_type = HNS3_BD_SIZE_1024_TYPE; 3193 break; 3194 case 2048: 3195 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 3196 break; 3197 case 4096: 3198 bd_size_type = HNS3_BD_SIZE_4096_TYPE; 3199 break; 3200 default: 3201 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 3202 } 3203 3204 return bd_size_type; 3205 } 3206 3207 static void hns3_init_ring_hw(struct hns3_enet_ring *ring) 3208 { 3209 dma_addr_t dma = ring->desc_dma_addr; 3210 struct hnae3_queue *q = ring->tqp; 3211 3212 if (!HNAE3_IS_TX_RING(ring)) { 3213 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, 3214 (u32)dma); 3215 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, 3216 (u32)((dma >> 31) >> 1)); 3217 3218 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, 3219 hns3_buf_size2type(ring->buf_size)); 3220 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, 3221 ring->desc_num / 8 - 1); 3222 3223 } else { 3224 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, 3225 (u32)dma); 3226 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 3227 (u32)((dma >> 31) >> 1)); 3228 3229 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 3230 ring->desc_num / 8 - 1); 3231 } 3232 } 3233 
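/* Note on hns3_init_ring_hw() above: the high half of the descriptor base
 * address is written as (u32)((dma >> 31) >> 1). For a 64-bit dma_addr_t
 * this equals dma >> 32; the double shift presumably avoids an over-wide
 * shift warning when dma_addr_t is only 32 bits wide.
 */

/* hns3_init_tx_ring_tc() below writes each enabled TC's index into the
 * TX ring TC register of every queue belonging to that TC, so the hardware
 * can schedule transmit traffic per traffic class.
 */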
3234 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) 3235 { 3236 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 3237 int i; 3238 3239 for (i = 0; i < HNAE3_MAX_TC; i++) { 3240 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; 3241 int j; 3242 3243 if (!tc_info->enable) 3244 continue; 3245 3246 for (j = 0; j < tc_info->tqp_count; j++) { 3247 struct hnae3_queue *q; 3248 3249 q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp; 3250 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, 3251 tc_info->tc); 3252 } 3253 } 3254 } 3255 3256 int hns3_init_all_ring(struct hns3_nic_priv *priv) 3257 { 3258 struct hnae3_handle *h = priv->ae_handle; 3259 int ring_num = h->kinfo.num_tqps * 2; 3260 int i, j; 3261 int ret; 3262 3263 for (i = 0; i < ring_num; i++) { 3264 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); 3265 if (ret) { 3266 dev_err(priv->dev, 3267 "Alloc ring memory fail! ret=%d\n", ret); 3268 goto out_when_alloc_ring_memory; 3269 } 3270 3271 u64_stats_init(&priv->ring_data[i].ring->syncp); 3272 } 3273 3274 return 0; 3275 3276 out_when_alloc_ring_memory: 3277 for (j = i - 1; j >= 0; j--) 3278 hns3_fini_ring(priv->ring_data[j].ring); 3279 3280 return -ENOMEM; 3281 } 3282 3283 int hns3_uninit_all_ring(struct hns3_nic_priv *priv) 3284 { 3285 struct hnae3_handle *h = priv->ae_handle; 3286 int i; 3287 3288 for (i = 0; i < h->kinfo.num_tqps; i++) { 3289 hns3_fini_ring(priv->ring_data[i].ring); 3290 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); 3291 } 3292 return 0; 3293 } 3294 3295 /* Set mac addr if it is configured. or leave it to the AE driver */ 3296 static int hns3_init_mac_addr(struct net_device *netdev, bool init) 3297 { 3298 struct hns3_nic_priv *priv = netdev_priv(netdev); 3299 struct hnae3_handle *h = priv->ae_handle; 3300 u8 mac_addr_temp[ETH_ALEN]; 3301 int ret = 0; 3302 3303 if (h->ae_algo->ops->get_mac_addr && init) { 3304 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 3305 ether_addr_copy(netdev->dev_addr, mac_addr_temp); 3306 } 3307 3308 /* Check if the MAC address is valid, if not get a random one */ 3309 if (!is_valid_ether_addr(netdev->dev_addr)) { 3310 eth_hw_addr_random(netdev); 3311 dev_warn(priv->dev, "using random MAC address %pM\n", 3312 netdev->dev_addr); 3313 } 3314 3315 if (h->ae_algo->ops->set_mac_addr) 3316 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 3317 3318 return ret; 3319 } 3320 3321 static int hns3_restore_fd_rules(struct net_device *netdev) 3322 { 3323 struct hnae3_handle *h = hns3_get_handle(netdev); 3324 int ret = 0; 3325 3326 if (h->ae_algo->ops->restore_fd_rules) 3327 ret = h->ae_algo->ops->restore_fd_rules(h); 3328 3329 return ret; 3330 } 3331 3332 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) 3333 { 3334 struct hnae3_handle *h = hns3_get_handle(netdev); 3335 3336 if (h->ae_algo->ops->del_all_fd_entries) 3337 h->ae_algo->ops->del_all_fd_entries(h, clear_list); 3338 } 3339 3340 static void hns3_nic_set_priv_ops(struct net_device *netdev) 3341 { 3342 struct hns3_nic_priv *priv = netdev_priv(netdev); 3343 3344 priv->ops.fill_desc = hns3_fill_desc; 3345 if ((netdev->features & NETIF_F_TSO) || 3346 (netdev->features & NETIF_F_TSO6)) 3347 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; 3348 else 3349 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; 3350 } 3351 3352 static int hns3_client_init(struct hnae3_handle *handle) 3353 { 3354 struct pci_dev *pdev = handle->pdev; 3355 u16 alloc_tqps, max_rss_size; 3356 struct hns3_nic_priv *priv; 3357 struct net_device 
*netdev; 3358 int ret; 3359 3360 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, 3361 &max_rss_size); 3362 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); 3363 if (!netdev) 3364 return -ENOMEM; 3365 3366 priv = netdev_priv(netdev); 3367 priv->dev = &pdev->dev; 3368 priv->netdev = netdev; 3369 priv->ae_handle = handle; 3370 priv->tx_timeout_count = 0; 3371 3372 handle->kinfo.netdev = netdev; 3373 handle->priv = (void *)priv; 3374 3375 hns3_init_mac_addr(netdev, true); 3376 3377 hns3_set_default_feature(netdev); 3378 3379 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; 3380 netdev->priv_flags |= IFF_UNICAST_FLT; 3381 netdev->netdev_ops = &hns3_nic_netdev_ops; 3382 SET_NETDEV_DEV(netdev, &pdev->dev); 3383 hns3_ethtool_set_ops(netdev); 3384 hns3_nic_set_priv_ops(netdev); 3385 3386 /* Carrier off reporting is important to ethtool even BEFORE open */ 3387 netif_carrier_off(netdev); 3388 3389 ret = hns3_get_ring_config(priv); 3390 if (ret) { 3391 ret = -ENOMEM; 3392 goto out_get_ring_cfg; 3393 } 3394 3395 ret = hns3_nic_alloc_vector_data(priv); 3396 if (ret) { 3397 ret = -ENOMEM; 3398 goto out_alloc_vector_data; 3399 } 3400 3401 ret = hns3_nic_init_vector_data(priv); 3402 if (ret) { 3403 ret = -ENOMEM; 3404 goto out_init_vector_data; 3405 } 3406 3407 ret = hns3_init_all_ring(priv); 3408 if (ret) { 3409 ret = -ENOMEM; 3410 goto out_init_ring_data; 3411 } 3412 3413 ret = register_netdev(netdev); 3414 if (ret) { 3415 dev_err(priv->dev, "probe register netdev fail!\n"); 3416 goto out_reg_netdev_fail; 3417 } 3418 3419 hns3_dcbnl_setup(handle); 3420 3421 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ 3422 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 3423 3424 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 3425 3426 return ret; 3427 3428 out_reg_netdev_fail: 3429 out_init_ring_data: 3430 (void)hns3_nic_uninit_vector_data(priv); 3431 out_init_vector_data: 3432 hns3_nic_dealloc_vector_data(priv); 3433 out_alloc_vector_data: 3434 priv->ring_data = NULL; 3435 out_get_ring_cfg: 3436 priv->ae_handle = NULL; 3437 free_netdev(netdev); 3438 return ret; 3439 } 3440 3441 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) 3442 { 3443 struct net_device *netdev = handle->kinfo.netdev; 3444 struct hns3_nic_priv *priv = netdev_priv(netdev); 3445 int ret; 3446 3447 hns3_remove_hw_addr(netdev); 3448 3449 if (netdev->reg_state != NETREG_UNINITIALIZED) 3450 unregister_netdev(netdev); 3451 3452 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 3453 netdev_warn(netdev, "already uninitialized\n"); 3454 goto out_netdev_free; 3455 } 3456 3457 hns3_del_all_fd_rules(netdev, true); 3458 3459 hns3_force_clear_all_rx_ring(handle); 3460 3461 ret = hns3_nic_uninit_vector_data(priv); 3462 if (ret) 3463 netdev_err(netdev, "uninit vector error\n"); 3464 3465 ret = hns3_nic_dealloc_vector_data(priv); 3466 if (ret) 3467 netdev_err(netdev, "dealloc vector error\n"); 3468 3469 ret = hns3_uninit_all_ring(priv); 3470 if (ret) 3471 netdev_err(netdev, "uninit ring error\n"); 3472 3473 hns3_put_ring_config(priv); 3474 3475 priv->ring_data = NULL; 3476 3477 out_netdev_free: 3478 free_netdev(netdev); 3479 } 3480 3481 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) 3482 { 3483 struct net_device *netdev = handle->kinfo.netdev; 3484 3485 if (!netdev) 3486 return; 3487 3488 if (linkup) { 3489 netif_carrier_on(netdev); 3490 netif_tx_wake_all_queues(netdev); 3491 netdev_info(netdev, "link up\n"); 3492 } else { 3493 
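/* Link went down: drop the carrier and stop all TX queues so the stack
 * stops handing us packets until the link comes back up.
 */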
netif_carrier_off(netdev); 3494 netif_tx_stop_all_queues(netdev); 3495 netdev_info(netdev, "link down\n"); 3496 } 3497 } 3498 3499 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) 3500 { 3501 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3502 struct net_device *ndev = kinfo->netdev; 3503 bool if_running; 3504 int ret; 3505 3506 if (tc > HNAE3_MAX_TC) 3507 return -EINVAL; 3508 3509 if (!ndev) 3510 return -ENODEV; 3511 3512 if_running = netif_running(ndev); 3513 3514 if (if_running) { 3515 (void)hns3_nic_net_stop(ndev); 3516 msleep(100); 3517 } 3518 3519 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? 3520 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; 3521 if (ret) 3522 goto err_out; 3523 3524 ret = hns3_nic_set_real_num_queue(ndev); 3525 3526 err_out: 3527 if (if_running) 3528 (void)hns3_nic_net_open(ndev); 3529 3530 return ret; 3531 } 3532 3533 static int hns3_recover_hw_addr(struct net_device *ndev) 3534 { 3535 struct netdev_hw_addr_list *list; 3536 struct netdev_hw_addr *ha, *tmp; 3537 int ret = 0; 3538 3539 /* go through and sync uc_addr entries to the device */ 3540 list = &ndev->uc; 3541 list_for_each_entry_safe(ha, tmp, &list->list, list) { 3542 ret = hns3_nic_uc_sync(ndev, ha->addr); 3543 if (ret) 3544 return ret; 3545 } 3546 3547 /* go through and sync mc_addr entries to the device */ 3548 list = &ndev->mc; 3549 list_for_each_entry_safe(ha, tmp, &list->list, list) { 3550 ret = hns3_nic_mc_sync(ndev, ha->addr); 3551 if (ret) 3552 return ret; 3553 } 3554 3555 return ret; 3556 } 3557 3558 static void hns3_remove_hw_addr(struct net_device *netdev) 3559 { 3560 struct netdev_hw_addr_list *list; 3561 struct netdev_hw_addr *ha, *tmp; 3562 3563 hns3_nic_uc_unsync(netdev, netdev->dev_addr); 3564 3565 /* go through and unsync uc_addr entries to the device */ 3566 list = &netdev->uc; 3567 list_for_each_entry_safe(ha, tmp, &list->list, list) 3568 hns3_nic_uc_unsync(netdev, ha->addr); 3569 3570 /* go through and unsync mc_addr entries to the device */ 3571 list = &netdev->mc; 3572 list_for_each_entry_safe(ha, tmp, &list->list, list) 3573 if (ha->refcount > 1) 3574 hns3_nic_mc_unsync(netdev, ha->addr); 3575 } 3576 3577 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) 3578 { 3579 while (ring->next_to_clean != ring->next_to_use) { 3580 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; 3581 hns3_free_buffer_detach(ring, ring->next_to_clean); 3582 ring_ptr_move_fw(ring, next_to_clean); 3583 } 3584 } 3585 3586 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) 3587 { 3588 struct hns3_desc_cb res_cbs; 3589 int ret; 3590 3591 while (ring->next_to_use != ring->next_to_clean) { 3592 /* When a buffer is not reused, it's memory has been 3593 * freed in hns3_handle_rx_bd or will be freed by 3594 * stack, so we need to replace the buffer here. 3595 */ 3596 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 3597 ret = hns3_reserve_buffer_map(ring, &res_cbs); 3598 if (ret) { 3599 u64_stats_update_begin(&ring->syncp); 3600 ring->stats.sw_err_cnt++; 3601 u64_stats_update_end(&ring->syncp); 3602 /* if alloc new buffer fail, exit directly 3603 * and reclear in up flow. 
3604 */ 3605 netdev_warn(ring->tqp->handle->kinfo.netdev, 3606 "reserve buffer map failed, ret = %d\n", 3607 ret); 3608 return ret; 3609 } 3610 hns3_replace_buffer(ring, ring->next_to_use, 3611 &res_cbs); 3612 } 3613 ring_ptr_move_fw(ring, next_to_use); 3614 } 3615 3616 return 0; 3617 } 3618 3619 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) 3620 { 3621 while (ring->next_to_use != ring->next_to_clean) { 3622 /* When a buffer is not reused, its memory has been 3623 * freed in hns3_handle_rx_bd or will be freed by the 3624 * stack, so we only need to unmap the buffer here. 3625 */ 3626 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 3627 hns3_unmap_buffer(ring, 3628 &ring->desc_cb[ring->next_to_use]); 3629 ring->desc_cb[ring->next_to_use].dma = 0; 3630 } 3631 3632 ring_ptr_move_fw(ring, next_to_use); 3633 } 3634 } 3635 3636 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h) 3637 { 3638 struct net_device *ndev = h->kinfo.netdev; 3639 struct hns3_nic_priv *priv = netdev_priv(ndev); 3640 struct hns3_enet_ring *ring; 3641 u32 i; 3642 3643 for (i = 0; i < h->kinfo.num_tqps; i++) { 3644 ring = priv->ring_data[i + h->kinfo.num_tqps].ring; 3645 hns3_force_clear_rx_ring(ring); 3646 } 3647 } 3648 3649 static void hns3_clear_all_ring(struct hnae3_handle *h) 3650 { 3651 struct net_device *ndev = h->kinfo.netdev; 3652 struct hns3_nic_priv *priv = netdev_priv(ndev); 3653 u32 i; 3654 3655 for (i = 0; i < h->kinfo.num_tqps; i++) { 3656 struct netdev_queue *dev_queue; 3657 struct hns3_enet_ring *ring; 3658 3659 ring = priv->ring_data[i].ring; 3660 hns3_clear_tx_ring(ring); 3661 dev_queue = netdev_get_tx_queue(ndev, 3662 priv->ring_data[i].queue_index); 3663 netdev_tx_reset_queue(dev_queue); 3664 3665 ring = priv->ring_data[i + h->kinfo.num_tqps].ring; 3666 /* Continue to clear other rings even if clearing some 3667 * rings failed. 3668 */ 3669 hns3_clear_rx_ring(ring); 3670 } 3671 } 3672 3673 int hns3_nic_reset_all_ring(struct hnae3_handle *h) 3674 { 3675 struct net_device *ndev = h->kinfo.netdev; 3676 struct hns3_nic_priv *priv = netdev_priv(ndev); 3677 struct hns3_enet_ring *rx_ring; 3678 int i, j; 3679 int ret; 3680 3681 for (i = 0; i < h->kinfo.num_tqps; i++) { 3682 ret = h->ae_algo->ops->reset_queue(h, i); 3683 if (ret) 3684 return ret; 3685 3686 hns3_init_ring_hw(priv->ring_data[i].ring); 3687 3688 /* We need to clear the tx ring here because the self test 3689 * uses the ring without bringing the interface down and up 3690 */ 3691 hns3_clear_tx_ring(priv->ring_data[i].ring); 3692 priv->ring_data[i].ring->next_to_clean = 0; 3693 priv->ring_data[i].ring->next_to_use = 0; 3694 3695 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring; 3696 hns3_init_ring_hw(rx_ring); 3697 ret = hns3_clear_rx_ring(rx_ring); 3698 if (ret) 3699 return ret; 3700 3701 /* We cannot know the hardware head and tail when this 3702 * function is called in the reset flow, so we reuse all descriptors. 3703 */ 3704 for (j = 0; j < rx_ring->desc_num; j++) 3705 hns3_reuse_buffer(rx_ring, j); 3706 3707 rx_ring->next_to_clean = 0; 3708 rx_ring->next_to_use = 0; 3709 } 3710 3711 hns3_init_tx_ring_tc(priv); 3712 3713 return 0; 3714 } 3715 3716 static void hns3_store_coal(struct hns3_nic_priv *priv) 3717 { 3718 /* ethtool only supports setting and querying one coalesce 3719 * configuration for now, so save vector 0's coalesce 3720 * configuration here in order to restore it.
3721 */ 3722 memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, 3723 sizeof(struct hns3_enet_coalesce)); 3724 memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal, 3725 sizeof(struct hns3_enet_coalesce)); 3726 } 3727 3728 static void hns3_restore_coal(struct hns3_nic_priv *priv) 3729 { 3730 u16 vector_num = priv->vector_num; 3731 int i; 3732 3733 for (i = 0; i < vector_num; i++) { 3734 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal, 3735 sizeof(struct hns3_enet_coalesce)); 3736 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal, 3737 sizeof(struct hns3_enet_coalesce)); 3738 } 3739 } 3740 3741 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) 3742 { 3743 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 3744 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3745 struct net_device *ndev = kinfo->netdev; 3746 struct hns3_nic_priv *priv = netdev_priv(ndev); 3747 3748 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) 3749 return 0; 3750 3751 /* it is cumbersome for hardware to pick-and-choose entries for deletion 3752 * from table space. Hence, for function reset software intervention is 3753 * required to delete the entries 3754 */ 3755 if (hns3_dev_ongoing_func_reset(ae_dev)) { 3756 hns3_remove_hw_addr(ndev); 3757 hns3_del_all_fd_rules(ndev, false); 3758 } 3759 3760 if (!netif_running(ndev)) 3761 return 0; 3762 3763 return hns3_nic_net_stop(ndev); 3764 } 3765 3766 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) 3767 { 3768 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3769 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); 3770 int ret = 0; 3771 3772 if (netif_running(kinfo->netdev)) { 3773 ret = hns3_nic_net_up(kinfo->netdev); 3774 if (ret) { 3775 netdev_err(kinfo->netdev, 3776 "hns net up fail, ret=%d!\n", ret); 3777 return ret; 3778 } 3779 } 3780 3781 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 3782 3783 return ret; 3784 } 3785 3786 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) 3787 { 3788 struct net_device *netdev = handle->kinfo.netdev; 3789 struct hns3_nic_priv *priv = netdev_priv(netdev); 3790 bool vlan_filter_enable; 3791 int ret; 3792 3793 ret = hns3_init_mac_addr(netdev, false); 3794 if (ret) 3795 return ret; 3796 3797 ret = hns3_recover_hw_addr(netdev); 3798 if (ret) 3799 return ret; 3800 3801 ret = hns3_update_promisc_mode(netdev, handle->netdev_flags); 3802 if (ret) 3803 return ret; 3804 3805 vlan_filter_enable = netdev->flags & IFF_PROMISC ? 
false : true; 3806 hns3_enable_vlan_filter(netdev, vlan_filter_enable); 3807 3808 /* Hardware table is only clear when pf resets */ 3809 if (!(handle->flags & HNAE3_SUPPORT_VF)) { 3810 ret = hns3_restore_vlan(netdev); 3811 return ret; 3812 } 3813 3814 ret = hns3_restore_fd_rules(netdev); 3815 if (ret) 3816 return ret; 3817 3818 /* Carrier off reporting is important to ethtool even BEFORE open */ 3819 netif_carrier_off(netdev); 3820 3821 hns3_restore_coal(priv); 3822 3823 ret = hns3_nic_init_vector_data(priv); 3824 if (ret) 3825 return ret; 3826 3827 ret = hns3_init_all_ring(priv); 3828 if (ret) { 3829 hns3_nic_uninit_vector_data(priv); 3830 priv->ring_data = NULL; 3831 } 3832 3833 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 3834 3835 return ret; 3836 } 3837 3838 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) 3839 { 3840 struct net_device *netdev = handle->kinfo.netdev; 3841 struct hns3_nic_priv *priv = netdev_priv(netdev); 3842 int ret; 3843 3844 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 3845 netdev_warn(netdev, "already uninitialized\n"); 3846 return 0; 3847 } 3848 3849 hns3_force_clear_all_rx_ring(handle); 3850 3851 ret = hns3_nic_uninit_vector_data(priv); 3852 if (ret) { 3853 netdev_err(netdev, "uninit vector error\n"); 3854 return ret; 3855 } 3856 3857 hns3_store_coal(priv); 3858 3859 ret = hns3_uninit_all_ring(priv); 3860 if (ret) 3861 netdev_err(netdev, "uninit ring error\n"); 3862 3863 clear_bit(HNS3_NIC_STATE_INITED, &priv->state); 3864 3865 return ret; 3866 } 3867 3868 static int hns3_reset_notify(struct hnae3_handle *handle, 3869 enum hnae3_reset_notify_type type) 3870 { 3871 int ret = 0; 3872 3873 switch (type) { 3874 case HNAE3_UP_CLIENT: 3875 ret = hns3_reset_notify_up_enet(handle); 3876 break; 3877 case HNAE3_DOWN_CLIENT: 3878 ret = hns3_reset_notify_down_enet(handle); 3879 break; 3880 case HNAE3_INIT_CLIENT: 3881 ret = hns3_reset_notify_init_enet(handle); 3882 break; 3883 case HNAE3_UNINIT_CLIENT: 3884 ret = hns3_reset_notify_uninit_enet(handle); 3885 break; 3886 default: 3887 break; 3888 } 3889 3890 return ret; 3891 } 3892 3893 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) 3894 { 3895 struct hns3_nic_priv *priv = netdev_priv(netdev); 3896 struct hnae3_handle *h = hns3_get_handle(netdev); 3897 int ret; 3898 3899 ret = h->ae_algo->ops->set_channels(h, new_tqp_num); 3900 if (ret) 3901 return ret; 3902 3903 ret = hns3_get_ring_config(priv); 3904 if (ret) 3905 return ret; 3906 3907 ret = hns3_nic_alloc_vector_data(priv); 3908 if (ret) 3909 goto err_alloc_vector; 3910 3911 hns3_restore_coal(priv); 3912 3913 ret = hns3_nic_init_vector_data(priv); 3914 if (ret) 3915 goto err_uninit_vector; 3916 3917 ret = hns3_init_all_ring(priv); 3918 if (ret) 3919 goto err_put_ring; 3920 3921 return 0; 3922 3923 err_put_ring: 3924 hns3_put_ring_config(priv); 3925 err_uninit_vector: 3926 hns3_nic_uninit_vector_data(priv); 3927 err_alloc_vector: 3928 hns3_nic_dealloc_vector_data(priv); 3929 return ret; 3930 } 3931 3932 static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num) 3933 { 3934 return (new_tqp_num / num_tc) * num_tc; 3935 } 3936 3937 int hns3_set_channels(struct net_device *netdev, 3938 struct ethtool_channels *ch) 3939 { 3940 struct hns3_nic_priv *priv = netdev_priv(netdev); 3941 struct hnae3_handle *h = hns3_get_handle(netdev); 3942 struct hnae3_knic_private_info *kinfo = &h->kinfo; 3943 bool if_running = netif_running(netdev); 3944 u32 new_tqp_num = ch->combined_count; 3945 u16 org_tqp_num; 3946 int ret; 3947 3948 if 
(ch->rx_count || ch->tx_count) 3949 return -EINVAL; 3950 3951 if (new_tqp_num > hns3_get_max_available_channels(h) || 3952 new_tqp_num < kinfo->num_tc) { 3953 dev_err(&netdev->dev, 3954 "Change tqps fail, the tqp range is from %d to %d", 3955 kinfo->num_tc, 3956 hns3_get_max_available_channels(h)); 3957 return -EINVAL; 3958 } 3959 3960 new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num); 3961 if (kinfo->num_tqps == new_tqp_num) 3962 return 0; 3963 3964 if (if_running) 3965 hns3_nic_net_stop(netdev); 3966 3967 ret = hns3_nic_uninit_vector_data(priv); 3968 if (ret) { 3969 dev_err(&netdev->dev, 3970 "Unbind vector with tqp fail, nothing is changed"); 3971 goto open_netdev; 3972 } 3973 3974 hns3_store_coal(priv); 3975 3976 hns3_nic_dealloc_vector_data(priv); 3977 3978 hns3_uninit_all_ring(priv); 3979 hns3_put_ring_config(priv); 3980 3981 org_tqp_num = h->kinfo.num_tqps; 3982 ret = hns3_modify_tqp_num(netdev, new_tqp_num); 3983 if (ret) { 3984 ret = hns3_modify_tqp_num(netdev, org_tqp_num); 3985 if (ret) { 3986 /* If revert to old tqp failed, fatal error occurred */ 3987 dev_err(&netdev->dev, 3988 "Revert to old tqp num fail, ret=%d", ret); 3989 return ret; 3990 } 3991 dev_info(&netdev->dev, 3992 "Change tqp num fail, Revert to old tqp num"); 3993 } 3994 3995 open_netdev: 3996 if (if_running) 3997 hns3_nic_net_open(netdev); 3998 3999 return ret; 4000 } 4001 4002 static const struct hnae3_client_ops client_ops = { 4003 .init_instance = hns3_client_init, 4004 .uninit_instance = hns3_client_uninit, 4005 .link_status_change = hns3_link_status_change, 4006 .setup_tc = hns3_client_setup_tc, 4007 .reset_notify = hns3_reset_notify, 4008 }; 4009 4010 /* hns3_init_module - Driver registration routine 4011 * hns3_init_module is the first routine called when the driver is 4012 * loaded. All it does is register with the PCI subsystem. 4013 */ 4014 static int __init hns3_init_module(void) 4015 { 4016 int ret; 4017 4018 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); 4019 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); 4020 4021 client.type = HNAE3_CLIENT_KNIC; 4022 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", 4023 hns3_driver_name); 4024 4025 client.ops = &client_ops; 4026 4027 INIT_LIST_HEAD(&client.node); 4028 4029 ret = hnae3_register_client(&client); 4030 if (ret) 4031 return ret; 4032 4033 ret = pci_register_driver(&hns3_driver); 4034 if (ret) 4035 hnae3_unregister_client(&client); 4036 4037 return ret; 4038 } 4039 module_init(hns3_init_module); 4040 4041 /* hns3_exit_module - Driver exit cleanup routine 4042 * hns3_exit_module is called just before the driver is removed 4043 * from memory. 4044 */ 4045 static void __exit hns3_exit_module(void) 4046 { 4047 pci_unregister_driver(&hns3_driver); 4048 hnae3_unregister_client(&client); 4049 } 4050 module_exit(hns3_exit_module); 4051 4052 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); 4053 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 4054 MODULE_LICENSE("GPL"); 4055 MODULE_ALIAS("pci:hns-nic"); 4056 MODULE_VERSION(HNS3_MOD_VERSION); 4057