1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/dma-mapping.h> 5 #include <linux/etherdevice.h> 6 #include <linux/interrupt.h> 7 #include <linux/if_vlan.h> 8 #include <linux/ip.h> 9 #include <linux/ipv6.h> 10 #include <linux/module.h> 11 #include <linux/pci.h> 12 #include <linux/skbuff.h> 13 #include <linux/sctp.h> 14 #include <linux/vermagic.h> 15 #include <net/gre.h> 16 #include <net/pkt_cls.h> 17 #include <net/vxlan.h> 18 19 #include "hnae3.h" 20 #include "hns3_enet.h" 21 22 static void hns3_clear_all_ring(struct hnae3_handle *h); 23 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); 24 static void hns3_remove_hw_addr(struct net_device *netdev); 25 26 static const char hns3_driver_name[] = "hns3"; 27 const char hns3_driver_version[] = VERMAGIC_STRING; 28 static const char hns3_driver_string[] = 29 "Hisilicon Ethernet Network Driver for Hip08 Family"; 30 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; 31 static struct hnae3_client client; 32 33 /* hns3_pci_tbl - PCI Device ID Table 34 * 35 * Last entry must be all 0s 36 * 37 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 38 * Class, Class Mask, private data (not used) } 39 */ 40 static const struct pci_device_id hns3_pci_tbl[] = { 41 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 44 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 46 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 48 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 49 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 50 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 51 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 52 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 53 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 54 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 55 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 56 /* required last entry */ 57 {0, } 58 }; 59 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); 60 61 static irqreturn_t hns3_irq_handle(int irq, void *vector) 62 { 63 struct hns3_enet_tqp_vector *tqp_vector = vector; 64 65 napi_schedule(&tqp_vector->napi); 66 67 return IRQ_HANDLED; 68 } 69 70 /* This callback function is used to set affinity changes to the irq affinity 71 * masks when the irq_set_affinity_notifier function is used. 
72 */ 73 static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify, 74 const cpumask_t *mask) 75 { 76 struct hns3_enet_tqp_vector *tqp_vectors = 77 container_of(notify, struct hns3_enet_tqp_vector, 78 affinity_notify); 79 80 tqp_vectors->affinity_mask = *mask; 81 } 82 83 static void hns3_nic_irq_affinity_release(struct kref *ref) 84 { 85 } 86 87 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) 88 { 89 struct hns3_enet_tqp_vector *tqp_vectors; 90 unsigned int i; 91 92 for (i = 0; i < priv->vector_num; i++) { 93 tqp_vectors = &priv->tqp_vector[i]; 94 95 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) 96 continue; 97 98 /* clear the affinity notifier and affinity mask */ 99 irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL); 100 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); 101 102 /* release the irq resource */ 103 free_irq(tqp_vectors->vector_irq, tqp_vectors); 104 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; 105 } 106 } 107 108 static int hns3_nic_init_irq(struct hns3_nic_priv *priv) 109 { 110 struct hns3_enet_tqp_vector *tqp_vectors; 111 int txrx_int_idx = 0; 112 int rx_int_idx = 0; 113 int tx_int_idx = 0; 114 unsigned int i; 115 int ret; 116 117 for (i = 0; i < priv->vector_num; i++) { 118 tqp_vectors = &priv->tqp_vector[i]; 119 120 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) 121 continue; 122 123 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { 124 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, 125 "%s-%s-%d", priv->netdev->name, "TxRx", 126 txrx_int_idx++); 127 txrx_int_idx++; 128 } else if (tqp_vectors->rx_group.ring) { 129 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, 130 "%s-%s-%d", priv->netdev->name, "Rx", 131 rx_int_idx++); 132 } else if (tqp_vectors->tx_group.ring) { 133 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, 134 "%s-%s-%d", priv->netdev->name, "Tx", 135 tx_int_idx++); 136 } else { 137 /* Skip this unused q_vector */ 138 continue; 139 } 140 141 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; 142 143 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, 144 tqp_vectors->name, 145 tqp_vectors); 146 if (ret) { 147 netdev_err(priv->netdev, "request irq(%d) fail\n", 148 tqp_vectors->vector_irq); 149 return ret; 150 } 151 152 tqp_vectors->affinity_notify.notify = 153 hns3_nic_irq_affinity_notify; 154 tqp_vectors->affinity_notify.release = 155 hns3_nic_irq_affinity_release; 156 irq_set_affinity_notifier(tqp_vectors->vector_irq, 157 &tqp_vectors->affinity_notify); 158 irq_set_affinity_hint(tqp_vectors->vector_irq, 159 &tqp_vectors->affinity_mask); 160 161 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; 162 } 163 164 return 0; 165 } 166 167 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, 168 u32 mask_en) 169 { 170 writel(mask_en, tqp_vector->mask_addr); 171 } 172 173 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) 174 { 175 napi_enable(&tqp_vector->napi); 176 177 /* enable vector */ 178 hns3_mask_vector_irq(tqp_vector, 1); 179 } 180 181 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) 182 { 183 /* disable vector */ 184 hns3_mask_vector_irq(tqp_vector, 0); 185 186 disable_irq(tqp_vector->vector_irq); 187 napi_disable(&tqp_vector->napi); 188 } 189 190 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, 191 u32 rl_value) 192 { 193 u32 rl_reg = hns3_rl_usec_to_reg(rl_value); 194 195 /* this defines the configuration for RL (Interrupt Rate Limiter). 
	 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
	 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
	 */

	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
				   struct hns3_nic_priv *priv)
{
	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing self-adaptive and GL */
	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
	tqp_vector->rx_group.coal.gl_adapt_enable = 1;

	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}

static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector,
				       tqp_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector,
				       tqp_vector->rx_group.coal.int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
}

static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int i, ret;

	if (kinfo->num_tc <= 1) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, kinfo->num_tc);
		if (ret) {
			netdev_err(netdev,
				   "netdev_set_num_tc fail, ret=%d!\n", ret);
			return ret;
		}

		for (i = 0; i < HNAE3_MAX_TC; i++) {
			if (!kinfo->tc_info[i].enable)
				continue;

			netdev_set_tc_queue(netdev,
					    kinfo->tc_info[i].tc,
					    kinfo->tc_info[i].tqp_count,
					    kinfo->tc_info[i].tqp_offset);
		}
	}

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 alloc_tqps, max_rss_size, rss_size;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
	rss_size = alloc_tqps / h->kinfo.num_tc;

	return min_t(u16, rss_size,
max_rss_size); 312 } 313 314 static int hns3_nic_net_up(struct net_device *netdev) 315 { 316 struct hns3_nic_priv *priv = netdev_priv(netdev); 317 struct hnae3_handle *h = priv->ae_handle; 318 int i, j; 319 int ret; 320 321 ret = hns3_nic_reset_all_ring(h); 322 if (ret) 323 return ret; 324 325 /* get irq resource for all vectors */ 326 ret = hns3_nic_init_irq(priv); 327 if (ret) { 328 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret); 329 return ret; 330 } 331 332 /* enable the vectors */ 333 for (i = 0; i < priv->vector_num; i++) 334 hns3_vector_enable(&priv->tqp_vector[i]); 335 336 /* start the ae_dev */ 337 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; 338 if (ret) 339 goto out_start_err; 340 341 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 342 343 return 0; 344 345 out_start_err: 346 for (j = i - 1; j >= 0; j--) 347 hns3_vector_disable(&priv->tqp_vector[j]); 348 349 hns3_nic_uninit_irq(priv); 350 351 return ret; 352 } 353 354 static int hns3_nic_net_open(struct net_device *netdev) 355 { 356 struct hns3_nic_priv *priv = netdev_priv(netdev); 357 struct hnae3_handle *h = hns3_get_handle(netdev); 358 struct hnae3_knic_private_info *kinfo; 359 int i, ret; 360 361 netif_carrier_off(netdev); 362 363 ret = hns3_nic_set_real_num_queue(netdev); 364 if (ret) 365 return ret; 366 367 ret = hns3_nic_net_up(netdev); 368 if (ret) { 369 netdev_err(netdev, 370 "hns net up fail, ret=%d!\n", ret); 371 return ret; 372 } 373 374 kinfo = &h->kinfo; 375 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { 376 netdev_set_prio_tc_map(netdev, i, 377 kinfo->prio_tc[i]); 378 } 379 380 priv->ae_handle->last_reset_time = jiffies; 381 return 0; 382 } 383 384 static void hns3_nic_net_down(struct net_device *netdev) 385 { 386 struct hns3_nic_priv *priv = netdev_priv(netdev); 387 const struct hnae3_ae_ops *ops; 388 int i; 389 390 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 391 return; 392 393 /* disable vectors */ 394 for (i = 0; i < priv->vector_num; i++) 395 hns3_vector_disable(&priv->tqp_vector[i]); 396 397 /* stop ae_dev */ 398 ops = priv->ae_handle->ae_algo->ops; 399 if (ops->stop) 400 ops->stop(priv->ae_handle); 401 402 /* free irq resources */ 403 hns3_nic_uninit_irq(priv); 404 405 hns3_clear_all_ring(priv->ae_handle); 406 } 407 408 static int hns3_nic_net_stop(struct net_device *netdev) 409 { 410 netif_tx_stop_all_queues(netdev); 411 netif_carrier_off(netdev); 412 413 hns3_nic_net_down(netdev); 414 415 return 0; 416 } 417 418 static int hns3_nic_uc_sync(struct net_device *netdev, 419 const unsigned char *addr) 420 { 421 struct hnae3_handle *h = hns3_get_handle(netdev); 422 423 if (h->ae_algo->ops->add_uc_addr) 424 return h->ae_algo->ops->add_uc_addr(h, addr); 425 426 return 0; 427 } 428 429 static int hns3_nic_uc_unsync(struct net_device *netdev, 430 const unsigned char *addr) 431 { 432 struct hnae3_handle *h = hns3_get_handle(netdev); 433 434 if (h->ae_algo->ops->rm_uc_addr) 435 return h->ae_algo->ops->rm_uc_addr(h, addr); 436 437 return 0; 438 } 439 440 static int hns3_nic_mc_sync(struct net_device *netdev, 441 const unsigned char *addr) 442 { 443 struct hnae3_handle *h = hns3_get_handle(netdev); 444 445 if (h->ae_algo->ops->add_mc_addr) 446 return h->ae_algo->ops->add_mc_addr(h, addr); 447 448 return 0; 449 } 450 451 static int hns3_nic_mc_unsync(struct net_device *netdev, 452 const unsigned char *addr) 453 { 454 struct hnae3_handle *h = hns3_get_handle(netdev); 455 456 if (h->ae_algo->ops->rm_mc_addr) 457 return h->ae_algo->ops->rm_mc_addr(h, addr); 458 459 return 0; 460 } 461 462 
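/* The sync/unsync helpers above are not called directly; hns3_nic_set_rx_mode()
 * below hands them to the core address-list helpers, which invoke them only
 * for unicast/multicast addresses that were actually added to or removed from
 * the netdev address lists:
 *
 *	__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
 *	__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);
 */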
static u8 hns3_get_netdev_flags(struct net_device *netdev) 463 { 464 u8 flags = 0; 465 466 if (netdev->flags & IFF_PROMISC) { 467 flags = HNAE3_USER_UPE | HNAE3_USER_MPE; 468 } else { 469 flags |= HNAE3_VLAN_FLTR; 470 if (netdev->flags & IFF_ALLMULTI) 471 flags |= HNAE3_USER_MPE; 472 } 473 474 return flags; 475 } 476 477 static void hns3_nic_set_rx_mode(struct net_device *netdev) 478 { 479 struct hnae3_handle *h = hns3_get_handle(netdev); 480 u8 new_flags; 481 int ret; 482 483 new_flags = hns3_get_netdev_flags(netdev); 484 485 ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); 486 if (ret) { 487 netdev_err(netdev, "sync uc address fail\n"); 488 if (ret == -ENOSPC) 489 new_flags |= HNAE3_OVERFLOW_UPE; 490 } 491 492 if (netdev->flags & IFF_MULTICAST) { 493 ret = __dev_mc_sync(netdev, hns3_nic_mc_sync, 494 hns3_nic_mc_unsync); 495 if (ret) { 496 netdev_err(netdev, "sync mc address fail\n"); 497 if (ret == -ENOSPC) 498 new_flags |= HNAE3_OVERFLOW_MPE; 499 } 500 } 501 502 hns3_update_promisc_mode(netdev, new_flags); 503 /* User mode Promisc mode enable and vlan filtering is disabled to 504 * let all packets in. MAC-VLAN Table overflow Promisc enabled and 505 * vlan fitering is enabled 506 */ 507 hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR); 508 h->netdev_flags = new_flags; 509 } 510 511 void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) 512 { 513 struct hns3_nic_priv *priv = netdev_priv(netdev); 514 struct hnae3_handle *h = priv->ae_handle; 515 516 if (h->ae_algo->ops->set_promisc_mode) { 517 h->ae_algo->ops->set_promisc_mode(h, 518 promisc_flags & HNAE3_UPE, 519 promisc_flags & HNAE3_MPE); 520 } 521 } 522 523 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) 524 { 525 struct hns3_nic_priv *priv = netdev_priv(netdev); 526 struct hnae3_handle *h = priv->ae_handle; 527 bool last_state; 528 529 if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) { 530 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false; 531 if (enable != last_state) { 532 netdev_info(netdev, 533 "%s vlan filter\n", 534 enable ? "enable" : "disable"); 535 h->ae_algo->ops->enable_vlan_filter(h, enable); 536 } 537 } 538 } 539 540 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, 541 u16 *mss, u32 *type_cs_vlan_tso) 542 { 543 u32 l4_offset, hdr_len; 544 union l3_hdr_info l3; 545 union l4_hdr_info l4; 546 u32 l4_paylen; 547 int ret; 548 549 if (!skb_is_gso(skb)) 550 return 0; 551 552 ret = skb_cow_head(skb, 0); 553 if (ret) 554 return ret; 555 556 l3.hdr = skb_network_header(skb); 557 l4.hdr = skb_transport_header(skb); 558 559 /* Software should clear the IPv4's checksum field when tso is 560 * needed. 561 */ 562 if (l3.v4->version == 4) 563 l3.v4->check = 0; 564 565 /* tunnel packet.*/ 566 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 567 SKB_GSO_GRE_CSUM | 568 SKB_GSO_UDP_TUNNEL | 569 SKB_GSO_UDP_TUNNEL_CSUM)) { 570 if ((!(skb_shinfo(skb)->gso_type & 571 SKB_GSO_PARTIAL)) && 572 (skb_shinfo(skb)->gso_type & 573 SKB_GSO_UDP_TUNNEL_CSUM)) { 574 /* Software should clear the udp's checksum 575 * field when tso is needed. 576 */ 577 l4.udp->check = 0; 578 } 579 /* reset l3&l4 pointers from outer to inner headers */ 580 l3.hdr = skb_inner_network_header(skb); 581 l4.hdr = skb_inner_transport_header(skb); 582 583 /* Software should clear the IPv4's checksum field when 584 * tso is needed. 
585 */ 586 if (l3.v4->version == 4) 587 l3.v4->check = 0; 588 } 589 590 /* normal or tunnel packet*/ 591 l4_offset = l4.hdr - skb->data; 592 hdr_len = (l4.tcp->doff * 4) + l4_offset; 593 594 /* remove payload length from inner pseudo checksum when tso*/ 595 l4_paylen = skb->len - l4_offset; 596 csum_replace_by_diff(&l4.tcp->check, 597 (__force __wsum)htonl(l4_paylen)); 598 599 /* find the txbd field values */ 600 *paylen = skb->len - hdr_len; 601 hnae3_set_bit(*type_cs_vlan_tso, 602 HNS3_TXD_TSO_B, 1); 603 604 /* get MSS for TSO */ 605 *mss = skb_shinfo(skb)->gso_size; 606 607 return 0; 608 } 609 610 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, 611 u8 *il4_proto) 612 { 613 union { 614 struct iphdr *v4; 615 struct ipv6hdr *v6; 616 unsigned char *hdr; 617 } l3; 618 unsigned char *l4_hdr; 619 unsigned char *exthdr; 620 u8 l4_proto_tmp; 621 __be16 frag_off; 622 623 /* find outer header point */ 624 l3.hdr = skb_network_header(skb); 625 l4_hdr = skb_transport_header(skb); 626 627 if (skb->protocol == htons(ETH_P_IPV6)) { 628 exthdr = l3.hdr + sizeof(*l3.v6); 629 l4_proto_tmp = l3.v6->nexthdr; 630 if (l4_hdr != exthdr) 631 ipv6_skip_exthdr(skb, exthdr - skb->data, 632 &l4_proto_tmp, &frag_off); 633 } else if (skb->protocol == htons(ETH_P_IP)) { 634 l4_proto_tmp = l3.v4->protocol; 635 } else { 636 return -EINVAL; 637 } 638 639 *ol4_proto = l4_proto_tmp; 640 641 /* tunnel packet */ 642 if (!skb->encapsulation) { 643 *il4_proto = 0; 644 return 0; 645 } 646 647 /* find inner header point */ 648 l3.hdr = skb_inner_network_header(skb); 649 l4_hdr = skb_inner_transport_header(skb); 650 651 if (l3.v6->version == 6) { 652 exthdr = l3.hdr + sizeof(*l3.v6); 653 l4_proto_tmp = l3.v6->nexthdr; 654 if (l4_hdr != exthdr) 655 ipv6_skip_exthdr(skb, exthdr - skb->data, 656 &l4_proto_tmp, &frag_off); 657 } else if (l3.v4->version == 4) { 658 l4_proto_tmp = l3.v4->protocol; 659 } 660 661 *il4_proto = l4_proto_tmp; 662 663 return 0; 664 } 665 666 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, 667 u8 il4_proto, u32 *type_cs_vlan_tso, 668 u32 *ol_type_vlan_len_msec) 669 { 670 union { 671 struct iphdr *v4; 672 struct ipv6hdr *v6; 673 unsigned char *hdr; 674 } l3; 675 union { 676 struct tcphdr *tcp; 677 struct udphdr *udp; 678 struct gre_base_hdr *gre; 679 unsigned char *hdr; 680 } l4; 681 unsigned char *l2_hdr; 682 u8 l4_proto = ol4_proto; 683 u32 ol2_len; 684 u32 ol3_len; 685 u32 ol4_len; 686 u32 l2_len; 687 u32 l3_len; 688 689 l3.hdr = skb_network_header(skb); 690 l4.hdr = skb_transport_header(skb); 691 692 /* compute L2 header size for normal packet, defined in 2 Bytes */ 693 l2_len = l3.hdr - skb->data; 694 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, 695 HNS3_TXD_L2LEN_S, l2_len >> 1); 696 697 /* tunnel packet*/ 698 if (skb->encapsulation) { 699 /* compute OL2 header size, defined in 2 Bytes */ 700 ol2_len = l2_len; 701 hnae3_set_field(*ol_type_vlan_len_msec, 702 HNS3_TXD_L2LEN_M, 703 HNS3_TXD_L2LEN_S, ol2_len >> 1); 704 705 /* compute OL3 header size, defined in 4 Bytes */ 706 ol3_len = l4.hdr - l3.hdr; 707 hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, 708 HNS3_TXD_L3LEN_S, ol3_len >> 2); 709 710 /* MAC in UDP, MAC in GRE (0x6558)*/ 711 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { 712 /* switch MAC header ptr from outer to inner header.*/ 713 l2_hdr = skb_inner_mac_header(skb); 714 715 /* compute OL4 header size, defined in 4 Bytes. 
			 */
			ol4_len = l2_hdr - l4.hdr;
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
					ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes. */
			l2_len = l3.hdr - l2_hdr;
			hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
					HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware,
			 * the txbd len field is not filled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
			HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S, l4.tcp->doff);
		break;
	case IPPROTO_SCTP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct sctphdr) >> 2));
		break;
	case IPPROTO_UDP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct udphdr) >> 2));
		break;
	default:
		/* skb packet types not supported by hardware,
		 * the txbd len field is not filled.
		 */
		return;
	}
}

/* When skb->encapsulation is 0 and skb->ip_summed is CHECKSUM_PARTIAL for a
 * udp packet whose dest port is the IANA-assigned VXLAN port, the hardware
 * is expected to do the checksum offload, but it will not do the checksum
 * offload when the udp dest port is 4789.
 */
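/* hns3_tunnel_csum_bug() below detects that case and falls back to
 * skb_checksum_help(), which computes the UDP checksum in software; its
 * caller, hns3_set_l3l4_type_csum(), then skips setting the HNS3_TXD_L4CS_B
 * offload bit for that packet.
 */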
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT	4789
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
		return false;

	skb_checksum_help(skb);

	return true;
}

static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
				   u8 il4_proto, u32 *type_cs_vlan_tso,
				   u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	u32 l4_proto = ol4_proto;

	l3.hdr = skb_network_header(skb);

	/* define OL3 type and tunnel type(OL4).*/
	if (skb->encapsulation) {
		/* define outer network header type.*/
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_CSUM);
			else
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_NO_CSUM);

		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
					HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4).*/
		switch (l4_proto) {
		case IPPROTO_UDP:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_NVGRE);
			break;
		default:
			/* drop the skb tunnel packet if the hardware doesn't
			 * support it, because the hardware can't calculate the
			 * csum when doing TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already, the
			 * driver calculates the l4 checksum when not doing TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
	} else if (l3.v6->version == 6) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_TCP);
		break;
	case IPPROTO_UDP:
		if (hns3_tunnel_csum_bug(skb))
			break;

		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_SCTP);
		break;
	default:
		/* drop the skb tunnel packet if the hardware doesn't support
		 * it, because the hardware can't calculate the csum when
		 * doing TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already, the
		 * driver calculates the l4 checksum when not doing TSO.
909 */ 910 skb_checksum_help(skb); 911 return 0; 912 } 913 914 return 0; 915 } 916 917 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) 918 { 919 /* Config bd buffer end */ 920 hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, 921 HNS3_TXD_BDTYPE_S, 0); 922 hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); 923 hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); 924 hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); 925 } 926 927 static int hns3_fill_desc_vtags(struct sk_buff *skb, 928 struct hns3_enet_ring *tx_ring, 929 u32 *inner_vlan_flag, 930 u32 *out_vlan_flag, 931 u16 *inner_vtag, 932 u16 *out_vtag) 933 { 934 #define HNS3_TX_VLAN_PRIO_SHIFT 13 935 936 if (skb->protocol == htons(ETH_P_8021Q) && 937 !(tx_ring->tqp->handle->kinfo.netdev->features & 938 NETIF_F_HW_VLAN_CTAG_TX)) { 939 /* When HW VLAN acceleration is turned off, and the stack 940 * sets the protocol to 802.1q, the driver just need to 941 * set the protocol to the encapsulated ethertype. 942 */ 943 skb->protocol = vlan_get_protocol(skb); 944 return 0; 945 } 946 947 if (skb_vlan_tag_present(skb)) { 948 u16 vlan_tag; 949 950 vlan_tag = skb_vlan_tag_get(skb); 951 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT; 952 953 /* Based on hw strategy, use out_vtag in two layer tag case, 954 * and use inner_vtag in one tag case. 955 */ 956 if (skb->protocol == htons(ETH_P_8021Q)) { 957 hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); 958 *out_vtag = vlan_tag; 959 } else { 960 hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); 961 *inner_vtag = vlan_tag; 962 } 963 } else if (skb->protocol == htons(ETH_P_8021Q)) { 964 struct vlan_ethhdr *vhdr; 965 int rc; 966 967 rc = skb_cow_head(skb, 0); 968 if (rc < 0) 969 return rc; 970 vhdr = (struct vlan_ethhdr *)skb->data; 971 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) 972 << HNS3_TX_VLAN_PRIO_SHIFT); 973 } 974 975 skb->protocol = vlan_get_protocol(skb); 976 return 0; 977 } 978 979 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, 980 int size, dma_addr_t dma, int frag_end, 981 enum hns_desc_type type) 982 { 983 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 984 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 985 u32 ol_type_vlan_len_msec = 0; 986 u16 bdtp_fe_sc_vld_ra_ri = 0; 987 u32 type_cs_vlan_tso = 0; 988 struct sk_buff *skb; 989 u16 inner_vtag = 0; 990 u16 out_vtag = 0; 991 u32 paylen = 0; 992 u16 mss = 0; 993 u8 ol4_proto; 994 u8 il4_proto; 995 int ret; 996 997 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ 998 desc_cb->priv = priv; 999 desc_cb->length = size; 1000 desc_cb->dma = dma; 1001 desc_cb->type = type; 1002 1003 /* now, fill the descriptor */ 1004 desc->addr = cpu_to_le64(dma); 1005 desc->tx.send_size = cpu_to_le16((u16)size); 1006 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); 1007 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); 1008 1009 if (type == DESC_TYPE_SKB) { 1010 skb = (struct sk_buff *)priv; 1011 paylen = skb->len; 1012 1013 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, 1014 &ol_type_vlan_len_msec, 1015 &inner_vtag, &out_vtag); 1016 if (unlikely(ret)) 1017 return ret; 1018 1019 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1020 skb_reset_mac_len(skb); 1021 1022 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); 1023 if (ret) 1024 return ret; 1025 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, 1026 &type_cs_vlan_tso, 1027 &ol_type_vlan_len_msec); 1028 ret = 
hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, 1029 &type_cs_vlan_tso, 1030 &ol_type_vlan_len_msec); 1031 if (ret) 1032 return ret; 1033 1034 ret = hns3_set_tso(skb, &paylen, &mss, 1035 &type_cs_vlan_tso); 1036 if (ret) 1037 return ret; 1038 } 1039 1040 /* Set txbd */ 1041 desc->tx.ol_type_vlan_len_msec = 1042 cpu_to_le32(ol_type_vlan_len_msec); 1043 desc->tx.type_cs_vlan_tso_len = 1044 cpu_to_le32(type_cs_vlan_tso); 1045 desc->tx.paylen = cpu_to_le32(paylen); 1046 desc->tx.mss = cpu_to_le16(mss); 1047 desc->tx.vlan_tag = cpu_to_le16(inner_vtag); 1048 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); 1049 } 1050 1051 /* move ring pointer to next.*/ 1052 ring_ptr_move_fw(ring, next_to_use); 1053 1054 return 0; 1055 } 1056 1057 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, 1058 int size, dma_addr_t dma, int frag_end, 1059 enum hns_desc_type type) 1060 { 1061 unsigned int frag_buf_num; 1062 unsigned int k; 1063 int sizeoflast; 1064 int ret; 1065 1066 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 1067 sizeoflast = size % HNS3_MAX_BD_SIZE; 1068 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 1069 1070 /* When the frag size is bigger than hardware, split this frag */ 1071 for (k = 0; k < frag_buf_num; k++) { 1072 ret = hns3_fill_desc(ring, priv, 1073 (k == frag_buf_num - 1) ? 1074 sizeoflast : HNS3_MAX_BD_SIZE, 1075 dma + HNS3_MAX_BD_SIZE * k, 1076 frag_end && (k == frag_buf_num - 1) ? 1 : 0, 1077 (type == DESC_TYPE_SKB && !k) ? 1078 DESC_TYPE_SKB : DESC_TYPE_PAGE); 1079 if (ret) 1080 return ret; 1081 } 1082 1083 return 0; 1084 } 1085 1086 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, 1087 struct hns3_enet_ring *ring) 1088 { 1089 struct sk_buff *skb = *out_skb; 1090 struct skb_frag_struct *frag; 1091 int bdnum_for_frag; 1092 int frag_num; 1093 int buf_num; 1094 int size; 1095 int i; 1096 1097 size = skb_headlen(skb); 1098 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 1099 1100 frag_num = skb_shinfo(skb)->nr_frags; 1101 for (i = 0; i < frag_num; i++) { 1102 frag = &skb_shinfo(skb)->frags[i]; 1103 size = skb_frag_size(frag); 1104 bdnum_for_frag = 1105 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 1106 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) 1107 return -ENOMEM; 1108 1109 buf_num += bdnum_for_frag; 1110 } 1111 1112 if (buf_num > ring_space(ring)) 1113 return -EBUSY; 1114 1115 *bnum = buf_num; 1116 return 0; 1117 } 1118 1119 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, 1120 struct hns3_enet_ring *ring) 1121 { 1122 struct sk_buff *skb = *out_skb; 1123 int buf_num; 1124 1125 /* No. 
of segments (plus a header) */ 1126 buf_num = skb_shinfo(skb)->nr_frags + 1; 1127 1128 if (unlikely(ring_space(ring) < buf_num)) 1129 return -EBUSY; 1130 1131 *bnum = buf_num; 1132 1133 return 0; 1134 } 1135 1136 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) 1137 { 1138 struct device *dev = ring_to_dev(ring); 1139 unsigned int i; 1140 1141 for (i = 0; i < ring->desc_num; i++) { 1142 /* check if this is where we started */ 1143 if (ring->next_to_use == next_to_use_orig) 1144 break; 1145 1146 /* unmap the descriptor dma address */ 1147 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) 1148 dma_unmap_single(dev, 1149 ring->desc_cb[ring->next_to_use].dma, 1150 ring->desc_cb[ring->next_to_use].length, 1151 DMA_TO_DEVICE); 1152 else 1153 dma_unmap_page(dev, 1154 ring->desc_cb[ring->next_to_use].dma, 1155 ring->desc_cb[ring->next_to_use].length, 1156 DMA_TO_DEVICE); 1157 1158 /* rollback one */ 1159 ring_ptr_move_bw(ring, next_to_use); 1160 } 1161 } 1162 1163 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 1164 { 1165 struct hns3_nic_priv *priv = netdev_priv(netdev); 1166 struct hns3_nic_ring_data *ring_data = 1167 &tx_ring_data(priv, skb->queue_mapping); 1168 struct hns3_enet_ring *ring = ring_data->ring; 1169 struct device *dev = priv->dev; 1170 struct netdev_queue *dev_queue; 1171 struct skb_frag_struct *frag; 1172 int next_to_use_head; 1173 int next_to_use_frag; 1174 dma_addr_t dma; 1175 int buf_num; 1176 int seg_num; 1177 int size; 1178 int ret; 1179 int i; 1180 1181 /* Prefetch the data used later */ 1182 prefetch(skb->data); 1183 1184 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { 1185 case -EBUSY: 1186 u64_stats_update_begin(&ring->syncp); 1187 ring->stats.tx_busy++; 1188 u64_stats_update_end(&ring->syncp); 1189 1190 goto out_net_tx_busy; 1191 case -ENOMEM: 1192 u64_stats_update_begin(&ring->syncp); 1193 ring->stats.sw_err_cnt++; 1194 u64_stats_update_end(&ring->syncp); 1195 netdev_err(netdev, "no memory to xmit!\n"); 1196 1197 goto out_err_tx_ok; 1198 default: 1199 break; 1200 } 1201 1202 /* No. of segments (plus a header) */ 1203 seg_num = skb_shinfo(skb)->nr_frags + 1; 1204 /* Fill the first part */ 1205 size = skb_headlen(skb); 1206 1207 next_to_use_head = ring->next_to_use; 1208 1209 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); 1210 if (dma_mapping_error(dev, dma)) { 1211 netdev_err(netdev, "TX head DMA map failed\n"); 1212 ring->stats.sw_err_cnt++; 1213 goto out_err_tx_ok; 1214 } 1215 1216 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, 1217 DESC_TYPE_SKB); 1218 if (ret) 1219 goto head_dma_map_err; 1220 1221 next_to_use_frag = ring->next_to_use; 1222 /* Fill the fragments */ 1223 for (i = 1; i < seg_num; i++) { 1224 frag = &skb_shinfo(skb)->frags[i - 1]; 1225 size = skb_frag_size(frag); 1226 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); 1227 if (dma_mapping_error(dev, dma)) { 1228 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); 1229 ring->stats.sw_err_cnt++; 1230 goto frag_dma_map_err; 1231 } 1232 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, 1233 seg_num - 1 == i ? 
1 : 0, 1234 DESC_TYPE_PAGE); 1235 1236 if (ret) 1237 goto frag_dma_map_err; 1238 } 1239 1240 /* Complete translate all packets */ 1241 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); 1242 netdev_tx_sent_queue(dev_queue, skb->len); 1243 1244 wmb(); /* Commit all data before submit */ 1245 1246 hnae3_queue_xmit(ring->tqp, buf_num); 1247 1248 return NETDEV_TX_OK; 1249 1250 frag_dma_map_err: 1251 hns_nic_dma_unmap(ring, next_to_use_frag); 1252 1253 head_dma_map_err: 1254 hns_nic_dma_unmap(ring, next_to_use_head); 1255 1256 out_err_tx_ok: 1257 dev_kfree_skb_any(skb); 1258 return NETDEV_TX_OK; 1259 1260 out_net_tx_busy: 1261 netif_stop_subqueue(netdev, ring_data->queue_index); 1262 smp_mb(); /* Commit all data before submit */ 1263 1264 return NETDEV_TX_BUSY; 1265 } 1266 1267 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) 1268 { 1269 struct hnae3_handle *h = hns3_get_handle(netdev); 1270 struct sockaddr *mac_addr = p; 1271 int ret; 1272 1273 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) 1274 return -EADDRNOTAVAIL; 1275 1276 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { 1277 netdev_info(netdev, "already using mac address %pM\n", 1278 mac_addr->sa_data); 1279 return 0; 1280 } 1281 1282 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); 1283 if (ret) { 1284 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); 1285 return ret; 1286 } 1287 1288 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); 1289 1290 return 0; 1291 } 1292 1293 static int hns3_nic_do_ioctl(struct net_device *netdev, 1294 struct ifreq *ifr, int cmd) 1295 { 1296 struct hnae3_handle *h = hns3_get_handle(netdev); 1297 1298 if (!netif_running(netdev)) 1299 return -EINVAL; 1300 1301 if (!h->ae_algo->ops->do_ioctl) 1302 return -EOPNOTSUPP; 1303 1304 return h->ae_algo->ops->do_ioctl(h, ifr, cmd); 1305 } 1306 1307 static int hns3_nic_set_features(struct net_device *netdev, 1308 netdev_features_t features) 1309 { 1310 netdev_features_t changed = netdev->features ^ features; 1311 struct hns3_nic_priv *priv = netdev_priv(netdev); 1312 struct hnae3_handle *h = priv->ae_handle; 1313 int ret; 1314 1315 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { 1316 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { 1317 priv->ops.fill_desc = hns3_fill_desc_tso; 1318 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; 1319 } else { 1320 priv->ops.fill_desc = hns3_fill_desc; 1321 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; 1322 } 1323 } 1324 1325 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && 1326 h->ae_algo->ops->enable_vlan_filter) { 1327 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 1328 h->ae_algo->ops->enable_vlan_filter(h, true); 1329 else 1330 h->ae_algo->ops->enable_vlan_filter(h, false); 1331 } 1332 1333 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && 1334 h->ae_algo->ops->enable_hw_strip_rxvtag) { 1335 if (features & NETIF_F_HW_VLAN_CTAG_RX) 1336 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true); 1337 else 1338 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false); 1339 1340 if (ret) 1341 return ret; 1342 } 1343 1344 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { 1345 if (features & NETIF_F_NTUPLE) 1346 h->ae_algo->ops->enable_fd(h, true); 1347 else 1348 h->ae_algo->ops->enable_fd(h, false); 1349 } 1350 1351 netdev->features = features; 1352 return 0; 1353 } 1354 1355 static void hns3_nic_get_stats64(struct net_device *netdev, 1356 struct rtnl_link_stats64 *stats) 1357 { 1358 struct hns3_nic_priv *priv = netdev_priv(netdev); 
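	/* The per-ring counters below are read under each ring's u64_stats
	 * syncp: u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq()
	 * retry the read if the ring hot path updates the stats concurrently,
	 * so the 64-bit counters stay consistent on 32-bit systems as well.
	 */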
1359 int queue_num = priv->ae_handle->kinfo.num_tqps; 1360 struct hnae3_handle *handle = priv->ae_handle; 1361 struct hns3_enet_ring *ring; 1362 unsigned int start; 1363 unsigned int idx; 1364 u64 tx_bytes = 0; 1365 u64 rx_bytes = 0; 1366 u64 tx_pkts = 0; 1367 u64 rx_pkts = 0; 1368 u64 tx_drop = 0; 1369 u64 rx_drop = 0; 1370 1371 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 1372 return; 1373 1374 handle->ae_algo->ops->update_stats(handle, &netdev->stats); 1375 1376 for (idx = 0; idx < queue_num; idx++) { 1377 /* fetch the tx stats */ 1378 ring = priv->ring_data[idx].ring; 1379 do { 1380 start = u64_stats_fetch_begin_irq(&ring->syncp); 1381 tx_bytes += ring->stats.tx_bytes; 1382 tx_pkts += ring->stats.tx_pkts; 1383 tx_drop += ring->stats.tx_busy; 1384 tx_drop += ring->stats.sw_err_cnt; 1385 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1386 1387 /* fetch the rx stats */ 1388 ring = priv->ring_data[idx + queue_num].ring; 1389 do { 1390 start = u64_stats_fetch_begin_irq(&ring->syncp); 1391 rx_bytes += ring->stats.rx_bytes; 1392 rx_pkts += ring->stats.rx_pkts; 1393 rx_drop += ring->stats.non_vld_descs; 1394 rx_drop += ring->stats.err_pkt_len; 1395 rx_drop += ring->stats.l2_err; 1396 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1397 } 1398 1399 stats->tx_bytes = tx_bytes; 1400 stats->tx_packets = tx_pkts; 1401 stats->rx_bytes = rx_bytes; 1402 stats->rx_packets = rx_pkts; 1403 1404 stats->rx_errors = netdev->stats.rx_errors; 1405 stats->multicast = netdev->stats.multicast; 1406 stats->rx_length_errors = netdev->stats.rx_length_errors; 1407 stats->rx_crc_errors = netdev->stats.rx_crc_errors; 1408 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 1409 1410 stats->tx_errors = netdev->stats.tx_errors; 1411 stats->rx_dropped = rx_drop + netdev->stats.rx_dropped; 1412 stats->tx_dropped = tx_drop + netdev->stats.tx_dropped; 1413 stats->collisions = netdev->stats.collisions; 1414 stats->rx_over_errors = netdev->stats.rx_over_errors; 1415 stats->rx_frame_errors = netdev->stats.rx_frame_errors; 1416 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; 1417 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; 1418 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; 1419 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; 1420 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; 1421 stats->tx_window_errors = netdev->stats.tx_window_errors; 1422 stats->rx_compressed = netdev->stats.rx_compressed; 1423 stats->tx_compressed = netdev->stats.tx_compressed; 1424 } 1425 1426 static int hns3_setup_tc(struct net_device *netdev, void *type_data) 1427 { 1428 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 1429 struct hnae3_handle *h = hns3_get_handle(netdev); 1430 struct hnae3_knic_private_info *kinfo = &h->kinfo; 1431 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; 1432 u8 tc = mqprio_qopt->qopt.num_tc; 1433 u16 mode = mqprio_qopt->mode; 1434 u8 hw = mqprio_qopt->qopt.hw; 1435 bool if_running; 1436 int ret; 1437 1438 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && 1439 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) 1440 return -EOPNOTSUPP; 1441 1442 if (tc > HNAE3_MAX_TC) 1443 return -EINVAL; 1444 1445 if (!netdev) 1446 return -EINVAL; 1447 1448 if_running = netif_running(netdev); 1449 if (if_running) { 1450 hns3_nic_net_stop(netdev); 1451 msleep(100); 1452 } 1453 1454 ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 
1455 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; 1456 if (ret) 1457 goto out; 1458 1459 ret = hns3_nic_set_real_num_queue(netdev); 1460 1461 out: 1462 if (if_running) 1463 hns3_nic_net_open(netdev); 1464 1465 return ret; 1466 } 1467 1468 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 1469 void *type_data) 1470 { 1471 if (type != TC_SETUP_QDISC_MQPRIO) 1472 return -EOPNOTSUPP; 1473 1474 return hns3_setup_tc(dev, type_data); 1475 } 1476 1477 static int hns3_vlan_rx_add_vid(struct net_device *netdev, 1478 __be16 proto, u16 vid) 1479 { 1480 struct hnae3_handle *h = hns3_get_handle(netdev); 1481 struct hns3_nic_priv *priv = netdev_priv(netdev); 1482 int ret = -EIO; 1483 1484 if (h->ae_algo->ops->set_vlan_filter) 1485 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); 1486 1487 if (!ret) 1488 set_bit(vid, priv->active_vlans); 1489 1490 return ret; 1491 } 1492 1493 static int hns3_vlan_rx_kill_vid(struct net_device *netdev, 1494 __be16 proto, u16 vid) 1495 { 1496 struct hnae3_handle *h = hns3_get_handle(netdev); 1497 struct hns3_nic_priv *priv = netdev_priv(netdev); 1498 int ret = -EIO; 1499 1500 if (h->ae_algo->ops->set_vlan_filter) 1501 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); 1502 1503 if (!ret) 1504 clear_bit(vid, priv->active_vlans); 1505 1506 return ret; 1507 } 1508 1509 static void hns3_restore_vlan(struct net_device *netdev) 1510 { 1511 struct hns3_nic_priv *priv = netdev_priv(netdev); 1512 u16 vid; 1513 int ret; 1514 1515 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 1516 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); 1517 if (ret) 1518 netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n", 1519 vid, ret); 1520 } 1521 } 1522 1523 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 1524 u8 qos, __be16 vlan_proto) 1525 { 1526 struct hnae3_handle *h = hns3_get_handle(netdev); 1527 int ret = -EIO; 1528 1529 if (h->ae_algo->ops->set_vf_vlan_filter) 1530 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, 1531 qos, vlan_proto); 1532 1533 return ret; 1534 } 1535 1536 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) 1537 { 1538 struct hnae3_handle *h = hns3_get_handle(netdev); 1539 bool if_running = netif_running(netdev); 1540 int ret; 1541 1542 if (!h->ae_algo->ops->set_mtu) 1543 return -EOPNOTSUPP; 1544 1545 /* if this was called with netdev up then bring netdevice down */ 1546 if (if_running) { 1547 (void)hns3_nic_net_stop(netdev); 1548 msleep(100); 1549 } 1550 1551 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 1552 if (ret) 1553 netdev_err(netdev, "failed to change MTU in hardware %d\n", 1554 ret); 1555 else 1556 netdev->mtu = new_mtu; 1557 1558 /* if the netdev was running earlier, bring it up again */ 1559 if (if_running && hns3_nic_net_open(netdev)) 1560 ret = -EINVAL; 1561 1562 return ret; 1563 } 1564 1565 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) 1566 { 1567 struct hns3_nic_priv *priv = netdev_priv(ndev); 1568 struct hns3_enet_ring *tx_ring = NULL; 1569 int timeout_queue = 0; 1570 int hw_head, hw_tail; 1571 int i; 1572 1573 /* Find the stopped queue the same way the stack does */ 1574 for (i = 0; i < ndev->real_num_tx_queues; i++) { 1575 struct netdev_queue *q; 1576 unsigned long trans_start; 1577 1578 q = netdev_get_tx_queue(ndev, i); 1579 trans_start = q->trans_start; 1580 if (netif_xmit_stopped(q) && 1581 time_after(jiffies, 1582 (trans_start + ndev->watchdog_timeo))) { 1583 timeout_queue = i; 1584 break; 1585 } 
1586 } 1587 1588 if (i == ndev->num_tx_queues) { 1589 netdev_info(ndev, 1590 "no netdev TX timeout queue found, timeout count: %llu\n", 1591 priv->tx_timeout_count); 1592 return false; 1593 } 1594 1595 tx_ring = priv->ring_data[timeout_queue].ring; 1596 1597 hw_head = readl_relaxed(tx_ring->tqp->io_base + 1598 HNS3_RING_TX_RING_HEAD_REG); 1599 hw_tail = readl_relaxed(tx_ring->tqp->io_base + 1600 HNS3_RING_TX_RING_TAIL_REG); 1601 netdev_info(ndev, 1602 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n", 1603 priv->tx_timeout_count, 1604 timeout_queue, 1605 tx_ring->next_to_use, 1606 tx_ring->next_to_clean, 1607 hw_head, 1608 hw_tail, 1609 readl(tx_ring->tqp_vector->mask_addr)); 1610 1611 return true; 1612 } 1613 1614 static void hns3_nic_net_timeout(struct net_device *ndev) 1615 { 1616 struct hns3_nic_priv *priv = netdev_priv(ndev); 1617 struct hnae3_handle *h = priv->ae_handle; 1618 1619 if (!hns3_get_tx_timeo_queue_info(ndev)) 1620 return; 1621 1622 priv->tx_timeout_count++; 1623 1624 if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo))) 1625 return; 1626 1627 /* request the reset */ 1628 if (h->ae_algo->ops->reset_event) 1629 h->ae_algo->ops->reset_event(h); 1630 } 1631 1632 static const struct net_device_ops hns3_nic_netdev_ops = { 1633 .ndo_open = hns3_nic_net_open, 1634 .ndo_stop = hns3_nic_net_stop, 1635 .ndo_start_xmit = hns3_nic_net_xmit, 1636 .ndo_tx_timeout = hns3_nic_net_timeout, 1637 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 1638 .ndo_do_ioctl = hns3_nic_do_ioctl, 1639 .ndo_change_mtu = hns3_nic_change_mtu, 1640 .ndo_set_features = hns3_nic_set_features, 1641 .ndo_get_stats64 = hns3_nic_get_stats64, 1642 .ndo_setup_tc = hns3_nic_setup_tc, 1643 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 1644 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 1645 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 1646 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 1647 }; 1648 1649 static bool hns3_is_phys_func(struct pci_dev *pdev) 1650 { 1651 u32 dev_id = pdev->device; 1652 1653 switch (dev_id) { 1654 case HNAE3_DEV_ID_GE: 1655 case HNAE3_DEV_ID_25GE: 1656 case HNAE3_DEV_ID_25GE_RDMA: 1657 case HNAE3_DEV_ID_25GE_RDMA_MACSEC: 1658 case HNAE3_DEV_ID_50GE_RDMA: 1659 case HNAE3_DEV_ID_50GE_RDMA_MACSEC: 1660 case HNAE3_DEV_ID_100G_RDMA_MACSEC: 1661 return true; 1662 case HNAE3_DEV_ID_100G_VF: 1663 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: 1664 return false; 1665 default: 1666 dev_warn(&pdev->dev, "un-recognized pci device-id %d", 1667 dev_id); 1668 } 1669 1670 return false; 1671 } 1672 1673 static void hns3_disable_sriov(struct pci_dev *pdev) 1674 { 1675 /* If our VFs are assigned we cannot shut down SR-IOV 1676 * without causing issues, so just leave the hardware 1677 * available but disabled 1678 */ 1679 if (pci_vfs_assigned(pdev)) { 1680 dev_warn(&pdev->dev, 1681 "disabling driver while VFs are assigned\n"); 1682 return; 1683 } 1684 1685 pci_disable_sriov(pdev); 1686 } 1687 1688 static void hns3_get_dev_capability(struct pci_dev *pdev, 1689 struct hnae3_ae_dev *ae_dev) 1690 { 1691 if (pdev->revision >= 0x21) 1692 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1); 1693 } 1694 1695 /* hns3_probe - Device initialization routine 1696 * @pdev: PCI device information struct 1697 * @ent: entry in hns3_pci_tbl 1698 * 1699 * hns3_probe initializes a PF identified by a pci_dev structure. 1700 * The OS initialization, configuring of the PF private structure, 1701 * and a hardware reset occur. 
1702 * 1703 * Returns 0 on success, negative on failure 1704 */ 1705 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1706 { 1707 struct hnae3_ae_dev *ae_dev; 1708 int ret; 1709 1710 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), 1711 GFP_KERNEL); 1712 if (!ae_dev) { 1713 ret = -ENOMEM; 1714 return ret; 1715 } 1716 1717 ae_dev->pdev = pdev; 1718 ae_dev->flag = ent->driver_data; 1719 ae_dev->dev_type = HNAE3_DEV_KNIC; 1720 ae_dev->reset_type = HNAE3_NONE_RESET; 1721 hns3_get_dev_capability(pdev, ae_dev); 1722 pci_set_drvdata(pdev, ae_dev); 1723 1724 hnae3_register_ae_dev(ae_dev); 1725 1726 return 0; 1727 } 1728 1729 /* hns3_remove - Device removal routine 1730 * @pdev: PCI device information struct 1731 */ 1732 static void hns3_remove(struct pci_dev *pdev) 1733 { 1734 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1735 1736 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) 1737 hns3_disable_sriov(pdev); 1738 1739 hnae3_unregister_ae_dev(ae_dev); 1740 } 1741 1742 /** 1743 * hns3_pci_sriov_configure 1744 * @pdev: pointer to a pci_dev structure 1745 * @num_vfs: number of VFs to allocate 1746 * 1747 * Enable or change the number of VFs. Called when the user updates the number 1748 * of VFs in sysfs. 1749 **/ 1750 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 1751 { 1752 int ret; 1753 1754 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { 1755 dev_warn(&pdev->dev, "Can not config SRIOV\n"); 1756 return -EINVAL; 1757 } 1758 1759 if (num_vfs) { 1760 ret = pci_enable_sriov(pdev, num_vfs); 1761 if (ret) 1762 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); 1763 else 1764 return num_vfs; 1765 } else if (!pci_vfs_assigned(pdev)) { 1766 pci_disable_sriov(pdev); 1767 } else { 1768 dev_warn(&pdev->dev, 1769 "Unable to free VFs because some are assigned to VMs.\n"); 1770 } 1771 1772 return 0; 1773 } 1774 1775 static void hns3_shutdown(struct pci_dev *pdev) 1776 { 1777 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1778 1779 hnae3_unregister_ae_dev(ae_dev); 1780 devm_kfree(&pdev->dev, ae_dev); 1781 pci_set_drvdata(pdev, NULL); 1782 1783 if (system_state == SYSTEM_POWER_OFF) 1784 pci_set_power_state(pdev, PCI_D3hot); 1785 } 1786 1787 static struct pci_driver hns3_driver = { 1788 .name = hns3_driver_name, 1789 .id_table = hns3_pci_tbl, 1790 .probe = hns3_probe, 1791 .remove = hns3_remove, 1792 .shutdown = hns3_shutdown, 1793 .sriov_configure = hns3_pci_sriov_configure, 1794 }; 1795 1796 /* set default feature to hns3 */ 1797 static void hns3_set_default_feature(struct net_device *netdev) 1798 { 1799 struct hnae3_handle *h = hns3_get_handle(netdev); 1800 struct pci_dev *pdev = h->pdev; 1801 1802 netdev->priv_flags |= IFF_UNICAST_FLT; 1803 1804 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1805 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1806 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1807 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1808 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; 1809 1810 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 1811 1812 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 1813 1814 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1815 NETIF_F_HW_VLAN_CTAG_FILTER | 1816 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 1817 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1818 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1819 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1820 NETIF_F_GSO_UDP_TUNNEL_CSUM | 
NETIF_F_SCTP_CRC; 1821 1822 netdev->vlan_features |= 1823 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | 1824 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | 1825 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1826 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1827 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; 1828 1829 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1830 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 1831 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1832 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1833 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1834 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; 1835 1836 if (pdev->revision >= 0x21) { 1837 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1838 1839 if (!(h->flags & HNAE3_SUPPORT_VF)) { 1840 netdev->hw_features |= NETIF_F_NTUPLE; 1841 netdev->features |= NETIF_F_NTUPLE; 1842 } 1843 } 1844 } 1845 1846 static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 1847 struct hns3_desc_cb *cb) 1848 { 1849 unsigned int order = hnae3_page_order(ring); 1850 struct page *p; 1851 1852 p = dev_alloc_pages(order); 1853 if (!p) 1854 return -ENOMEM; 1855 1856 cb->priv = p; 1857 cb->page_offset = 0; 1858 cb->reuse_flag = 0; 1859 cb->buf = page_address(p); 1860 cb->length = hnae3_page_size(ring); 1861 cb->type = DESC_TYPE_PAGE; 1862 1863 return 0; 1864 } 1865 1866 static void hns3_free_buffer(struct hns3_enet_ring *ring, 1867 struct hns3_desc_cb *cb) 1868 { 1869 if (cb->type == DESC_TYPE_SKB) 1870 dev_kfree_skb_any((struct sk_buff *)cb->priv); 1871 else if (!HNAE3_IS_TX_RING(ring)) 1872 put_page((struct page *)cb->priv); 1873 memset(cb, 0, sizeof(*cb)); 1874 } 1875 1876 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) 1877 { 1878 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 1879 cb->length, ring_to_dma_dir(ring)); 1880 1881 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) 1882 return -EIO; 1883 1884 return 0; 1885 } 1886 1887 static void hns3_unmap_buffer(struct hns3_enet_ring *ring, 1888 struct hns3_desc_cb *cb) 1889 { 1890 if (cb->type == DESC_TYPE_SKB) 1891 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 1892 ring_to_dma_dir(ring)); 1893 else 1894 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 1895 ring_to_dma_dir(ring)); 1896 } 1897 1898 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) 1899 { 1900 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 1901 ring->desc[i].addr = 0; 1902 } 1903 1904 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) 1905 { 1906 struct hns3_desc_cb *cb = &ring->desc_cb[i]; 1907 1908 if (!ring->desc_cb[i].dma) 1909 return; 1910 1911 hns3_buffer_detach(ring, i); 1912 hns3_free_buffer(ring, cb); 1913 } 1914 1915 static void hns3_free_buffers(struct hns3_enet_ring *ring) 1916 { 1917 int i; 1918 1919 for (i = 0; i < ring->desc_num; i++) 1920 hns3_free_buffer_detach(ring, i); 1921 } 1922 1923 /* free desc along with its attached buffer */ 1924 static void hns3_free_desc(struct hns3_enet_ring *ring) 1925 { 1926 int size = ring->desc_num * sizeof(ring->desc[0]); 1927 1928 hns3_free_buffers(ring); 1929 1930 if (ring->desc) { 1931 dma_free_coherent(ring_to_dev(ring), size, 1932 ring->desc, ring->desc_dma_addr); 1933 ring->desc = NULL; 1934 } 1935 } 1936 1937 static int hns3_alloc_desc(struct hns3_enet_ring *ring) 1938 { 1939 int size = ring->desc_num * sizeof(ring->desc[0]); 1940 1941 ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, 1942 &ring->desc_dma_addr, 1943 
GFP_KERNEL); 1944 if (!ring->desc) 1945 return -ENOMEM; 1946 1947 return 0; 1948 } 1949 1950 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, 1951 struct hns3_desc_cb *cb) 1952 { 1953 int ret; 1954 1955 ret = hns3_alloc_buffer(ring, cb); 1956 if (ret) 1957 goto out; 1958 1959 ret = hns3_map_buffer(ring, cb); 1960 if (ret) 1961 goto out_with_buf; 1962 1963 return 0; 1964 1965 out_with_buf: 1966 hns3_free_buffer(ring, cb); 1967 out: 1968 return ret; 1969 } 1970 1971 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) 1972 { 1973 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); 1974 1975 if (ret) 1976 return ret; 1977 1978 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 1979 1980 return 0; 1981 } 1982 1983 /* Allocate memory for raw pkg, and map with dma */ 1984 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) 1985 { 1986 int i, j, ret; 1987 1988 for (i = 0; i < ring->desc_num; i++) { 1989 ret = hns3_alloc_buffer_attach(ring, i); 1990 if (ret) 1991 goto out_buffer_fail; 1992 } 1993 1994 return 0; 1995 1996 out_buffer_fail: 1997 for (j = i - 1; j >= 0; j--) 1998 hns3_free_buffer_detach(ring, j); 1999 return ret; 2000 } 2001 2002 /* detach a in-used buffer and replace with a reserved one */ 2003 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, 2004 struct hns3_desc_cb *res_cb) 2005 { 2006 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2007 ring->desc_cb[i] = *res_cb; 2008 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2009 ring->desc[i].rx.bd_base_info = 0; 2010 } 2011 2012 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) 2013 { 2014 ring->desc_cb[i].reuse_flag = 0; 2015 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma 2016 + ring->desc_cb[i].page_offset); 2017 ring->desc[i].rx.bd_base_info = 0; 2018 } 2019 2020 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, 2021 int *pkts) 2022 { 2023 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; 2024 2025 (*pkts) += (desc_cb->type == DESC_TYPE_SKB); 2026 (*bytes) += desc_cb->length; 2027 /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/ 2028 hns3_free_buffer_detach(ring, ring->next_to_clean); 2029 2030 ring_ptr_move_fw(ring, next_to_clean); 2031 } 2032 2033 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) 2034 { 2035 int u = ring->next_to_use; 2036 int c = ring->next_to_clean; 2037 2038 if (unlikely(h > ring->desc_num)) 2039 return 0; 2040 2041 return u > c ? 
(h > c && h <= u) : (h > c || h <= u); 2042 } 2043 2044 void hns3_clean_tx_ring(struct hns3_enet_ring *ring) 2045 { 2046 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2047 struct hns3_nic_priv *priv = netdev_priv(netdev); 2048 struct netdev_queue *dev_queue; 2049 int bytes, pkts; 2050 int head; 2051 2052 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); 2053 rmb(); /* Make sure head is ready before touch any data */ 2054 2055 if (is_ring_empty(ring) || head == ring->next_to_clean) 2056 return; /* no data to poll */ 2057 2058 if (unlikely(!is_valid_clean_head(ring, head))) { 2059 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, 2060 ring->next_to_use, ring->next_to_clean); 2061 2062 u64_stats_update_begin(&ring->syncp); 2063 ring->stats.io_err_cnt++; 2064 u64_stats_update_end(&ring->syncp); 2065 return; 2066 } 2067 2068 bytes = 0; 2069 pkts = 0; 2070 while (head != ring->next_to_clean) { 2071 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); 2072 /* Issue prefetch for next Tx descriptor */ 2073 prefetch(&ring->desc_cb[ring->next_to_clean]); 2074 } 2075 2076 ring->tqp_vector->tx_group.total_bytes += bytes; 2077 ring->tqp_vector->tx_group.total_packets += pkts; 2078 2079 u64_stats_update_begin(&ring->syncp); 2080 ring->stats.tx_bytes += bytes; 2081 ring->stats.tx_pkts += pkts; 2082 u64_stats_update_end(&ring->syncp); 2083 2084 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 2085 netdev_tx_completed_queue(dev_queue, pkts, bytes); 2086 2087 if (unlikely(pkts && netif_carrier_ok(netdev) && 2088 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { 2089 /* Make sure that anybody stopping the queue after this 2090 * sees the new next_to_clean. 2091 */ 2092 smp_mb(); 2093 if (netif_tx_queue_stopped(dev_queue) && 2094 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 2095 netif_tx_wake_queue(dev_queue); 2096 ring->stats.restart_queue++; 2097 } 2098 } 2099 } 2100 2101 static int hns3_desc_unused(struct hns3_enet_ring *ring) 2102 { 2103 int ntc = ring->next_to_clean; 2104 int ntu = ring->next_to_use; 2105 2106 return ((ntc >= ntu) ? 
0 : ring->desc_num) + ntc - ntu;
2107 }
2108 
2109 static void
2110 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
2111 {
2112 	struct hns3_desc_cb *desc_cb;
2113 	struct hns3_desc_cb res_cbs;
2114 	int i, ret;
2115 
2116 	for (i = 0; i < cleaned_count; i++) {
2117 		desc_cb = &ring->desc_cb[ring->next_to_use];
2118 		if (desc_cb->reuse_flag) {
2119 			u64_stats_update_begin(&ring->syncp);
2120 			ring->stats.reuse_pg_cnt++;
2121 			u64_stats_update_end(&ring->syncp);
2122 
2123 			hns3_reuse_buffer(ring, ring->next_to_use);
2124 		} else {
2125 			ret = hns3_reserve_buffer_map(ring, &res_cbs);
2126 			if (ret) {
2127 				u64_stats_update_begin(&ring->syncp);
2128 				ring->stats.sw_err_cnt++;
2129 				u64_stats_update_end(&ring->syncp);
2130 
2131 				netdev_err(ring->tqp->handle->kinfo.netdev,
2132 					   "hnae reserve buffer map failed.\n");
2133 				break;
2134 			}
2135 			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2136 		}
2137 
2138 		ring_ptr_move_fw(ring, next_to_use);
2139 	}
2140 
2141 	wmb(); /* Make sure all data has been written before the doorbell update */
2142 	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2143 }
2144 
2145 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2146 				struct hns3_enet_ring *ring, int pull_len,
2147 				struct hns3_desc_cb *desc_cb)
2148 {
2149 	struct hns3_desc *desc;
2150 	u32 truesize;
2151 	int size;
2152 	int last_offset;
2153 	bool twobufs;
2154 
2155 	twobufs = ((PAGE_SIZE < 8192) &&
2156 		hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2157 
2158 	desc = &ring->desc[ring->next_to_clean];
2159 	size = le16_to_cpu(desc->rx.size);
2160 
2161 	truesize = hnae3_buf_size(ring);
2162 
2163 	if (!twobufs)
2164 		last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
2165 
2166 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2167 			size - pull_len, truesize);
2168 
2169 	/* Avoid re-using remote pages; flag as not reusable by default */
2170 	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2171 		return;
2172 
2173 	if (twobufs) {
2174 		/* If we are only owner of page we can reuse it */
2175 		if (likely(page_count(desc_cb->priv) == 1)) {
2176 			/* Flip page offset to other buffer */
2177 			desc_cb->page_offset ^= truesize;
2178 
2179 			desc_cb->reuse_flag = 1;
2180 			/* bump ref count on page before it is given */
2181 			get_page(desc_cb->priv);
2182 		}
2183 		return;
2184 	}
2185 
2186 	/* Move offset up to the next cache line */
2187 	desc_cb->page_offset += truesize;
2188 
2189 	if (desc_cb->page_offset <= last_offset) {
2190 		desc_cb->reuse_flag = 1;
2191 		/* Bump ref count on page before it is given */
2192 		get_page(desc_cb->priv);
2193 	}
2194 }
2195 
2196 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2197 			     struct hns3_desc *desc)
2198 {
2199 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2200 	int l3_type, l4_type;
2201 	u32 bd_base_info;
2202 	int ol4_type;
2203 	u32 l234info;
2204 
2205 	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2206 	l234info = le32_to_cpu(desc->rx.l234_info);
2207 
2208 	skb->ip_summed = CHECKSUM_NONE;
2209 
2210 	skb_checksum_none_assert(skb);
2211 
2212 	if (!(netdev->features & NETIF_F_RXCSUM))
2213 		return;
2214 
2215 	/* check if hardware has done checksum */
2216 	if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2217 		return;
2218 
2219 	if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
2220 		     hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
2221 		     hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2222 		     hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2223 		u64_stats_update_begin(&ring->syncp);
2224 		ring->stats.l3l4_csum_err++;
2225 
u64_stats_update_end(&ring->syncp); 2226 2227 return; 2228 } 2229 2230 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, 2231 HNS3_RXD_L3ID_S); 2232 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, 2233 HNS3_RXD_L4ID_S); 2234 2235 ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, 2236 HNS3_RXD_OL4ID_S); 2237 switch (ol4_type) { 2238 case HNS3_OL4_TYPE_MAC_IN_UDP: 2239 case HNS3_OL4_TYPE_NVGRE: 2240 skb->csum_level = 1; 2241 /* fall through */ 2242 case HNS3_OL4_TYPE_NO_TUN: 2243 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ 2244 if ((l3_type == HNS3_L3_TYPE_IPV4 || 2245 l3_type == HNS3_L3_TYPE_IPV6) && 2246 (l4_type == HNS3_L4_TYPE_UDP || 2247 l4_type == HNS3_L4_TYPE_TCP || 2248 l4_type == HNS3_L4_TYPE_SCTP)) 2249 skb->ip_summed = CHECKSUM_UNNECESSARY; 2250 break; 2251 default: 2252 break; 2253 } 2254 } 2255 2256 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) 2257 { 2258 napi_gro_receive(&ring->tqp_vector->napi, skb); 2259 } 2260 2261 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, 2262 struct hns3_desc *desc, u32 l234info, 2263 u16 *vlan_tag) 2264 { 2265 struct pci_dev *pdev = ring->tqp->handle->pdev; 2266 2267 if (pdev->revision == 0x20) { 2268 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2269 if (!(*vlan_tag & VLAN_VID_MASK)) 2270 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2271 2272 return (*vlan_tag != 0); 2273 } 2274 2275 #define HNS3_STRP_OUTER_VLAN 0x1 2276 #define HNS3_STRP_INNER_VLAN 0x2 2277 2278 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, 2279 HNS3_RXD_STRP_TAGP_S)) { 2280 case HNS3_STRP_OUTER_VLAN: 2281 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2282 return true; 2283 case HNS3_STRP_INNER_VLAN: 2284 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2285 return true; 2286 default: 2287 return false; 2288 } 2289 } 2290 2291 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, 2292 struct sk_buff *skb) 2293 { 2294 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 2295 struct hnae3_handle *handle = ring->tqp->handle; 2296 enum pkt_hash_types rss_type; 2297 2298 if (le32_to_cpu(desc->rx.rss_hash)) 2299 rss_type = handle->kinfo.rss_type; 2300 else 2301 rss_type = PKT_HASH_TYPE_NONE; 2302 2303 skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type); 2304 } 2305 2306 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, 2307 struct sk_buff **out_skb, int *out_bnum) 2308 { 2309 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2310 struct hns3_desc_cb *desc_cb; 2311 struct hns3_desc *desc; 2312 struct sk_buff *skb; 2313 unsigned char *va; 2314 u32 bd_base_info; 2315 int pull_len; 2316 u32 l234info; 2317 int length; 2318 int bnum; 2319 2320 desc = &ring->desc[ring->next_to_clean]; 2321 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2322 2323 prefetch(desc); 2324 2325 length = le16_to_cpu(desc->rx.size); 2326 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2327 2328 /* Check valid BD */ 2329 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) 2330 return -EFAULT; 2331 2332 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; 2333 2334 /* Prefetch first cache line of first page 2335 * Idea is to cache few bytes of the header of the packet. Our L1 Cache 2336 * line size is 64B so need to prefetch twice to make it 128B. But in 2337 * actual we can have greater size of caches with 128B Level 1 cache 2338 * lines. In such a case, single fetch would suffice to cache in the 2339 * relevant part of the header. 
2340 */ 2341 prefetch(va); 2342 #if L1_CACHE_BYTES < 128 2343 prefetch(va + L1_CACHE_BYTES); 2344 #endif 2345 2346 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, 2347 HNS3_RX_HEAD_SIZE); 2348 if (unlikely(!skb)) { 2349 netdev_err(netdev, "alloc rx skb fail\n"); 2350 2351 u64_stats_update_begin(&ring->syncp); 2352 ring->stats.sw_err_cnt++; 2353 u64_stats_update_end(&ring->syncp); 2354 2355 return -ENOMEM; 2356 } 2357 2358 prefetchw(skb->data); 2359 2360 bnum = 1; 2361 if (length <= HNS3_RX_HEAD_SIZE) { 2362 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 2363 2364 /* We can reuse buffer as-is, just make sure it is local */ 2365 if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) 2366 desc_cb->reuse_flag = 1; 2367 else /* This page cannot be reused so discard it */ 2368 put_page(desc_cb->priv); 2369 2370 ring_ptr_move_fw(ring, next_to_clean); 2371 } else { 2372 u64_stats_update_begin(&ring->syncp); 2373 ring->stats.seg_pkt_cnt++; 2374 u64_stats_update_end(&ring->syncp); 2375 2376 pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); 2377 2378 memcpy(__skb_put(skb, pull_len), va, 2379 ALIGN(pull_len, sizeof(long))); 2380 2381 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); 2382 ring_ptr_move_fw(ring, next_to_clean); 2383 2384 while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { 2385 desc = &ring->desc[ring->next_to_clean]; 2386 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2387 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2388 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); 2389 ring_ptr_move_fw(ring, next_to_clean); 2390 bnum++; 2391 } 2392 } 2393 2394 *out_bnum = bnum; 2395 2396 l234info = le32_to_cpu(desc->rx.l234_info); 2397 2398 /* Based on hw strategy, the tag offloaded will be stored at 2399 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 2400 * in one layer tag case. 
2401 */ 2402 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 2403 u16 vlan_tag; 2404 2405 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) 2406 __vlan_hwaccel_put_tag(skb, 2407 htons(ETH_P_8021Q), 2408 vlan_tag); 2409 } 2410 2411 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { 2412 u64_stats_update_begin(&ring->syncp); 2413 ring->stats.non_vld_descs++; 2414 u64_stats_update_end(&ring->syncp); 2415 2416 dev_kfree_skb_any(skb); 2417 return -EINVAL; 2418 } 2419 2420 if (unlikely((!desc->rx.pkt_len) || 2421 hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { 2422 u64_stats_update_begin(&ring->syncp); 2423 ring->stats.err_pkt_len++; 2424 u64_stats_update_end(&ring->syncp); 2425 2426 dev_kfree_skb_any(skb); 2427 return -EFAULT; 2428 } 2429 2430 if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { 2431 u64_stats_update_begin(&ring->syncp); 2432 ring->stats.l2_err++; 2433 u64_stats_update_end(&ring->syncp); 2434 2435 dev_kfree_skb_any(skb); 2436 return -EFAULT; 2437 } 2438 2439 u64_stats_update_begin(&ring->syncp); 2440 ring->stats.rx_pkts++; 2441 ring->stats.rx_bytes += skb->len; 2442 u64_stats_update_end(&ring->syncp); 2443 2444 ring->tqp_vector->rx_group.total_bytes += skb->len; 2445 2446 hns3_rx_checksum(ring, skb, desc); 2447 hns3_set_rx_skb_rss_type(ring, skb); 2448 2449 return 0; 2450 } 2451 2452 int hns3_clean_rx_ring( 2453 struct hns3_enet_ring *ring, int budget, 2454 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) 2455 { 2456 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 2457 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2458 int recv_pkts, recv_bds, clean_count, err; 2459 int unused_count = hns3_desc_unused(ring); 2460 struct sk_buff *skb = NULL; 2461 int num, bnum = 0; 2462 2463 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); 2464 rmb(); /* Make sure num taken effect before the other data is touched */ 2465 2466 recv_pkts = 0, recv_bds = 0, clean_count = 0; 2467 num -= unused_count; 2468 2469 while (recv_pkts < budget && recv_bds < num) { 2470 /* Reuse or realloc buffers */ 2471 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { 2472 hns3_nic_alloc_rx_buffers(ring, 2473 clean_count + unused_count); 2474 clean_count = 0; 2475 unused_count = hns3_desc_unused(ring); 2476 } 2477 2478 /* Poll one pkt */ 2479 err = hns3_handle_rx_bd(ring, &skb, &bnum); 2480 if (unlikely(!skb)) /* This fault cannot be repaired */ 2481 goto out; 2482 2483 recv_bds += bnum; 2484 clean_count += bnum; 2485 if (unlikely(err)) { /* Do jump the err */ 2486 recv_pkts++; 2487 continue; 2488 } 2489 2490 /* Do update ip stack process */ 2491 skb->protocol = eth_type_trans(skb, netdev); 2492 rx_fn(ring, skb); 2493 2494 recv_pkts++; 2495 } 2496 2497 out: 2498 /* Make all data has been write before submit */ 2499 if (clean_count + unused_count > 0) 2500 hns3_nic_alloc_rx_buffers(ring, 2501 clean_count + unused_count); 2502 2503 return recv_pkts; 2504 } 2505 2506 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) 2507 { 2508 struct hns3_enet_tqp_vector *tqp_vector = 2509 ring_group->ring->tqp_vector; 2510 enum hns3_flow_level_range new_flow_level; 2511 int packets_per_msecs; 2512 int bytes_per_msecs; 2513 u32 time_passed_ms; 2514 u16 new_int_gl; 2515 2516 if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies) 2517 return false; 2518 2519 if (ring_group->total_packets == 0) { 2520 ring_group->coal.int_gl = HNS3_INT_GL_50K; 2521 ring_group->coal.flow_level = HNS3_FLOW_LOW; 2522 return true; 2523 } 2524 2525 /* Simple 
throttlerate management 2526 * 0-10MB/s lower (50000 ints/s) 2527 * 10-20MB/s middle (20000 ints/s) 2528 * 20-1249MB/s high (18000 ints/s) 2529 * > 40000pps ultra (8000 ints/s) 2530 */ 2531 new_flow_level = ring_group->coal.flow_level; 2532 new_int_gl = ring_group->coal.int_gl; 2533 time_passed_ms = 2534 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); 2535 2536 if (!time_passed_ms) 2537 return false; 2538 2539 do_div(ring_group->total_packets, time_passed_ms); 2540 packets_per_msecs = ring_group->total_packets; 2541 2542 do_div(ring_group->total_bytes, time_passed_ms); 2543 bytes_per_msecs = ring_group->total_bytes; 2544 2545 #define HNS3_RX_LOW_BYTE_RATE 10000 2546 #define HNS3_RX_MID_BYTE_RATE 20000 2547 2548 switch (new_flow_level) { 2549 case HNS3_FLOW_LOW: 2550 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) 2551 new_flow_level = HNS3_FLOW_MID; 2552 break; 2553 case HNS3_FLOW_MID: 2554 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) 2555 new_flow_level = HNS3_FLOW_HIGH; 2556 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) 2557 new_flow_level = HNS3_FLOW_LOW; 2558 break; 2559 case HNS3_FLOW_HIGH: 2560 case HNS3_FLOW_ULTRA: 2561 default: 2562 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) 2563 new_flow_level = HNS3_FLOW_MID; 2564 break; 2565 } 2566 2567 #define HNS3_RX_ULTRA_PACKET_RATE 40 2568 2569 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && 2570 &tqp_vector->rx_group == ring_group) 2571 new_flow_level = HNS3_FLOW_ULTRA; 2572 2573 switch (new_flow_level) { 2574 case HNS3_FLOW_LOW: 2575 new_int_gl = HNS3_INT_GL_50K; 2576 break; 2577 case HNS3_FLOW_MID: 2578 new_int_gl = HNS3_INT_GL_20K; 2579 break; 2580 case HNS3_FLOW_HIGH: 2581 new_int_gl = HNS3_INT_GL_18K; 2582 break; 2583 case HNS3_FLOW_ULTRA: 2584 new_int_gl = HNS3_INT_GL_8K; 2585 break; 2586 default: 2587 break; 2588 } 2589 2590 ring_group->total_bytes = 0; 2591 ring_group->total_packets = 0; 2592 ring_group->coal.flow_level = new_flow_level; 2593 if (new_int_gl != ring_group->coal.int_gl) { 2594 ring_group->coal.int_gl = new_int_gl; 2595 return true; 2596 } 2597 return false; 2598 } 2599 2600 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) 2601 { 2602 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 2603 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 2604 bool rx_update, tx_update; 2605 2606 if (tqp_vector->int_adapt_down > 0) { 2607 tqp_vector->int_adapt_down--; 2608 return; 2609 } 2610 2611 if (rx_group->coal.gl_adapt_enable) { 2612 rx_update = hns3_get_new_int_gl(rx_group); 2613 if (rx_update) 2614 hns3_set_vector_coalesce_rx_gl(tqp_vector, 2615 rx_group->coal.int_gl); 2616 } 2617 2618 if (tx_group->coal.gl_adapt_enable) { 2619 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group); 2620 if (tx_update) 2621 hns3_set_vector_coalesce_tx_gl(tqp_vector, 2622 tx_group->coal.int_gl); 2623 } 2624 2625 tqp_vector->last_jiffies = jiffies; 2626 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; 2627 } 2628 2629 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 2630 { 2631 struct hns3_enet_ring *ring; 2632 int rx_pkt_total = 0; 2633 2634 struct hns3_enet_tqp_vector *tqp_vector = 2635 container_of(napi, struct hns3_enet_tqp_vector, napi); 2636 bool clean_complete = true; 2637 int rx_budget; 2638 2639 /* Since the actual Tx work is minimal, we can give the Tx a larger 2640 * budget and be more aggressive about cleaning up the Tx descriptors. 
2641 */ 2642 hns3_for_each_ring(ring, tqp_vector->tx_group) 2643 hns3_clean_tx_ring(ring); 2644 2645 /* make sure rx ring budget not smaller than 1 */ 2646 rx_budget = max(budget / tqp_vector->num_tqps, 1); 2647 2648 hns3_for_each_ring(ring, tqp_vector->rx_group) { 2649 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 2650 hns3_rx_skb); 2651 2652 if (rx_cleaned >= rx_budget) 2653 clean_complete = false; 2654 2655 rx_pkt_total += rx_cleaned; 2656 } 2657 2658 tqp_vector->rx_group.total_packets += rx_pkt_total; 2659 2660 if (!clean_complete) 2661 return budget; 2662 2663 napi_complete(napi); 2664 hns3_update_new_int_gl(tqp_vector); 2665 hns3_mask_vector_irq(tqp_vector, 1); 2666 2667 return rx_pkt_total; 2668 } 2669 2670 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 2671 struct hnae3_ring_chain_node *head) 2672 { 2673 struct pci_dev *pdev = tqp_vector->handle->pdev; 2674 struct hnae3_ring_chain_node *cur_chain = head; 2675 struct hnae3_ring_chain_node *chain; 2676 struct hns3_enet_ring *tx_ring; 2677 struct hns3_enet_ring *rx_ring; 2678 2679 tx_ring = tqp_vector->tx_group.ring; 2680 if (tx_ring) { 2681 cur_chain->tqp_index = tx_ring->tqp->tqp_index; 2682 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 2683 HNAE3_RING_TYPE_TX); 2684 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2685 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); 2686 2687 cur_chain->next = NULL; 2688 2689 while (tx_ring->next) { 2690 tx_ring = tx_ring->next; 2691 2692 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), 2693 GFP_KERNEL); 2694 if (!chain) 2695 return -ENOMEM; 2696 2697 cur_chain->next = chain; 2698 chain->tqp_index = tx_ring->tqp->tqp_index; 2699 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 2700 HNAE3_RING_TYPE_TX); 2701 hnae3_set_field(chain->int_gl_idx, 2702 HNAE3_RING_GL_IDX_M, 2703 HNAE3_RING_GL_IDX_S, 2704 HNAE3_RING_GL_TX); 2705 2706 cur_chain = chain; 2707 } 2708 } 2709 2710 rx_ring = tqp_vector->rx_group.ring; 2711 if (!tx_ring && rx_ring) { 2712 cur_chain->next = NULL; 2713 cur_chain->tqp_index = rx_ring->tqp->tqp_index; 2714 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 2715 HNAE3_RING_TYPE_RX); 2716 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2717 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 2718 2719 rx_ring = rx_ring->next; 2720 } 2721 2722 while (rx_ring) { 2723 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 2724 if (!chain) 2725 return -ENOMEM; 2726 2727 cur_chain->next = chain; 2728 chain->tqp_index = rx_ring->tqp->tqp_index; 2729 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 2730 HNAE3_RING_TYPE_RX); 2731 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2732 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 2733 2734 cur_chain = chain; 2735 2736 rx_ring = rx_ring->next; 2737 } 2738 2739 return 0; 2740 } 2741 2742 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 2743 struct hnae3_ring_chain_node *head) 2744 { 2745 struct pci_dev *pdev = tqp_vector->handle->pdev; 2746 struct hnae3_ring_chain_node *chain_tmp, *chain; 2747 2748 chain = head->next; 2749 2750 while (chain) { 2751 chain_tmp = chain->next; 2752 devm_kfree(&pdev->dev, chain); 2753 chain = chain_tmp; 2754 } 2755 } 2756 2757 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 2758 struct hns3_enet_ring *ring) 2759 { 2760 ring->next = group->ring; 2761 group->ring = ring; 2762 2763 group->count++; 2764 } 2765 2766 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) 2767 { 2768 struct pci_dev *pdev = 
priv->ae_handle->pdev; 2769 struct hns3_enet_tqp_vector *tqp_vector; 2770 int num_vectors = priv->vector_num; 2771 int numa_node; 2772 int vector_i; 2773 2774 numa_node = dev_to_node(&pdev->dev); 2775 2776 for (vector_i = 0; vector_i < num_vectors; vector_i++) { 2777 tqp_vector = &priv->tqp_vector[vector_i]; 2778 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), 2779 &tqp_vector->affinity_mask); 2780 } 2781 } 2782 2783 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 2784 { 2785 struct hnae3_ring_chain_node vector_ring_chain; 2786 struct hnae3_handle *h = priv->ae_handle; 2787 struct hns3_enet_tqp_vector *tqp_vector; 2788 int ret = 0; 2789 u16 i; 2790 2791 hns3_nic_set_cpumask(priv); 2792 2793 for (i = 0; i < priv->vector_num; i++) { 2794 tqp_vector = &priv->tqp_vector[i]; 2795 hns3_vector_gl_rl_init_hw(tqp_vector, priv); 2796 tqp_vector->num_tqps = 0; 2797 } 2798 2799 for (i = 0; i < h->kinfo.num_tqps; i++) { 2800 u16 vector_i = i % priv->vector_num; 2801 u16 tqp_num = h->kinfo.num_tqps; 2802 2803 tqp_vector = &priv->tqp_vector[vector_i]; 2804 2805 hns3_add_ring_to_group(&tqp_vector->tx_group, 2806 priv->ring_data[i].ring); 2807 2808 hns3_add_ring_to_group(&tqp_vector->rx_group, 2809 priv->ring_data[i + tqp_num].ring); 2810 2811 priv->ring_data[i].ring->tqp_vector = tqp_vector; 2812 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; 2813 tqp_vector->num_tqps++; 2814 } 2815 2816 for (i = 0; i < priv->vector_num; i++) { 2817 tqp_vector = &priv->tqp_vector[i]; 2818 2819 tqp_vector->rx_group.total_bytes = 0; 2820 tqp_vector->rx_group.total_packets = 0; 2821 tqp_vector->tx_group.total_bytes = 0; 2822 tqp_vector->tx_group.total_packets = 0; 2823 tqp_vector->handle = h; 2824 2825 ret = hns3_get_vector_ring_chain(tqp_vector, 2826 &vector_ring_chain); 2827 if (ret) 2828 return ret; 2829 2830 ret = h->ae_algo->ops->map_ring_to_vector(h, 2831 tqp_vector->vector_irq, &vector_ring_chain); 2832 2833 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 2834 2835 if (ret) 2836 return ret; 2837 2838 netif_napi_add(priv->netdev, &tqp_vector->napi, 2839 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 2840 } 2841 2842 return 0; 2843 } 2844 2845 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 2846 { 2847 struct hnae3_handle *h = priv->ae_handle; 2848 struct hns3_enet_tqp_vector *tqp_vector; 2849 struct hnae3_vector_info *vector; 2850 struct pci_dev *pdev = h->pdev; 2851 u16 tqp_num = h->kinfo.num_tqps; 2852 u16 vector_num; 2853 int ret = 0; 2854 u16 i; 2855 2856 /* RSS size, cpu online and vector_num should be the same */ 2857 /* Should consider 2p/4p later */ 2858 vector_num = min_t(u16, num_online_cpus(), tqp_num); 2859 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), 2860 GFP_KERNEL); 2861 if (!vector) 2862 return -ENOMEM; 2863 2864 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 2865 2866 priv->vector_num = vector_num; 2867 priv->tqp_vector = (struct hns3_enet_tqp_vector *) 2868 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 2869 GFP_KERNEL); 2870 if (!priv->tqp_vector) { 2871 ret = -ENOMEM; 2872 goto out; 2873 } 2874 2875 for (i = 0; i < priv->vector_num; i++) { 2876 tqp_vector = &priv->tqp_vector[i]; 2877 tqp_vector->idx = i; 2878 tqp_vector->mask_addr = vector[i].io_addr; 2879 tqp_vector->vector_irq = vector[i].vector; 2880 hns3_vector_gl_rl_init(tqp_vector, priv); 2881 } 2882 2883 out: 2884 devm_kfree(&pdev->dev, vector); 2885 return ret; 2886 } 2887 2888 static void hns3_clear_ring_group(struct 
hns3_enet_ring_group *group) 2889 { 2890 group->ring = NULL; 2891 group->count = 0; 2892 } 2893 2894 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 2895 { 2896 struct hnae3_ring_chain_node vector_ring_chain; 2897 struct hnae3_handle *h = priv->ae_handle; 2898 struct hns3_enet_tqp_vector *tqp_vector; 2899 int i, ret; 2900 2901 for (i = 0; i < priv->vector_num; i++) { 2902 tqp_vector = &priv->tqp_vector[i]; 2903 2904 ret = hns3_get_vector_ring_chain(tqp_vector, 2905 &vector_ring_chain); 2906 if (ret) 2907 return ret; 2908 2909 ret = h->ae_algo->ops->unmap_ring_from_vector(h, 2910 tqp_vector->vector_irq, &vector_ring_chain); 2911 if (ret) 2912 return ret; 2913 2914 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 2915 2916 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { 2917 (void)irq_set_affinity_hint( 2918 priv->tqp_vector[i].vector_irq, 2919 NULL); 2920 free_irq(priv->tqp_vector[i].vector_irq, 2921 &priv->tqp_vector[i]); 2922 } 2923 2924 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; 2925 hns3_clear_ring_group(&tqp_vector->rx_group); 2926 hns3_clear_ring_group(&tqp_vector->tx_group); 2927 netif_napi_del(&priv->tqp_vector[i].napi); 2928 } 2929 2930 return 0; 2931 } 2932 2933 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 2934 { 2935 struct hnae3_handle *h = priv->ae_handle; 2936 struct pci_dev *pdev = h->pdev; 2937 int i, ret; 2938 2939 for (i = 0; i < priv->vector_num; i++) { 2940 struct hns3_enet_tqp_vector *tqp_vector; 2941 2942 tqp_vector = &priv->tqp_vector[i]; 2943 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 2944 if (ret) 2945 return ret; 2946 } 2947 2948 devm_kfree(&pdev->dev, priv->tqp_vector); 2949 return 0; 2950 } 2951 2952 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 2953 int ring_type) 2954 { 2955 struct hns3_nic_ring_data *ring_data = priv->ring_data; 2956 int queue_num = priv->ae_handle->kinfo.num_tqps; 2957 struct pci_dev *pdev = priv->ae_handle->pdev; 2958 struct hns3_enet_ring *ring; 2959 2960 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); 2961 if (!ring) 2962 return -ENOMEM; 2963 2964 if (ring_type == HNAE3_RING_TYPE_TX) { 2965 ring_data[q->tqp_index].ring = ring; 2966 ring_data[q->tqp_index].queue_index = q->tqp_index; 2967 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; 2968 } else { 2969 ring_data[q->tqp_index + queue_num].ring = ring; 2970 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; 2971 ring->io_base = q->io_base; 2972 } 2973 2974 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 2975 2976 ring->tqp = q; 2977 ring->desc = NULL; 2978 ring->desc_cb = NULL; 2979 ring->dev = priv->dev; 2980 ring->desc_dma_addr = 0; 2981 ring->buf_size = q->buf_size; 2982 ring->desc_num = q->desc_num; 2983 ring->next_to_use = 0; 2984 ring->next_to_clean = 0; 2985 2986 return 0; 2987 } 2988 2989 static int hns3_queue_to_ring(struct hnae3_queue *tqp, 2990 struct hns3_nic_priv *priv) 2991 { 2992 int ret; 2993 2994 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 2995 if (ret) 2996 return ret; 2997 2998 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 2999 if (ret) 3000 return ret; 3001 3002 return 0; 3003 } 3004 3005 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 3006 { 3007 struct hnae3_handle *h = priv->ae_handle; 3008 struct pci_dev *pdev = h->pdev; 3009 int i, ret; 3010 3011 priv->ring_data = devm_kzalloc(&pdev->dev, 3012 array3_size(h->kinfo.num_tqps, 3013 
sizeof(*priv->ring_data), 3014 2), 3015 GFP_KERNEL); 3016 if (!priv->ring_data) 3017 return -ENOMEM; 3018 3019 for (i = 0; i < h->kinfo.num_tqps; i++) { 3020 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); 3021 if (ret) 3022 goto err; 3023 } 3024 3025 return 0; 3026 err: 3027 devm_kfree(&pdev->dev, priv->ring_data); 3028 return ret; 3029 } 3030 3031 static void hns3_put_ring_config(struct hns3_nic_priv *priv) 3032 { 3033 struct hnae3_handle *h = priv->ae_handle; 3034 int i; 3035 3036 for (i = 0; i < h->kinfo.num_tqps; i++) { 3037 devm_kfree(priv->dev, priv->ring_data[i].ring); 3038 devm_kfree(priv->dev, 3039 priv->ring_data[i + h->kinfo.num_tqps].ring); 3040 } 3041 devm_kfree(priv->dev, priv->ring_data); 3042 } 3043 3044 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) 3045 { 3046 int ret; 3047 3048 if (ring->desc_num <= 0 || ring->buf_size <= 0) 3049 return -EINVAL; 3050 3051 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), 3052 GFP_KERNEL); 3053 if (!ring->desc_cb) { 3054 ret = -ENOMEM; 3055 goto out; 3056 } 3057 3058 ret = hns3_alloc_desc(ring); 3059 if (ret) 3060 goto out_with_desc_cb; 3061 3062 if (!HNAE3_IS_TX_RING(ring)) { 3063 ret = hns3_alloc_ring_buffers(ring); 3064 if (ret) 3065 goto out_with_desc; 3066 } 3067 3068 return 0; 3069 3070 out_with_desc: 3071 hns3_free_desc(ring); 3072 out_with_desc_cb: 3073 kfree(ring->desc_cb); 3074 ring->desc_cb = NULL; 3075 out: 3076 return ret; 3077 } 3078 3079 static void hns3_fini_ring(struct hns3_enet_ring *ring) 3080 { 3081 hns3_free_desc(ring); 3082 kfree(ring->desc_cb); 3083 ring->desc_cb = NULL; 3084 ring->next_to_clean = 0; 3085 ring->next_to_use = 0; 3086 } 3087 3088 static int hns3_buf_size2type(u32 buf_size) 3089 { 3090 int bd_size_type; 3091 3092 switch (buf_size) { 3093 case 512: 3094 bd_size_type = HNS3_BD_SIZE_512_TYPE; 3095 break; 3096 case 1024: 3097 bd_size_type = HNS3_BD_SIZE_1024_TYPE; 3098 break; 3099 case 2048: 3100 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 3101 break; 3102 case 4096: 3103 bd_size_type = HNS3_BD_SIZE_4096_TYPE; 3104 break; 3105 default: 3106 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 3107 } 3108 3109 return bd_size_type; 3110 } 3111 3112 static void hns3_init_ring_hw(struct hns3_enet_ring *ring) 3113 { 3114 dma_addr_t dma = ring->desc_dma_addr; 3115 struct hnae3_queue *q = ring->tqp; 3116 3117 if (!HNAE3_IS_TX_RING(ring)) { 3118 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, 3119 (u32)dma); 3120 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, 3121 (u32)((dma >> 31) >> 1)); 3122 3123 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, 3124 hns3_buf_size2type(ring->buf_size)); 3125 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, 3126 ring->desc_num / 8 - 1); 3127 3128 } else { 3129 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, 3130 (u32)dma); 3131 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 3132 (u32)((dma >> 31) >> 1)); 3133 3134 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 3135 ring->desc_num / 8 - 1); 3136 } 3137 } 3138 3139 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) 3140 { 3141 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 3142 int i; 3143 3144 for (i = 0; i < HNAE3_MAX_TC; i++) { 3145 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; 3146 int j; 3147 3148 if (!tc_info->enable) 3149 continue; 3150 3151 for (j = 0; j < tc_info->tqp_count; j++) { 3152 struct hnae3_queue *q; 3153 3154 q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp; 3155 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, 3156 tc_info->tc); 3157 } 3158 } 
3159 } 3160 3161 int hns3_init_all_ring(struct hns3_nic_priv *priv) 3162 { 3163 struct hnae3_handle *h = priv->ae_handle; 3164 int ring_num = h->kinfo.num_tqps * 2; 3165 int i, j; 3166 int ret; 3167 3168 for (i = 0; i < ring_num; i++) { 3169 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); 3170 if (ret) { 3171 dev_err(priv->dev, 3172 "Alloc ring memory fail! ret=%d\n", ret); 3173 goto out_when_alloc_ring_memory; 3174 } 3175 3176 u64_stats_init(&priv->ring_data[i].ring->syncp); 3177 } 3178 3179 return 0; 3180 3181 out_when_alloc_ring_memory: 3182 for (j = i - 1; j >= 0; j--) 3183 hns3_fini_ring(priv->ring_data[j].ring); 3184 3185 return -ENOMEM; 3186 } 3187 3188 int hns3_uninit_all_ring(struct hns3_nic_priv *priv) 3189 { 3190 struct hnae3_handle *h = priv->ae_handle; 3191 int i; 3192 3193 for (i = 0; i < h->kinfo.num_tqps; i++) { 3194 if (h->ae_algo->ops->reset_queue) 3195 h->ae_algo->ops->reset_queue(h, i); 3196 3197 hns3_fini_ring(priv->ring_data[i].ring); 3198 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); 3199 } 3200 return 0; 3201 } 3202 3203 /* Set mac addr if it is configured. or leave it to the AE driver */ 3204 static void hns3_init_mac_addr(struct net_device *netdev, bool init) 3205 { 3206 struct hns3_nic_priv *priv = netdev_priv(netdev); 3207 struct hnae3_handle *h = priv->ae_handle; 3208 u8 mac_addr_temp[ETH_ALEN]; 3209 3210 if (h->ae_algo->ops->get_mac_addr && init) { 3211 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 3212 ether_addr_copy(netdev->dev_addr, mac_addr_temp); 3213 } 3214 3215 /* Check if the MAC address is valid, if not get a random one */ 3216 if (!is_valid_ether_addr(netdev->dev_addr)) { 3217 eth_hw_addr_random(netdev); 3218 dev_warn(priv->dev, "using random MAC address %pM\n", 3219 netdev->dev_addr); 3220 } 3221 3222 if (h->ae_algo->ops->set_mac_addr) 3223 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 3224 3225 } 3226 3227 static int hns3_restore_fd_rules(struct net_device *netdev) 3228 { 3229 struct hnae3_handle *h = hns3_get_handle(netdev); 3230 int ret = 0; 3231 3232 if (h->ae_algo->ops->restore_fd_rules) 3233 ret = h->ae_algo->ops->restore_fd_rules(h); 3234 3235 return ret; 3236 } 3237 3238 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) 3239 { 3240 struct hnae3_handle *h = hns3_get_handle(netdev); 3241 3242 if (h->ae_algo->ops->del_all_fd_entries) 3243 h->ae_algo->ops->del_all_fd_entries(h, clear_list); 3244 } 3245 3246 static void hns3_nic_set_priv_ops(struct net_device *netdev) 3247 { 3248 struct hns3_nic_priv *priv = netdev_priv(netdev); 3249 3250 if ((netdev->features & NETIF_F_TSO) || 3251 (netdev->features & NETIF_F_TSO6)) { 3252 priv->ops.fill_desc = hns3_fill_desc_tso; 3253 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; 3254 } else { 3255 priv->ops.fill_desc = hns3_fill_desc; 3256 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; 3257 } 3258 } 3259 3260 static int hns3_client_init(struct hnae3_handle *handle) 3261 { 3262 struct pci_dev *pdev = handle->pdev; 3263 u16 alloc_tqps, max_rss_size; 3264 struct hns3_nic_priv *priv; 3265 struct net_device *netdev; 3266 int ret; 3267 3268 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, 3269 &max_rss_size); 3270 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); 3271 if (!netdev) 3272 return -ENOMEM; 3273 3274 priv = netdev_priv(netdev); 3275 priv->dev = &pdev->dev; 3276 priv->netdev = netdev; 3277 priv->ae_handle = handle; 3278 priv->ae_handle->last_reset_time = jiffies; 3279 priv->tx_timeout_count = 0; 
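	/* cross-link the hnae3 handle and the netdev private area so that
	 * either layer can reach the other during later callbacks
	 */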
3280 3281 handle->kinfo.netdev = netdev; 3282 handle->priv = (void *)priv; 3283 3284 hns3_init_mac_addr(netdev, true); 3285 3286 hns3_set_default_feature(netdev); 3287 3288 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; 3289 netdev->priv_flags |= IFF_UNICAST_FLT; 3290 netdev->netdev_ops = &hns3_nic_netdev_ops; 3291 SET_NETDEV_DEV(netdev, &pdev->dev); 3292 hns3_ethtool_set_ops(netdev); 3293 hns3_nic_set_priv_ops(netdev); 3294 3295 /* Carrier off reporting is important to ethtool even BEFORE open */ 3296 netif_carrier_off(netdev); 3297 3298 if (handle->flags & HNAE3_SUPPORT_VF) 3299 handle->reset_level = HNAE3_VF_RESET; 3300 else 3301 handle->reset_level = HNAE3_FUNC_RESET; 3302 3303 ret = hns3_get_ring_config(priv); 3304 if (ret) { 3305 ret = -ENOMEM; 3306 goto out_get_ring_cfg; 3307 } 3308 3309 ret = hns3_nic_alloc_vector_data(priv); 3310 if (ret) { 3311 ret = -ENOMEM; 3312 goto out_alloc_vector_data; 3313 } 3314 3315 ret = hns3_nic_init_vector_data(priv); 3316 if (ret) { 3317 ret = -ENOMEM; 3318 goto out_init_vector_data; 3319 } 3320 3321 ret = hns3_init_all_ring(priv); 3322 if (ret) { 3323 ret = -ENOMEM; 3324 goto out_init_ring_data; 3325 } 3326 3327 ret = register_netdev(netdev); 3328 if (ret) { 3329 dev_err(priv->dev, "probe register netdev fail!\n"); 3330 goto out_reg_netdev_fail; 3331 } 3332 3333 hns3_dcbnl_setup(handle); 3334 3335 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ 3336 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 3337 3338 return ret; 3339 3340 out_reg_netdev_fail: 3341 out_init_ring_data: 3342 (void)hns3_nic_uninit_vector_data(priv); 3343 out_init_vector_data: 3344 hns3_nic_dealloc_vector_data(priv); 3345 out_alloc_vector_data: 3346 priv->ring_data = NULL; 3347 out_get_ring_cfg: 3348 priv->ae_handle = NULL; 3349 free_netdev(netdev); 3350 return ret; 3351 } 3352 3353 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) 3354 { 3355 struct net_device *netdev = handle->kinfo.netdev; 3356 struct hns3_nic_priv *priv = netdev_priv(netdev); 3357 int ret; 3358 3359 hns3_remove_hw_addr(netdev); 3360 3361 if (netdev->reg_state != NETREG_UNINITIALIZED) 3362 unregister_netdev(netdev); 3363 3364 hns3_del_all_fd_rules(netdev, true); 3365 3366 hns3_force_clear_all_rx_ring(handle); 3367 3368 ret = hns3_nic_uninit_vector_data(priv); 3369 if (ret) 3370 netdev_err(netdev, "uninit vector error\n"); 3371 3372 ret = hns3_nic_dealloc_vector_data(priv); 3373 if (ret) 3374 netdev_err(netdev, "dealloc vector error\n"); 3375 3376 ret = hns3_uninit_all_ring(priv); 3377 if (ret) 3378 netdev_err(netdev, "uninit ring error\n"); 3379 3380 hns3_put_ring_config(priv); 3381 3382 priv->ring_data = NULL; 3383 3384 free_netdev(netdev); 3385 } 3386 3387 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) 3388 { 3389 struct net_device *netdev = handle->kinfo.netdev; 3390 3391 if (!netdev) 3392 return; 3393 3394 if (linkup) { 3395 netif_carrier_on(netdev); 3396 netif_tx_wake_all_queues(netdev); 3397 netdev_info(netdev, "link up\n"); 3398 } else { 3399 netif_carrier_off(netdev); 3400 netif_tx_stop_all_queues(netdev); 3401 netdev_info(netdev, "link down\n"); 3402 } 3403 } 3404 3405 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) 3406 { 3407 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3408 struct net_device *ndev = kinfo->netdev; 3409 bool if_running; 3410 int ret; 3411 3412 if (tc > HNAE3_MAX_TC) 3413 return -EINVAL; 3414 3415 if (!ndev) 3416 return -ENODEV; 3417 3418 if_running = netif_running(ndev); 3419 
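	/* stop the netdev (if it was running) before the TC to queue
	 * mapping is updated; it is reopened again at err_out below
	 */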
3420 	if (if_running) {
3421 		(void)hns3_nic_net_stop(ndev);
3422 		msleep(100);
3423 	}
3424 
3425 	ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3426 		kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3427 	if (ret)
3428 		goto err_out;
3429 
3430 	ret = hns3_nic_set_real_num_queue(ndev);
3431 
3432 err_out:
3433 	if (if_running)
3434 		(void)hns3_nic_net_open(ndev);
3435 
3436 	return ret;
3437 }
3438 
3439 static void hns3_recover_hw_addr(struct net_device *ndev)
3440 {
3441 	struct netdev_hw_addr_list *list;
3442 	struct netdev_hw_addr *ha, *tmp;
3443 
3444 	/* go through and sync uc_addr entries to the device */
3445 	list = &ndev->uc;
3446 	list_for_each_entry_safe(ha, tmp, &list->list, list)
3447 		hns3_nic_uc_sync(ndev, ha->addr);
3448 
3449 	/* go through and sync mc_addr entries to the device */
3450 	list = &ndev->mc;
3451 	list_for_each_entry_safe(ha, tmp, &list->list, list)
3452 		hns3_nic_mc_sync(ndev, ha->addr);
3453 }
3454 
3455 static void hns3_remove_hw_addr(struct net_device *netdev)
3456 {
3457 	struct netdev_hw_addr_list *list;
3458 	struct netdev_hw_addr *ha, *tmp;
3459 
3460 	hns3_nic_uc_unsync(netdev, netdev->dev_addr);
3461 
3462 	/* go through and unsync uc_addr entries to the device */
3463 	list = &netdev->uc;
3464 	list_for_each_entry_safe(ha, tmp, &list->list, list)
3465 		hns3_nic_uc_unsync(netdev, ha->addr);
3466 
3467 	/* go through and unsync mc_addr entries to the device */
3468 	list = &netdev->mc;
3469 	list_for_each_entry_safe(ha, tmp, &list->list, list)
3470 		if (ha->refcount > 1)
3471 			hns3_nic_mc_unsync(netdev, ha->addr);
3472 }
3473 
3474 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
3475 {
3476 	while (ring->next_to_clean != ring->next_to_use) {
3477 		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
3478 		hns3_free_buffer_detach(ring, ring->next_to_clean);
3479 		ring_ptr_move_fw(ring, next_to_clean);
3480 	}
3481 }
3482 
3483 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
3484 {
3485 	struct hns3_desc_cb res_cbs;
3486 	int ret;
3487 
3488 	while (ring->next_to_use != ring->next_to_clean) {
3489 		/* When a buffer is not reused, its memory has been
3490 		 * freed in hns3_handle_rx_bd or will be freed by
3491 		 * stack, so we need to replace the buffer here.
3492 		 */
3493 		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3494 			ret = hns3_reserve_buffer_map(ring, &res_cbs);
3495 			if (ret) {
3496 				u64_stats_update_begin(&ring->syncp);
3497 				ring->stats.sw_err_cnt++;
3498 				u64_stats_update_end(&ring->syncp);
3499 				/* if allocating a new buffer fails, exit
3500 				 * directly and re-clear it in the up flow.
3501 				 */
3502 				netdev_warn(ring->tqp->handle->kinfo.netdev,
3503 					    "reserve buffer map failed, ret = %d\n",
3504 					    ret);
3505 				return ret;
3506 			}
3507 			hns3_replace_buffer(ring, ring->next_to_use,
3508 					    &res_cbs);
3509 		}
3510 		ring_ptr_move_fw(ring, next_to_use);
3511 	}
3512 
3513 	return 0;
3514 }
3515 
3516 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
3517 {
3518 	while (ring->next_to_use != ring->next_to_clean) {
3519 		/* When a buffer is not reused, its memory has been
3520 		 * freed in hns3_handle_rx_bd or will be freed by
3521 		 * stack, so only need to unmap the buffer here.
3522 		 */
3523 		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3524 			hns3_unmap_buffer(ring,
3525 					  &ring->desc_cb[ring->next_to_use]);
3526 			ring->desc_cb[ring->next_to_use].dma = 0;
3527 		}
3528 
3529 		ring_ptr_move_fw(ring, next_to_use);
3530 	}
3531 }
3532 
3533 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
3534 {
3535 	struct net_device *ndev = h->kinfo.netdev;
3536 	struct hns3_nic_priv *priv = netdev_priv(ndev);
3537 	struct hns3_enet_ring *ring;
3538 	u32 i;
3539 
3540 	for (i = 0; i < h->kinfo.num_tqps; i++) {
3541 		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3542 		hns3_force_clear_rx_ring(ring);
3543 	}
3544 }
3545 
3546 static void hns3_clear_all_ring(struct hnae3_handle *h)
3547 {
3548 	struct net_device *ndev = h->kinfo.netdev;
3549 	struct hns3_nic_priv *priv = netdev_priv(ndev);
3550 	u32 i;
3551 
3552 	for (i = 0; i < h->kinfo.num_tqps; i++) {
3553 		struct netdev_queue *dev_queue;
3554 		struct hns3_enet_ring *ring;
3555 
3556 		ring = priv->ring_data[i].ring;
3557 		hns3_clear_tx_ring(ring);
3558 		dev_queue = netdev_get_tx_queue(ndev,
3559 						priv->ring_data[i].queue_index);
3560 		netdev_tx_reset_queue(dev_queue);
3561 
3562 		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3563 		/* Continue to clear other rings even if clearing some
3564 		 * rings failed.
3565 		 */
3566 		hns3_clear_rx_ring(ring);
3567 	}
3568 }
3569 
3570 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
3571 {
3572 	struct net_device *ndev = h->kinfo.netdev;
3573 	struct hns3_nic_priv *priv = netdev_priv(ndev);
3574 	struct hns3_enet_ring *rx_ring;
3575 	int i, j;
3576 	int ret;
3577 
3578 	for (i = 0; i < h->kinfo.num_tqps; i++) {
3579 		h->ae_algo->ops->reset_queue(h, i);
3580 		hns3_init_ring_hw(priv->ring_data[i].ring);
3581 
3582 		/* We need to clear tx ring here because self test will
3583 		 * use the ring and will not run down before up
3584 		 */
3585 		hns3_clear_tx_ring(priv->ring_data[i].ring);
3586 		priv->ring_data[i].ring->next_to_clean = 0;
3587 		priv->ring_data[i].ring->next_to_use = 0;
3588 
3589 		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3590 		hns3_init_ring_hw(rx_ring);
3591 		ret = hns3_clear_rx_ring(rx_ring);
3592 		if (ret)
3593 			return ret;
3594 
3595 		/* We cannot know the hardware head and tail when this
3596 		 * function is called in reset flow, so we reuse all desc.
3597 		 */
3598 		for (j = 0; j < rx_ring->desc_num; j++)
3599 			hns3_reuse_buffer(rx_ring, j);
3600 
3601 		rx_ring->next_to_clean = 0;
3602 		rx_ring->next_to_use = 0;
3603 	}
3604 
3605 	hns3_init_tx_ring_tc(priv);
3606 
3607 	return 0;
3608 }
3609 
3610 static void hns3_store_coal(struct hns3_nic_priv *priv)
3611 {
3612 	/* ethtool only supports setting and querying one coalesce
3613 	 * configuration for now, so save vector 0's coalesce
3614 	 * configuration here in order to restore it.
3615 */ 3616 memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, 3617 sizeof(struct hns3_enet_coalesce)); 3618 memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal, 3619 sizeof(struct hns3_enet_coalesce)); 3620 } 3621 3622 static void hns3_restore_coal(struct hns3_nic_priv *priv) 3623 { 3624 u16 vector_num = priv->vector_num; 3625 int i; 3626 3627 for (i = 0; i < vector_num; i++) { 3628 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal, 3629 sizeof(struct hns3_enet_coalesce)); 3630 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal, 3631 sizeof(struct hns3_enet_coalesce)); 3632 } 3633 } 3634 3635 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) 3636 { 3637 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3638 struct net_device *ndev = kinfo->netdev; 3639 3640 if (!netif_running(ndev)) 3641 return 0; 3642 3643 return hns3_nic_net_stop(ndev); 3644 } 3645 3646 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) 3647 { 3648 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3649 int ret = 0; 3650 3651 if (netif_running(kinfo->netdev)) { 3652 ret = hns3_nic_net_up(kinfo->netdev); 3653 if (ret) { 3654 netdev_err(kinfo->netdev, 3655 "hns net up fail, ret=%d!\n", ret); 3656 return ret; 3657 } 3658 handle->last_reset_time = jiffies; 3659 } 3660 3661 return ret; 3662 } 3663 3664 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) 3665 { 3666 struct net_device *netdev = handle->kinfo.netdev; 3667 struct hns3_nic_priv *priv = netdev_priv(netdev); 3668 bool vlan_filter_enable; 3669 int ret; 3670 3671 hns3_init_mac_addr(netdev, false); 3672 hns3_recover_hw_addr(netdev); 3673 hns3_update_promisc_mode(netdev, handle->netdev_flags); 3674 vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true; 3675 hns3_enable_vlan_filter(netdev, vlan_filter_enable); 3676 3677 3678 /* Hardware table is only clear when pf resets */ 3679 if (!(handle->flags & HNAE3_SUPPORT_VF)) 3680 hns3_restore_vlan(netdev); 3681 3682 hns3_restore_fd_rules(netdev); 3683 3684 /* Carrier off reporting is important to ethtool even BEFORE open */ 3685 netif_carrier_off(netdev); 3686 3687 hns3_restore_coal(priv); 3688 3689 ret = hns3_nic_init_vector_data(priv); 3690 if (ret) 3691 return ret; 3692 3693 ret = hns3_init_all_ring(priv); 3694 if (ret) { 3695 hns3_nic_uninit_vector_data(priv); 3696 priv->ring_data = NULL; 3697 } 3698 3699 return ret; 3700 } 3701 3702 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) 3703 { 3704 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 3705 struct net_device *netdev = handle->kinfo.netdev; 3706 struct hns3_nic_priv *priv = netdev_priv(netdev); 3707 int ret; 3708 3709 hns3_force_clear_all_rx_ring(handle); 3710 3711 ret = hns3_nic_uninit_vector_data(priv); 3712 if (ret) { 3713 netdev_err(netdev, "uninit vector error\n"); 3714 return ret; 3715 } 3716 3717 hns3_store_coal(priv); 3718 3719 ret = hns3_uninit_all_ring(priv); 3720 if (ret) 3721 netdev_err(netdev, "uninit ring error\n"); 3722 3723 /* it is cumbersome for hardware to pick-and-choose entries for deletion 3724 * from table space. 
Hence, for function reset software intervention is 3725 * required to delete the entries 3726 */ 3727 if (hns3_dev_ongoing_func_reset(ae_dev)) { 3728 hns3_remove_hw_addr(netdev); 3729 hns3_del_all_fd_rules(netdev, false); 3730 } 3731 3732 return ret; 3733 } 3734 3735 static int hns3_reset_notify(struct hnae3_handle *handle, 3736 enum hnae3_reset_notify_type type) 3737 { 3738 int ret = 0; 3739 3740 switch (type) { 3741 case HNAE3_UP_CLIENT: 3742 ret = hns3_reset_notify_up_enet(handle); 3743 break; 3744 case HNAE3_DOWN_CLIENT: 3745 ret = hns3_reset_notify_down_enet(handle); 3746 break; 3747 case HNAE3_INIT_CLIENT: 3748 ret = hns3_reset_notify_init_enet(handle); 3749 break; 3750 case HNAE3_UNINIT_CLIENT: 3751 ret = hns3_reset_notify_uninit_enet(handle); 3752 break; 3753 default: 3754 break; 3755 } 3756 3757 return ret; 3758 } 3759 3760 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) 3761 { 3762 struct hns3_nic_priv *priv = netdev_priv(netdev); 3763 struct hnae3_handle *h = hns3_get_handle(netdev); 3764 int ret; 3765 3766 ret = h->ae_algo->ops->set_channels(h, new_tqp_num); 3767 if (ret) 3768 return ret; 3769 3770 ret = hns3_get_ring_config(priv); 3771 if (ret) 3772 return ret; 3773 3774 ret = hns3_nic_alloc_vector_data(priv); 3775 if (ret) 3776 goto err_alloc_vector; 3777 3778 hns3_restore_coal(priv); 3779 3780 ret = hns3_nic_init_vector_data(priv); 3781 if (ret) 3782 goto err_uninit_vector; 3783 3784 ret = hns3_init_all_ring(priv); 3785 if (ret) 3786 goto err_put_ring; 3787 3788 return 0; 3789 3790 err_put_ring: 3791 hns3_put_ring_config(priv); 3792 err_uninit_vector: 3793 hns3_nic_uninit_vector_data(priv); 3794 err_alloc_vector: 3795 hns3_nic_dealloc_vector_data(priv); 3796 return ret; 3797 } 3798 3799 static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num) 3800 { 3801 return (new_tqp_num / num_tc) * num_tc; 3802 } 3803 3804 int hns3_set_channels(struct net_device *netdev, 3805 struct ethtool_channels *ch) 3806 { 3807 struct hns3_nic_priv *priv = netdev_priv(netdev); 3808 struct hnae3_handle *h = hns3_get_handle(netdev); 3809 struct hnae3_knic_private_info *kinfo = &h->kinfo; 3810 bool if_running = netif_running(netdev); 3811 u32 new_tqp_num = ch->combined_count; 3812 u16 org_tqp_num; 3813 int ret; 3814 3815 if (ch->rx_count || ch->tx_count) 3816 return -EINVAL; 3817 3818 if (new_tqp_num > hns3_get_max_available_channels(h) || 3819 new_tqp_num < kinfo->num_tc) { 3820 dev_err(&netdev->dev, 3821 "Change tqps fail, the tqp range is from %d to %d", 3822 kinfo->num_tc, 3823 hns3_get_max_available_channels(h)); 3824 return -EINVAL; 3825 } 3826 3827 new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num); 3828 if (kinfo->num_tqps == new_tqp_num) 3829 return 0; 3830 3831 if (if_running) 3832 hns3_nic_net_stop(netdev); 3833 3834 ret = hns3_nic_uninit_vector_data(priv); 3835 if (ret) { 3836 dev_err(&netdev->dev, 3837 "Unbind vector with tqp fail, nothing is changed"); 3838 goto open_netdev; 3839 } 3840 3841 hns3_store_coal(priv); 3842 3843 hns3_nic_dealloc_vector_data(priv); 3844 3845 hns3_uninit_all_ring(priv); 3846 hns3_put_ring_config(priv); 3847 3848 org_tqp_num = h->kinfo.num_tqps; 3849 ret = hns3_modify_tqp_num(netdev, new_tqp_num); 3850 if (ret) { 3851 ret = hns3_modify_tqp_num(netdev, org_tqp_num); 3852 if (ret) { 3853 /* If revert to old tqp failed, fatal error occurred */ 3854 dev_err(&netdev->dev, 3855 "Revert to old tqp num fail, ret=%d", ret); 3856 return ret; 3857 } 3858 dev_info(&netdev->dev, 3859 "Change tqp num fail, Revert to old tqp num"); 3860 
} 3861 3862 open_netdev: 3863 if (if_running) 3864 hns3_nic_net_open(netdev); 3865 3866 return ret; 3867 } 3868 3869 static const struct hnae3_client_ops client_ops = { 3870 .init_instance = hns3_client_init, 3871 .uninit_instance = hns3_client_uninit, 3872 .link_status_change = hns3_link_status_change, 3873 .setup_tc = hns3_client_setup_tc, 3874 .reset_notify = hns3_reset_notify, 3875 }; 3876 3877 /* hns3_init_module - Driver registration routine 3878 * hns3_init_module is the first routine called when the driver is 3879 * loaded. All it does is register with the PCI subsystem. 3880 */ 3881 static int __init hns3_init_module(void) 3882 { 3883 int ret; 3884 3885 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); 3886 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); 3887 3888 client.type = HNAE3_CLIENT_KNIC; 3889 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", 3890 hns3_driver_name); 3891 3892 client.ops = &client_ops; 3893 3894 INIT_LIST_HEAD(&client.node); 3895 3896 ret = hnae3_register_client(&client); 3897 if (ret) 3898 return ret; 3899 3900 ret = pci_register_driver(&hns3_driver); 3901 if (ret) 3902 hnae3_unregister_client(&client); 3903 3904 return ret; 3905 } 3906 module_init(hns3_init_module); 3907 3908 /* hns3_exit_module - Driver exit cleanup routine 3909 * hns3_exit_module is called just before the driver is removed 3910 * from memory. 3911 */ 3912 static void __exit hns3_exit_module(void) 3913 { 3914 pci_unregister_driver(&hns3_driver); 3915 hnae3_unregister_client(&client); 3916 } 3917 module_exit(hns3_exit_module); 3918 3919 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); 3920 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3921 MODULE_LICENSE("GPL"); 3922 MODULE_ALIAS("pci:hns-nic"); 3923 MODULE_VERSION(HNS3_MOD_VERSION); 3924