1 /* 2 * Copyright (c) 2016~2017 Hisilicon Limited. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 */ 9 10 #include <linux/dma-mapping.h> 11 #include <linux/etherdevice.h> 12 #include <linux/interrupt.h> 13 #include <linux/if_vlan.h> 14 #include <linux/ip.h> 15 #include <linux/ipv6.h> 16 #include <linux/module.h> 17 #include <linux/pci.h> 18 #include <linux/skbuff.h> 19 #include <linux/sctp.h> 20 #include <linux/vermagic.h> 21 #include <net/gre.h> 22 #include <net/pkt_cls.h> 23 #include <net/vxlan.h> 24 25 #include "hnae3.h" 26 #include "hns3_enet.h" 27 28 static const char hns3_driver_name[] = "hns3"; 29 const char hns3_driver_version[] = VERMAGIC_STRING; 30 static const char hns3_driver_string[] = 31 "Hisilicon Ethernet Network Driver for Hip08 Family"; 32 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; 33 static struct hnae3_client client; 34 35 /* hns3_pci_tbl - PCI Device ID Table 36 * 37 * Last entry must be all 0s 38 * 39 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 40 * Class, Class Mask, private data (not used) } 41 */ 42 static const struct pci_device_id hns3_pci_tbl[] = { 43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 46 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 48 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 49 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 50 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 51 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 52 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 53 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 54 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 55 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 57 /* required last entry */ 58 {0, } 59 }; 60 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); 61 62 static irqreturn_t hns3_irq_handle(int irq, void *dev) 63 { 64 struct hns3_enet_tqp_vector *tqp_vector = dev; 65 66 napi_schedule(&tqp_vector->napi); 67 68 return IRQ_HANDLED; 69 } 70 71 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) 72 { 73 struct hns3_enet_tqp_vector *tqp_vectors; 74 unsigned int i; 75 76 for (i = 0; i < priv->vector_num; i++) { 77 tqp_vectors = &priv->tqp_vector[i]; 78 79 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) 80 continue; 81 82 /* release the irq resource */ 83 free_irq(tqp_vectors->vector_irq, tqp_vectors); 84 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; 85 } 86 } 87 88 static int hns3_nic_init_irq(struct hns3_nic_priv *priv) 89 { 90 struct hns3_enet_tqp_vector *tqp_vectors; 91 int txrx_int_idx = 0; 92 int rx_int_idx = 0; 93 int tx_int_idx = 0; 94 unsigned int i; 95 int ret; 96 97 for (i = 0; i < priv->vector_num; i++) { 98 tqp_vectors = &priv->tqp_vector[i]; 99 100 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) 101 continue; 102 103 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { 104 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, 105 "%s-%s-%d", priv->netdev->name, "TxRx", 106 txrx_int_idx++); 107 txrx_int_idx++; 108 } else if (tqp_vectors->rx_group.ring) { 109 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, 110 "%s-%s-%d", priv->netdev->name, "Rx", 111 rx_int_idx++); 112 } else if (tqp_vectors->tx_group.ring) { 113 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, 114 "%s-%s-%d", priv->netdev->name, "Tx", 115 tx_int_idx++); 116 } else { 117 /* Skip this unused q_vector */ 118 continue; 119 } 120 121 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; 122 123 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, 124 tqp_vectors->name, 125 tqp_vectors); 126 if (ret) { 127 netdev_err(priv->netdev, "request irq(%d) fail\n", 128 tqp_vectors->vector_irq); 129 return ret; 130 } 131 132 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; 133 } 134 135 return 0; 136 } 137 138 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, 139 u32 mask_en) 140 { 141 writel(mask_en, tqp_vector->mask_addr); 142 } 143 144 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) 145 { 146 napi_enable(&tqp_vector->napi); 147 148 /* enable vector */ 149 hns3_mask_vector_irq(tqp_vector, 1); 150 } 151 152 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) 153 { 154 /* disable vector */ 155 hns3_mask_vector_irq(tqp_vector, 0); 156 157 disable_irq(tqp_vector->vector_irq); 158 napi_disable(&tqp_vector->napi); 159 } 160 161 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, 162 u32 rl_value) 163 { 164 u32 rl_reg = hns3_rl_usec_to_reg(rl_value); 165 166 /* this defines the configuration for RL (Interrupt Rate Limiter). 167 * Rl defines rate of interrupts i.e. number of interrupts-per-second 168 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing 169 */ 170 171 if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable && 172 !tqp_vector->rx_group.coal.gl_adapt_enable) 173 /* According to the hardware, the range of rl_reg is 174 * 0-59 and the unit is 4. 175 */ 176 rl_reg |= HNS3_INT_RL_ENABLE_MASK; 177 178 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); 179 } 180 181 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, 182 u32 gl_value) 183 { 184 u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value); 185 186 writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); 187 } 188 189 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, 190 u32 gl_value) 191 { 192 u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value); 193 194 writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); 195 } 196 197 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, 198 struct hns3_nic_priv *priv) 199 { 200 struct hnae3_handle *h = priv->ae_handle; 201 202 /* initialize the configuration for interrupt coalescing. 203 * 1. GL (Interrupt Gap Limiter) 204 * 2. RL (Interrupt Rate Limiter) 205 */ 206 207 /* Default: enable interrupt coalescing self-adaptive and GL */ 208 tqp_vector->tx_group.coal.gl_adapt_enable = 1; 209 tqp_vector->rx_group.coal.gl_adapt_enable = 1; 210 211 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; 212 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; 213 214 /* Default: disable RL */ 215 h->kinfo.int_rl_setting = 0; 216 217 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; 218 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; 219 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; 220 } 221 222 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector, 223 struct hns3_nic_priv *priv) 224 { 225 struct hnae3_handle *h = priv->ae_handle; 226 227 hns3_set_vector_coalesce_tx_gl(tqp_vector, 228 tqp_vector->tx_group.coal.int_gl); 229 hns3_set_vector_coalesce_rx_gl(tqp_vector, 230 tqp_vector->rx_group.coal.int_gl); 231 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); 232 } 233 234 static int hns3_nic_set_real_num_queue(struct net_device *netdev) 235 { 236 struct hnae3_handle *h = hns3_get_handle(netdev); 237 struct hnae3_knic_private_info *kinfo = &h->kinfo; 238 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; 239 int ret; 240 241 ret = netif_set_real_num_tx_queues(netdev, queue_size); 242 if (ret) { 243 netdev_err(netdev, 244 "netif_set_real_num_tx_queues fail, ret=%d!\n", 245 ret); 246 return ret; 247 } 248 249 ret = netif_set_real_num_rx_queues(netdev, queue_size); 250 if (ret) { 251 netdev_err(netdev, 252 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); 253 return ret; 254 } 255 256 return 0; 257 } 258 259 static u16 hns3_get_max_available_channels(struct hnae3_handle *h) 260 { 261 u16 free_tqps, max_rss_size, max_tqps; 262 263 h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); 264 max_tqps = h->kinfo.num_tc * max_rss_size; 265 266 return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); 267 } 268 269 static int hns3_nic_net_up(struct net_device *netdev) 270 { 271 struct hns3_nic_priv *priv = netdev_priv(netdev); 272 struct hnae3_handle *h = priv->ae_handle; 273 int i, j; 274 int ret; 275 276 /* get irq resource for all vectors */ 277 ret = hns3_nic_init_irq(priv); 278 if (ret) { 279 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret); 280 return ret; 281 } 282 283 /* enable the vectors */ 284 for (i = 0; i < priv->vector_num; i++) 285 hns3_vector_enable(&priv->tqp_vector[i]); 286 287 /* start the ae_dev */ 288 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; 289 if (ret) 290 goto out_start_err; 291 292 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 293 294 return 0; 295 296 out_start_err: 297 for (j = i - 1; j >= 0; j--) 298 hns3_vector_disable(&priv->tqp_vector[j]); 299 300 hns3_nic_uninit_irq(priv); 301 302 return ret; 303 } 304 305 static int hns3_nic_net_open(struct net_device *netdev) 306 { 307 struct hns3_nic_priv *priv = netdev_priv(netdev); 308 int ret; 309 310 netif_carrier_off(netdev); 311 312 ret = hns3_nic_set_real_num_queue(netdev); 313 if (ret) 314 return ret; 315 316 ret = hns3_nic_net_up(netdev); 317 if (ret) { 318 netdev_err(netdev, 319 "hns net up fail, ret=%d!\n", ret); 320 return ret; 321 } 322 323 priv->ae_handle->last_reset_time = jiffies; 324 return 0; 325 } 326 327 static void hns3_nic_net_down(struct net_device *netdev) 328 { 329 struct hns3_nic_priv *priv = netdev_priv(netdev); 330 const struct hnae3_ae_ops *ops; 331 int i; 332 333 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 334 return; 335 336 /* stop ae_dev */ 337 ops = priv->ae_handle->ae_algo->ops; 338 if (ops->stop) 339 ops->stop(priv->ae_handle); 340 341 /* disable vectors */ 342 for (i = 0; i < priv->vector_num; i++) 343 hns3_vector_disable(&priv->tqp_vector[i]); 344 345 /* free irq resources */ 346 hns3_nic_uninit_irq(priv); 347 } 348 349 static int hns3_nic_net_stop(struct net_device *netdev) 350 { 351 netif_tx_stop_all_queues(netdev); 352 netif_carrier_off(netdev); 353 354 hns3_nic_net_down(netdev); 355 356 return 0; 357 } 358 359 static int hns3_nic_uc_sync(struct net_device *netdev, 360 const unsigned char *addr) 361 { 362 struct hnae3_handle *h = hns3_get_handle(netdev); 363 364 if (h->ae_algo->ops->add_uc_addr) 365 return h->ae_algo->ops->add_uc_addr(h, addr); 366 367 return 0; 368 } 369 370 static int hns3_nic_uc_unsync(struct net_device *netdev, 371 const unsigned char *addr) 372 { 373 struct hnae3_handle *h = hns3_get_handle(netdev); 374 375 if (h->ae_algo->ops->rm_uc_addr) 376 return h->ae_algo->ops->rm_uc_addr(h, addr); 377 378 return 0; 379 } 380 381 static int hns3_nic_mc_sync(struct net_device *netdev, 382 const unsigned char *addr) 383 { 384 struct hnae3_handle *h = hns3_get_handle(netdev); 385 386 if (h->ae_algo->ops->add_mc_addr) 387 return h->ae_algo->ops->add_mc_addr(h, addr); 388 389 return 0; 390 } 391 392 static int hns3_nic_mc_unsync(struct net_device *netdev, 393 const unsigned char *addr) 394 { 395 struct hnae3_handle *h = hns3_get_handle(netdev); 396 397 if (h->ae_algo->ops->rm_mc_addr) 398 return h->ae_algo->ops->rm_mc_addr(h, addr); 399 400 return 0; 401 } 402 403 static void hns3_nic_set_rx_mode(struct net_device *netdev) 404 { 405 struct hnae3_handle *h = hns3_get_handle(netdev); 406 407 if (h->ae_algo->ops->set_promisc_mode) { 408 if (netdev->flags & IFF_PROMISC) 409 h->ae_algo->ops->set_promisc_mode(h, 1); 410 else 411 h->ae_algo->ops->set_promisc_mode(h, 0); 412 } 413 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) 414 netdev_err(netdev, "sync uc address fail\n"); 415 if (netdev->flags & IFF_MULTICAST) 416 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) 417 netdev_err(netdev, "sync mc address fail\n"); 418 } 419 420 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, 421 u16 *mss, u32 *type_cs_vlan_tso) 422 { 423 u32 l4_offset, hdr_len; 424 union l3_hdr_info l3; 425 union l4_hdr_info l4; 426 u32 l4_paylen; 427 int ret; 428 429 if (!skb_is_gso(skb)) 430 return 0; 431 432 ret = skb_cow_head(skb, 0); 433 if (ret) 434 return ret; 435 436 l3.hdr = skb_network_header(skb); 437 l4.hdr = skb_transport_header(skb); 438 439 /* Software should clear the IPv4's checksum field when tso is 440 * needed. 441 */ 442 if (l3.v4->version == 4) 443 l3.v4->check = 0; 444 445 /* tunnel packet.*/ 446 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 447 SKB_GSO_GRE_CSUM | 448 SKB_GSO_UDP_TUNNEL | 449 SKB_GSO_UDP_TUNNEL_CSUM)) { 450 if ((!(skb_shinfo(skb)->gso_type & 451 SKB_GSO_PARTIAL)) && 452 (skb_shinfo(skb)->gso_type & 453 SKB_GSO_UDP_TUNNEL_CSUM)) { 454 /* Software should clear the udp's checksum 455 * field when tso is needed. 456 */ 457 l4.udp->check = 0; 458 } 459 /* reset l3&l4 pointers from outer to inner headers */ 460 l3.hdr = skb_inner_network_header(skb); 461 l4.hdr = skb_inner_transport_header(skb); 462 463 /* Software should clear the IPv4's checksum field when 464 * tso is needed. 465 */ 466 if (l3.v4->version == 4) 467 l3.v4->check = 0; 468 } 469 470 /* normal or tunnel packet*/ 471 l4_offset = l4.hdr - skb->data; 472 hdr_len = (l4.tcp->doff * 4) + l4_offset; 473 474 /* remove payload length from inner pseudo checksum when tso*/ 475 l4_paylen = skb->len - l4_offset; 476 csum_replace_by_diff(&l4.tcp->check, 477 (__force __wsum)htonl(l4_paylen)); 478 479 /* find the txbd field values */ 480 *paylen = skb->len - hdr_len; 481 hnae_set_bit(*type_cs_vlan_tso, 482 HNS3_TXD_TSO_B, 1); 483 484 /* get MSS for TSO */ 485 *mss = skb_shinfo(skb)->gso_size; 486 487 return 0; 488 } 489 490 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, 491 u8 *il4_proto) 492 { 493 union { 494 struct iphdr *v4; 495 struct ipv6hdr *v6; 496 unsigned char *hdr; 497 } l3; 498 unsigned char *l4_hdr; 499 unsigned char *exthdr; 500 u8 l4_proto_tmp; 501 __be16 frag_off; 502 503 /* find outer header point */ 504 l3.hdr = skb_network_header(skb); 505 l4_hdr = skb_transport_header(skb); 506 507 if (skb->protocol == htons(ETH_P_IPV6)) { 508 exthdr = l3.hdr + sizeof(*l3.v6); 509 l4_proto_tmp = l3.v6->nexthdr; 510 if (l4_hdr != exthdr) 511 ipv6_skip_exthdr(skb, exthdr - skb->data, 512 &l4_proto_tmp, &frag_off); 513 } else if (skb->protocol == htons(ETH_P_IP)) { 514 l4_proto_tmp = l3.v4->protocol; 515 } else { 516 return -EINVAL; 517 } 518 519 *ol4_proto = l4_proto_tmp; 520 521 /* tunnel packet */ 522 if (!skb->encapsulation) { 523 *il4_proto = 0; 524 return 0; 525 } 526 527 /* find inner header point */ 528 l3.hdr = skb_inner_network_header(skb); 529 l4_hdr = skb_inner_transport_header(skb); 530 531 if (l3.v6->version == 6) { 532 exthdr = l3.hdr + sizeof(*l3.v6); 533 l4_proto_tmp = l3.v6->nexthdr; 534 if (l4_hdr != exthdr) 535 ipv6_skip_exthdr(skb, exthdr - skb->data, 536 &l4_proto_tmp, &frag_off); 537 } else if (l3.v4->version == 4) { 538 l4_proto_tmp = l3.v4->protocol; 539 } 540 541 *il4_proto = l4_proto_tmp; 542 543 return 0; 544 } 545 546 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, 547 u8 il4_proto, u32 *type_cs_vlan_tso, 548 u32 *ol_type_vlan_len_msec) 549 { 550 union { 551 struct iphdr *v4; 552 struct ipv6hdr *v6; 553 unsigned char *hdr; 554 } l3; 555 union { 556 struct tcphdr *tcp; 557 struct udphdr *udp; 558 struct gre_base_hdr *gre; 559 unsigned char *hdr; 560 } l4; 561 unsigned char *l2_hdr; 562 u8 l4_proto = ol4_proto; 563 u32 ol2_len; 564 u32 ol3_len; 565 u32 ol4_len; 566 u32 l2_len; 567 u32 l3_len; 568 569 l3.hdr = skb_network_header(skb); 570 l4.hdr = skb_transport_header(skb); 571 572 /* compute L2 header size for normal packet, defined in 2 Bytes */ 573 l2_len = l3.hdr - skb->data; 574 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, 575 HNS3_TXD_L2LEN_S, l2_len >> 1); 576 577 /* tunnel packet*/ 578 if (skb->encapsulation) { 579 /* compute OL2 header size, defined in 2 Bytes */ 580 ol2_len = l2_len; 581 hnae_set_field(*ol_type_vlan_len_msec, 582 HNS3_TXD_L2LEN_M, 583 HNS3_TXD_L2LEN_S, ol2_len >> 1); 584 585 /* compute OL3 header size, defined in 4 Bytes */ 586 ol3_len = l4.hdr - l3.hdr; 587 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, 588 HNS3_TXD_L3LEN_S, ol3_len >> 2); 589 590 /* MAC in UDP, MAC in GRE (0x6558)*/ 591 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { 592 /* switch MAC header ptr from outer to inner header.*/ 593 l2_hdr = skb_inner_mac_header(skb); 594 595 /* compute OL4 header size, defined in 4 Bytes. */ 596 ol4_len = l2_hdr - l4.hdr; 597 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, 598 HNS3_TXD_L4LEN_S, ol4_len >> 2); 599 600 /* switch IP header ptr from outer to inner header */ 601 l3.hdr = skb_inner_network_header(skb); 602 603 /* compute inner l2 header size, defined in 2 Bytes. */ 604 l2_len = l3.hdr - l2_hdr; 605 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, 606 HNS3_TXD_L2LEN_S, l2_len >> 1); 607 } else { 608 /* skb packet types not supported by hardware, 609 * txbd len fild doesn't be filled. 610 */ 611 return; 612 } 613 614 /* switch L4 header pointer from outer to inner */ 615 l4.hdr = skb_inner_transport_header(skb); 616 617 l4_proto = il4_proto; 618 } 619 620 /* compute inner(/normal) L3 header size, defined in 4 Bytes */ 621 l3_len = l4.hdr - l3.hdr; 622 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, 623 HNS3_TXD_L3LEN_S, l3_len >> 2); 624 625 /* compute inner(/normal) L4 header size, defined in 4 Bytes */ 626 switch (l4_proto) { 627 case IPPROTO_TCP: 628 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, 629 HNS3_TXD_L4LEN_S, l4.tcp->doff); 630 break; 631 case IPPROTO_SCTP: 632 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, 633 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); 634 break; 635 case IPPROTO_UDP: 636 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, 637 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); 638 break; 639 default: 640 /* skb packet types not supported by hardware, 641 * txbd len fild doesn't be filled. 642 */ 643 return; 644 } 645 } 646 647 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, 648 u8 il4_proto, u32 *type_cs_vlan_tso, 649 u32 *ol_type_vlan_len_msec) 650 { 651 union { 652 struct iphdr *v4; 653 struct ipv6hdr *v6; 654 unsigned char *hdr; 655 } l3; 656 u32 l4_proto = ol4_proto; 657 658 l3.hdr = skb_network_header(skb); 659 660 /* define OL3 type and tunnel type(OL4).*/ 661 if (skb->encapsulation) { 662 /* define outer network header type.*/ 663 if (skb->protocol == htons(ETH_P_IP)) { 664 if (skb_is_gso(skb)) 665 hnae_set_field(*ol_type_vlan_len_msec, 666 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, 667 HNS3_OL3T_IPV4_CSUM); 668 else 669 hnae_set_field(*ol_type_vlan_len_msec, 670 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, 671 HNS3_OL3T_IPV4_NO_CSUM); 672 673 } else if (skb->protocol == htons(ETH_P_IPV6)) { 674 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, 675 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); 676 } 677 678 /* define tunnel type(OL4).*/ 679 switch (l4_proto) { 680 case IPPROTO_UDP: 681 hnae_set_field(*ol_type_vlan_len_msec, 682 HNS3_TXD_TUNTYPE_M, 683 HNS3_TXD_TUNTYPE_S, 684 HNS3_TUN_MAC_IN_UDP); 685 break; 686 case IPPROTO_GRE: 687 hnae_set_field(*ol_type_vlan_len_msec, 688 HNS3_TXD_TUNTYPE_M, 689 HNS3_TXD_TUNTYPE_S, 690 HNS3_TUN_NVGRE); 691 break; 692 default: 693 /* drop the skb tunnel packet if hardware don't support, 694 * because hardware can't calculate csum when TSO. 695 */ 696 if (skb_is_gso(skb)) 697 return -EDOM; 698 699 /* the stack computes the IP header already, 700 * driver calculate l4 checksum when not TSO. 701 */ 702 skb_checksum_help(skb); 703 return 0; 704 } 705 706 l3.hdr = skb_inner_network_header(skb); 707 l4_proto = il4_proto; 708 } 709 710 if (l3.v4->version == 4) { 711 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, 712 HNS3_TXD_L3T_S, HNS3_L3T_IPV4); 713 714 /* the stack computes the IP header already, the only time we 715 * need the hardware to recompute it is in the case of TSO. 716 */ 717 if (skb_is_gso(skb)) 718 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); 719 720 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 721 } else if (l3.v6->version == 6) { 722 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, 723 HNS3_TXD_L3T_S, HNS3_L3T_IPV6); 724 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 725 } 726 727 switch (l4_proto) { 728 case IPPROTO_TCP: 729 hnae_set_field(*type_cs_vlan_tso, 730 HNS3_TXD_L4T_M, 731 HNS3_TXD_L4T_S, 732 HNS3_L4T_TCP); 733 break; 734 case IPPROTO_UDP: 735 hnae_set_field(*type_cs_vlan_tso, 736 HNS3_TXD_L4T_M, 737 HNS3_TXD_L4T_S, 738 HNS3_L4T_UDP); 739 break; 740 case IPPROTO_SCTP: 741 hnae_set_field(*type_cs_vlan_tso, 742 HNS3_TXD_L4T_M, 743 HNS3_TXD_L4T_S, 744 HNS3_L4T_SCTP); 745 break; 746 default: 747 /* drop the skb tunnel packet if hardware don't support, 748 * because hardware can't calculate csum when TSO. 749 */ 750 if (skb_is_gso(skb)) 751 return -EDOM; 752 753 /* the stack computes the IP header already, 754 * driver calculate l4 checksum when not TSO. 755 */ 756 skb_checksum_help(skb); 757 return 0; 758 } 759 760 return 0; 761 } 762 763 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) 764 { 765 /* Config bd buffer end */ 766 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, 767 HNS3_TXD_BDTYPE_S, 0); 768 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); 769 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); 770 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); 771 } 772 773 static int hns3_fill_desc_vtags(struct sk_buff *skb, 774 struct hns3_enet_ring *tx_ring, 775 u32 *inner_vlan_flag, 776 u32 *out_vlan_flag, 777 u16 *inner_vtag, 778 u16 *out_vtag) 779 { 780 #define HNS3_TX_VLAN_PRIO_SHIFT 13 781 782 if (skb->protocol == htons(ETH_P_8021Q) && 783 !(tx_ring->tqp->handle->kinfo.netdev->features & 784 NETIF_F_HW_VLAN_CTAG_TX)) { 785 /* When HW VLAN acceleration is turned off, and the stack 786 * sets the protocol to 802.1q, the driver just need to 787 * set the protocol to the encapsulated ethertype. 788 */ 789 skb->protocol = vlan_get_protocol(skb); 790 return 0; 791 } 792 793 if (skb_vlan_tag_present(skb)) { 794 u16 vlan_tag; 795 796 vlan_tag = skb_vlan_tag_get(skb); 797 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT; 798 799 /* Based on hw strategy, use out_vtag in two layer tag case, 800 * and use inner_vtag in one tag case. 801 */ 802 if (skb->protocol == htons(ETH_P_8021Q)) { 803 hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); 804 *out_vtag = vlan_tag; 805 } else { 806 hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); 807 *inner_vtag = vlan_tag; 808 } 809 } else if (skb->protocol == htons(ETH_P_8021Q)) { 810 struct vlan_ethhdr *vhdr; 811 int rc; 812 813 rc = skb_cow_head(skb, 0); 814 if (rc < 0) 815 return rc; 816 vhdr = (struct vlan_ethhdr *)skb->data; 817 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) 818 << HNS3_TX_VLAN_PRIO_SHIFT); 819 } 820 821 skb->protocol = vlan_get_protocol(skb); 822 return 0; 823 } 824 825 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, 826 int size, dma_addr_t dma, int frag_end, 827 enum hns_desc_type type) 828 { 829 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 830 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 831 u32 ol_type_vlan_len_msec = 0; 832 u16 bdtp_fe_sc_vld_ra_ri = 0; 833 u32 type_cs_vlan_tso = 0; 834 struct sk_buff *skb; 835 u16 inner_vtag = 0; 836 u16 out_vtag = 0; 837 u32 paylen = 0; 838 u16 mss = 0; 839 __be16 protocol; 840 u8 ol4_proto; 841 u8 il4_proto; 842 int ret; 843 844 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ 845 desc_cb->priv = priv; 846 desc_cb->length = size; 847 desc_cb->dma = dma; 848 desc_cb->type = type; 849 850 /* now, fill the descriptor */ 851 desc->addr = cpu_to_le64(dma); 852 desc->tx.send_size = cpu_to_le16((u16)size); 853 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); 854 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); 855 856 if (type == DESC_TYPE_SKB) { 857 skb = (struct sk_buff *)priv; 858 paylen = skb->len; 859 860 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, 861 &ol_type_vlan_len_msec, 862 &inner_vtag, &out_vtag); 863 if (unlikely(ret)) 864 return ret; 865 866 if (skb->ip_summed == CHECKSUM_PARTIAL) { 867 skb_reset_mac_len(skb); 868 protocol = skb->protocol; 869 870 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); 871 if (ret) 872 return ret; 873 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, 874 &type_cs_vlan_tso, 875 &ol_type_vlan_len_msec); 876 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, 877 &type_cs_vlan_tso, 878 &ol_type_vlan_len_msec); 879 if (ret) 880 return ret; 881 882 ret = hns3_set_tso(skb, &paylen, &mss, 883 &type_cs_vlan_tso); 884 if (ret) 885 return ret; 886 } 887 888 /* Set txbd */ 889 desc->tx.ol_type_vlan_len_msec = 890 cpu_to_le32(ol_type_vlan_len_msec); 891 desc->tx.type_cs_vlan_tso_len = 892 cpu_to_le32(type_cs_vlan_tso); 893 desc->tx.paylen = cpu_to_le32(paylen); 894 desc->tx.mss = cpu_to_le16(mss); 895 desc->tx.vlan_tag = cpu_to_le16(inner_vtag); 896 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); 897 } 898 899 /* move ring pointer to next.*/ 900 ring_ptr_move_fw(ring, next_to_use); 901 902 return 0; 903 } 904 905 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, 906 int size, dma_addr_t dma, int frag_end, 907 enum hns_desc_type type) 908 { 909 unsigned int frag_buf_num; 910 unsigned int k; 911 int sizeoflast; 912 int ret; 913 914 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 915 sizeoflast = size % HNS3_MAX_BD_SIZE; 916 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 917 918 /* When the frag size is bigger than hardware, split this frag */ 919 for (k = 0; k < frag_buf_num; k++) { 920 ret = hns3_fill_desc(ring, priv, 921 (k == frag_buf_num - 1) ? 922 sizeoflast : HNS3_MAX_BD_SIZE, 923 dma + HNS3_MAX_BD_SIZE * k, 924 frag_end && (k == frag_buf_num - 1) ? 1 : 0, 925 (type == DESC_TYPE_SKB && !k) ? 926 DESC_TYPE_SKB : DESC_TYPE_PAGE); 927 if (ret) 928 return ret; 929 } 930 931 return 0; 932 } 933 934 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, 935 struct hns3_enet_ring *ring) 936 { 937 struct sk_buff *skb = *out_skb; 938 struct skb_frag_struct *frag; 939 int bdnum_for_frag; 940 int frag_num; 941 int buf_num; 942 int size; 943 int i; 944 945 size = skb_headlen(skb); 946 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 947 948 frag_num = skb_shinfo(skb)->nr_frags; 949 for (i = 0; i < frag_num; i++) { 950 frag = &skb_shinfo(skb)->frags[i]; 951 size = skb_frag_size(frag); 952 bdnum_for_frag = 953 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 954 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) 955 return -ENOMEM; 956 957 buf_num += bdnum_for_frag; 958 } 959 960 if (buf_num > ring_space(ring)) 961 return -EBUSY; 962 963 *bnum = buf_num; 964 return 0; 965 } 966 967 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, 968 struct hns3_enet_ring *ring) 969 { 970 struct sk_buff *skb = *out_skb; 971 int buf_num; 972 973 /* No. of segments (plus a header) */ 974 buf_num = skb_shinfo(skb)->nr_frags + 1; 975 976 if (buf_num > ring_space(ring)) 977 return -EBUSY; 978 979 *bnum = buf_num; 980 981 return 0; 982 } 983 984 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) 985 { 986 struct device *dev = ring_to_dev(ring); 987 unsigned int i; 988 989 for (i = 0; i < ring->desc_num; i++) { 990 /* check if this is where we started */ 991 if (ring->next_to_use == next_to_use_orig) 992 break; 993 994 /* unmap the descriptor dma address */ 995 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) 996 dma_unmap_single(dev, 997 ring->desc_cb[ring->next_to_use].dma, 998 ring->desc_cb[ring->next_to_use].length, 999 DMA_TO_DEVICE); 1000 else 1001 dma_unmap_page(dev, 1002 ring->desc_cb[ring->next_to_use].dma, 1003 ring->desc_cb[ring->next_to_use].length, 1004 DMA_TO_DEVICE); 1005 1006 /* rollback one */ 1007 ring_ptr_move_bw(ring, next_to_use); 1008 } 1009 } 1010 1011 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 1012 { 1013 struct hns3_nic_priv *priv = netdev_priv(netdev); 1014 struct hns3_nic_ring_data *ring_data = 1015 &tx_ring_data(priv, skb->queue_mapping); 1016 struct hns3_enet_ring *ring = ring_data->ring; 1017 struct device *dev = priv->dev; 1018 struct netdev_queue *dev_queue; 1019 struct skb_frag_struct *frag; 1020 int next_to_use_head; 1021 int next_to_use_frag; 1022 dma_addr_t dma; 1023 int buf_num; 1024 int seg_num; 1025 int size; 1026 int ret; 1027 int i; 1028 1029 /* Prefetch the data used later */ 1030 prefetch(skb->data); 1031 1032 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { 1033 case -EBUSY: 1034 u64_stats_update_begin(&ring->syncp); 1035 ring->stats.tx_busy++; 1036 u64_stats_update_end(&ring->syncp); 1037 1038 goto out_net_tx_busy; 1039 case -ENOMEM: 1040 u64_stats_update_begin(&ring->syncp); 1041 ring->stats.sw_err_cnt++; 1042 u64_stats_update_end(&ring->syncp); 1043 netdev_err(netdev, "no memory to xmit!\n"); 1044 1045 goto out_err_tx_ok; 1046 default: 1047 break; 1048 } 1049 1050 /* No. of segments (plus a header) */ 1051 seg_num = skb_shinfo(skb)->nr_frags + 1; 1052 /* Fill the first part */ 1053 size = skb_headlen(skb); 1054 1055 next_to_use_head = ring->next_to_use; 1056 1057 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); 1058 if (dma_mapping_error(dev, dma)) { 1059 netdev_err(netdev, "TX head DMA map failed\n"); 1060 ring->stats.sw_err_cnt++; 1061 goto out_err_tx_ok; 1062 } 1063 1064 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, 1065 DESC_TYPE_SKB); 1066 if (ret) 1067 goto head_dma_map_err; 1068 1069 next_to_use_frag = ring->next_to_use; 1070 /* Fill the fragments */ 1071 for (i = 1; i < seg_num; i++) { 1072 frag = &skb_shinfo(skb)->frags[i - 1]; 1073 size = skb_frag_size(frag); 1074 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); 1075 if (dma_mapping_error(dev, dma)) { 1076 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); 1077 ring->stats.sw_err_cnt++; 1078 goto frag_dma_map_err; 1079 } 1080 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, 1081 seg_num - 1 == i ? 1 : 0, 1082 DESC_TYPE_PAGE); 1083 1084 if (ret) 1085 goto frag_dma_map_err; 1086 } 1087 1088 /* Complete translate all packets */ 1089 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); 1090 netdev_tx_sent_queue(dev_queue, skb->len); 1091 1092 wmb(); /* Commit all data before submit */ 1093 1094 hnae_queue_xmit(ring->tqp, buf_num); 1095 1096 return NETDEV_TX_OK; 1097 1098 frag_dma_map_err: 1099 hns_nic_dma_unmap(ring, next_to_use_frag); 1100 1101 head_dma_map_err: 1102 hns_nic_dma_unmap(ring, next_to_use_head); 1103 1104 out_err_tx_ok: 1105 dev_kfree_skb_any(skb); 1106 return NETDEV_TX_OK; 1107 1108 out_net_tx_busy: 1109 netif_stop_subqueue(netdev, ring_data->queue_index); 1110 smp_mb(); /* Commit all data before submit */ 1111 1112 return NETDEV_TX_BUSY; 1113 } 1114 1115 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) 1116 { 1117 struct hnae3_handle *h = hns3_get_handle(netdev); 1118 struct sockaddr *mac_addr = p; 1119 int ret; 1120 1121 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) 1122 return -EADDRNOTAVAIL; 1123 1124 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); 1125 if (ret) { 1126 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); 1127 return ret; 1128 } 1129 1130 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); 1131 1132 return 0; 1133 } 1134 1135 static int hns3_nic_set_features(struct net_device *netdev, 1136 netdev_features_t features) 1137 { 1138 netdev_features_t changed = netdev->features ^ features; 1139 struct hns3_nic_priv *priv = netdev_priv(netdev); 1140 struct hnae3_handle *h = priv->ae_handle; 1141 int ret; 1142 1143 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { 1144 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { 1145 priv->ops.fill_desc = hns3_fill_desc_tso; 1146 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; 1147 } else { 1148 priv->ops.fill_desc = hns3_fill_desc; 1149 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; 1150 } 1151 } 1152 1153 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && 1154 h->ae_algo->ops->enable_vlan_filter) { 1155 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 1156 h->ae_algo->ops->enable_vlan_filter(h, true); 1157 else 1158 h->ae_algo->ops->enable_vlan_filter(h, false); 1159 } 1160 1161 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && 1162 h->ae_algo->ops->enable_hw_strip_rxvtag) { 1163 if (features & NETIF_F_HW_VLAN_CTAG_RX) 1164 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true); 1165 else 1166 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false); 1167 1168 if (ret) 1169 return ret; 1170 } 1171 1172 netdev->features = features; 1173 return 0; 1174 } 1175 1176 static void hns3_nic_get_stats64(struct net_device *netdev, 1177 struct rtnl_link_stats64 *stats) 1178 { 1179 struct hns3_nic_priv *priv = netdev_priv(netdev); 1180 int queue_num = priv->ae_handle->kinfo.num_tqps; 1181 struct hnae3_handle *handle = priv->ae_handle; 1182 struct hns3_enet_ring *ring; 1183 unsigned int start; 1184 unsigned int idx; 1185 u64 tx_bytes = 0; 1186 u64 rx_bytes = 0; 1187 u64 tx_pkts = 0; 1188 u64 rx_pkts = 0; 1189 u64 tx_drop = 0; 1190 u64 rx_drop = 0; 1191 1192 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 1193 return; 1194 1195 handle->ae_algo->ops->update_stats(handle, &netdev->stats); 1196 1197 for (idx = 0; idx < queue_num; idx++) { 1198 /* fetch the tx stats */ 1199 ring = priv->ring_data[idx].ring; 1200 do { 1201 start = u64_stats_fetch_begin_irq(&ring->syncp); 1202 tx_bytes += ring->stats.tx_bytes; 1203 tx_pkts += ring->stats.tx_pkts; 1204 tx_drop += ring->stats.tx_busy; 1205 tx_drop += ring->stats.sw_err_cnt; 1206 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1207 1208 /* fetch the rx stats */ 1209 ring = priv->ring_data[idx + queue_num].ring; 1210 do { 1211 start = u64_stats_fetch_begin_irq(&ring->syncp); 1212 rx_bytes += ring->stats.rx_bytes; 1213 rx_pkts += ring->stats.rx_pkts; 1214 rx_drop += ring->stats.non_vld_descs; 1215 rx_drop += ring->stats.err_pkt_len; 1216 rx_drop += ring->stats.l2_err; 1217 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1218 } 1219 1220 stats->tx_bytes = tx_bytes; 1221 stats->tx_packets = tx_pkts; 1222 stats->rx_bytes = rx_bytes; 1223 stats->rx_packets = rx_pkts; 1224 1225 stats->rx_errors = netdev->stats.rx_errors; 1226 stats->multicast = netdev->stats.multicast; 1227 stats->rx_length_errors = netdev->stats.rx_length_errors; 1228 stats->rx_crc_errors = netdev->stats.rx_crc_errors; 1229 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 1230 1231 stats->tx_errors = netdev->stats.tx_errors; 1232 stats->rx_dropped = rx_drop + netdev->stats.rx_dropped; 1233 stats->tx_dropped = tx_drop + netdev->stats.tx_dropped; 1234 stats->collisions = netdev->stats.collisions; 1235 stats->rx_over_errors = netdev->stats.rx_over_errors; 1236 stats->rx_frame_errors = netdev->stats.rx_frame_errors; 1237 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; 1238 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; 1239 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; 1240 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; 1241 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; 1242 stats->tx_window_errors = netdev->stats.tx_window_errors; 1243 stats->rx_compressed = netdev->stats.rx_compressed; 1244 stats->tx_compressed = netdev->stats.tx_compressed; 1245 } 1246 1247 static int hns3_setup_tc(struct net_device *netdev, void *type_data) 1248 { 1249 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 1250 struct hnae3_handle *h = hns3_get_handle(netdev); 1251 struct hnae3_knic_private_info *kinfo = &h->kinfo; 1252 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; 1253 u8 tc = mqprio_qopt->qopt.num_tc; 1254 u16 mode = mqprio_qopt->mode; 1255 u8 hw = mqprio_qopt->qopt.hw; 1256 bool if_running; 1257 unsigned int i; 1258 int ret; 1259 1260 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && 1261 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) 1262 return -EOPNOTSUPP; 1263 1264 if (tc > HNAE3_MAX_TC) 1265 return -EINVAL; 1266 1267 if (!netdev) 1268 return -EINVAL; 1269 1270 if_running = netif_running(netdev); 1271 if (if_running) { 1272 hns3_nic_net_stop(netdev); 1273 msleep(100); 1274 } 1275 1276 ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 1277 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; 1278 if (ret) 1279 goto out; 1280 1281 if (tc <= 1) { 1282 netdev_reset_tc(netdev); 1283 } else { 1284 ret = netdev_set_num_tc(netdev, tc); 1285 if (ret) 1286 goto out; 1287 1288 for (i = 0; i < HNAE3_MAX_TC; i++) { 1289 if (!kinfo->tc_info[i].enable) 1290 continue; 1291 1292 netdev_set_tc_queue(netdev, 1293 kinfo->tc_info[i].tc, 1294 kinfo->tc_info[i].tqp_count, 1295 kinfo->tc_info[i].tqp_offset); 1296 } 1297 } 1298 1299 ret = hns3_nic_set_real_num_queue(netdev); 1300 1301 out: 1302 if (if_running) 1303 hns3_nic_net_open(netdev); 1304 1305 return ret; 1306 } 1307 1308 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 1309 void *type_data) 1310 { 1311 if (type != TC_SETUP_QDISC_MQPRIO) 1312 return -EOPNOTSUPP; 1313 1314 return hns3_setup_tc(dev, type_data); 1315 } 1316 1317 static int hns3_vlan_rx_add_vid(struct net_device *netdev, 1318 __be16 proto, u16 vid) 1319 { 1320 struct hnae3_handle *h = hns3_get_handle(netdev); 1321 struct hns3_nic_priv *priv = netdev_priv(netdev); 1322 int ret = -EIO; 1323 1324 if (h->ae_algo->ops->set_vlan_filter) 1325 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); 1326 1327 if (!ret) 1328 set_bit(vid, priv->active_vlans); 1329 1330 return ret; 1331 } 1332 1333 static int hns3_vlan_rx_kill_vid(struct net_device *netdev, 1334 __be16 proto, u16 vid) 1335 { 1336 struct hnae3_handle *h = hns3_get_handle(netdev); 1337 struct hns3_nic_priv *priv = netdev_priv(netdev); 1338 int ret = -EIO; 1339 1340 if (h->ae_algo->ops->set_vlan_filter) 1341 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); 1342 1343 if (!ret) 1344 clear_bit(vid, priv->active_vlans); 1345 1346 return ret; 1347 } 1348 1349 static void hns3_restore_vlan(struct net_device *netdev) 1350 { 1351 struct hns3_nic_priv *priv = netdev_priv(netdev); 1352 u16 vid; 1353 int ret; 1354 1355 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 1356 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); 1357 if (ret) 1358 netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n", 1359 vid, ret); 1360 } 1361 } 1362 1363 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 1364 u8 qos, __be16 vlan_proto) 1365 { 1366 struct hnae3_handle *h = hns3_get_handle(netdev); 1367 int ret = -EIO; 1368 1369 if (h->ae_algo->ops->set_vf_vlan_filter) 1370 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, 1371 qos, vlan_proto); 1372 1373 return ret; 1374 } 1375 1376 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) 1377 { 1378 struct hnae3_handle *h = hns3_get_handle(netdev); 1379 bool if_running = netif_running(netdev); 1380 int ret; 1381 1382 if (!h->ae_algo->ops->set_mtu) 1383 return -EOPNOTSUPP; 1384 1385 /* if this was called with netdev up then bring netdevice down */ 1386 if (if_running) { 1387 (void)hns3_nic_net_stop(netdev); 1388 msleep(100); 1389 } 1390 1391 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 1392 if (ret) { 1393 netdev_err(netdev, "failed to change MTU in hardware %d\n", 1394 ret); 1395 return ret; 1396 } 1397 1398 netdev->mtu = new_mtu; 1399 1400 /* if the netdev was running earlier, bring it up again */ 1401 if (if_running && hns3_nic_net_open(netdev)) 1402 ret = -EINVAL; 1403 1404 return ret; 1405 } 1406 1407 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) 1408 { 1409 struct hns3_nic_priv *priv = netdev_priv(ndev); 1410 struct hns3_enet_ring *tx_ring = NULL; 1411 int timeout_queue = 0; 1412 int hw_head, hw_tail; 1413 int i; 1414 1415 /* Find the stopped queue the same way the stack does */ 1416 for (i = 0; i < ndev->real_num_tx_queues; i++) { 1417 struct netdev_queue *q; 1418 unsigned long trans_start; 1419 1420 q = netdev_get_tx_queue(ndev, i); 1421 trans_start = q->trans_start; 1422 if (netif_xmit_stopped(q) && 1423 time_after(jiffies, 1424 (trans_start + ndev->watchdog_timeo))) { 1425 timeout_queue = i; 1426 break; 1427 } 1428 } 1429 1430 if (i == ndev->num_tx_queues) { 1431 netdev_info(ndev, 1432 "no netdev TX timeout queue found, timeout count: %llu\n", 1433 priv->tx_timeout_count); 1434 return false; 1435 } 1436 1437 tx_ring = priv->ring_data[timeout_queue].ring; 1438 1439 hw_head = readl_relaxed(tx_ring->tqp->io_base + 1440 HNS3_RING_TX_RING_HEAD_REG); 1441 hw_tail = readl_relaxed(tx_ring->tqp->io_base + 1442 HNS3_RING_TX_RING_TAIL_REG); 1443 netdev_info(ndev, 1444 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n", 1445 priv->tx_timeout_count, 1446 timeout_queue, 1447 tx_ring->next_to_use, 1448 tx_ring->next_to_clean, 1449 hw_head, 1450 hw_tail, 1451 readl(tx_ring->tqp_vector->mask_addr)); 1452 1453 return true; 1454 } 1455 1456 static void hns3_nic_net_timeout(struct net_device *ndev) 1457 { 1458 struct hns3_nic_priv *priv = netdev_priv(ndev); 1459 struct hnae3_handle *h = priv->ae_handle; 1460 1461 if (!hns3_get_tx_timeo_queue_info(ndev)) 1462 return; 1463 1464 priv->tx_timeout_count++; 1465 1466 if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo))) 1467 return; 1468 1469 /* request the reset */ 1470 if (h->ae_algo->ops->reset_event) 1471 h->ae_algo->ops->reset_event(h); 1472 } 1473 1474 static const struct net_device_ops hns3_nic_netdev_ops = { 1475 .ndo_open = hns3_nic_net_open, 1476 .ndo_stop = hns3_nic_net_stop, 1477 .ndo_start_xmit = hns3_nic_net_xmit, 1478 .ndo_tx_timeout = hns3_nic_net_timeout, 1479 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 1480 .ndo_change_mtu = hns3_nic_change_mtu, 1481 .ndo_set_features = hns3_nic_set_features, 1482 .ndo_get_stats64 = hns3_nic_get_stats64, 1483 .ndo_setup_tc = hns3_nic_setup_tc, 1484 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 1485 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 1486 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 1487 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 1488 }; 1489 1490 static bool hns3_is_phys_func(struct pci_dev *pdev) 1491 { 1492 u32 dev_id = pdev->device; 1493 1494 switch (dev_id) { 1495 case HNAE3_DEV_ID_GE: 1496 case HNAE3_DEV_ID_25GE: 1497 case HNAE3_DEV_ID_25GE_RDMA: 1498 case HNAE3_DEV_ID_25GE_RDMA_MACSEC: 1499 case HNAE3_DEV_ID_50GE_RDMA: 1500 case HNAE3_DEV_ID_50GE_RDMA_MACSEC: 1501 case HNAE3_DEV_ID_100G_RDMA_MACSEC: 1502 return true; 1503 case HNAE3_DEV_ID_100G_VF: 1504 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: 1505 return false; 1506 default: 1507 dev_warn(&pdev->dev, "un-recognized pci device-id %d", 1508 dev_id); 1509 } 1510 1511 return false; 1512 } 1513 1514 static void hns3_disable_sriov(struct pci_dev *pdev) 1515 { 1516 /* If our VFs are assigned we cannot shut down SR-IOV 1517 * without causing issues, so just leave the hardware 1518 * available but disabled 1519 */ 1520 if (pci_vfs_assigned(pdev)) { 1521 dev_warn(&pdev->dev, 1522 "disabling driver while VFs are assigned\n"); 1523 return; 1524 } 1525 1526 pci_disable_sriov(pdev); 1527 } 1528 1529 /* hns3_probe - Device initialization routine 1530 * @pdev: PCI device information struct 1531 * @ent: entry in hns3_pci_tbl 1532 * 1533 * hns3_probe initializes a PF identified by a pci_dev structure. 1534 * The OS initialization, configuring of the PF private structure, 1535 * and a hardware reset occur. 1536 * 1537 * Returns 0 on success, negative on failure 1538 */ 1539 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1540 { 1541 struct hnae3_ae_dev *ae_dev; 1542 int ret; 1543 1544 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), 1545 GFP_KERNEL); 1546 if (!ae_dev) { 1547 ret = -ENOMEM; 1548 return ret; 1549 } 1550 1551 ae_dev->pdev = pdev; 1552 ae_dev->flag = ent->driver_data; 1553 ae_dev->dev_type = HNAE3_DEV_KNIC; 1554 pci_set_drvdata(pdev, ae_dev); 1555 1556 hnae3_register_ae_dev(ae_dev); 1557 1558 return 0; 1559 } 1560 1561 /* hns3_remove - Device removal routine 1562 * @pdev: PCI device information struct 1563 */ 1564 static void hns3_remove(struct pci_dev *pdev) 1565 { 1566 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1567 1568 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) 1569 hns3_disable_sriov(pdev); 1570 1571 hnae3_unregister_ae_dev(ae_dev); 1572 } 1573 1574 /** 1575 * hns3_pci_sriov_configure 1576 * @pdev: pointer to a pci_dev structure 1577 * @num_vfs: number of VFs to allocate 1578 * 1579 * Enable or change the number of VFs. Called when the user updates the number 1580 * of VFs in sysfs. 1581 **/ 1582 int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 1583 { 1584 int ret; 1585 1586 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { 1587 dev_warn(&pdev->dev, "Can not config SRIOV\n"); 1588 return -EINVAL; 1589 } 1590 1591 if (num_vfs) { 1592 ret = pci_enable_sriov(pdev, num_vfs); 1593 if (ret) 1594 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); 1595 } else if (!pci_vfs_assigned(pdev)) { 1596 pci_disable_sriov(pdev); 1597 } else { 1598 dev_warn(&pdev->dev, 1599 "Unable to free VFs because some are assigned to VMs.\n"); 1600 } 1601 1602 return 0; 1603 } 1604 1605 static struct pci_driver hns3_driver = { 1606 .name = hns3_driver_name, 1607 .id_table = hns3_pci_tbl, 1608 .probe = hns3_probe, 1609 .remove = hns3_remove, 1610 .sriov_configure = hns3_pci_sriov_configure, 1611 }; 1612 1613 /* set default feature to hns3 */ 1614 static void hns3_set_default_feature(struct net_device *netdev) 1615 { 1616 netdev->priv_flags |= IFF_UNICAST_FLT; 1617 1618 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1619 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1620 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1621 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1622 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1623 1624 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 1625 1626 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 1627 1628 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1629 NETIF_F_HW_VLAN_CTAG_FILTER | 1630 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 1631 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1632 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1633 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1634 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1635 1636 netdev->vlan_features |= 1637 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | 1638 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | 1639 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1640 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1641 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1642 1643 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1644 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 1645 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1646 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1647 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1648 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1649 } 1650 1651 static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 1652 struct hns3_desc_cb *cb) 1653 { 1654 unsigned int order = hnae_page_order(ring); 1655 struct page *p; 1656 1657 p = dev_alloc_pages(order); 1658 if (!p) 1659 return -ENOMEM; 1660 1661 cb->priv = p; 1662 cb->page_offset = 0; 1663 cb->reuse_flag = 0; 1664 cb->buf = page_address(p); 1665 cb->length = hnae_page_size(ring); 1666 cb->type = DESC_TYPE_PAGE; 1667 1668 return 0; 1669 } 1670 1671 static void hns3_free_buffer(struct hns3_enet_ring *ring, 1672 struct hns3_desc_cb *cb) 1673 { 1674 if (cb->type == DESC_TYPE_SKB) 1675 dev_kfree_skb_any((struct sk_buff *)cb->priv); 1676 else if (!HNAE3_IS_TX_RING(ring)) 1677 put_page((struct page *)cb->priv); 1678 memset(cb, 0, sizeof(*cb)); 1679 } 1680 1681 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) 1682 { 1683 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 1684 cb->length, ring_to_dma_dir(ring)); 1685 1686 if (dma_mapping_error(ring_to_dev(ring), cb->dma)) 1687 return -EIO; 1688 1689 return 0; 1690 } 1691 1692 static void hns3_unmap_buffer(struct hns3_enet_ring *ring, 1693 struct hns3_desc_cb *cb) 1694 { 1695 if (cb->type == DESC_TYPE_SKB) 1696 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 1697 ring_to_dma_dir(ring)); 1698 else 1699 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 1700 ring_to_dma_dir(ring)); 1701 } 1702 1703 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) 1704 { 1705 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 1706 ring->desc[i].addr = 0; 1707 } 1708 1709 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) 1710 { 1711 struct hns3_desc_cb *cb = &ring->desc_cb[i]; 1712 1713 if (!ring->desc_cb[i].dma) 1714 return; 1715 1716 hns3_buffer_detach(ring, i); 1717 hns3_free_buffer(ring, cb); 1718 } 1719 1720 static void hns3_free_buffers(struct hns3_enet_ring *ring) 1721 { 1722 int i; 1723 1724 for (i = 0; i < ring->desc_num; i++) 1725 hns3_free_buffer_detach(ring, i); 1726 } 1727 1728 /* free desc along with its attached buffer */ 1729 static void hns3_free_desc(struct hns3_enet_ring *ring) 1730 { 1731 hns3_free_buffers(ring); 1732 1733 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, 1734 ring->desc_num * sizeof(ring->desc[0]), 1735 DMA_BIDIRECTIONAL); 1736 ring->desc_dma_addr = 0; 1737 kfree(ring->desc); 1738 ring->desc = NULL; 1739 } 1740 1741 static int hns3_alloc_desc(struct hns3_enet_ring *ring) 1742 { 1743 int size = ring->desc_num * sizeof(ring->desc[0]); 1744 1745 ring->desc = kzalloc(size, GFP_KERNEL); 1746 if (!ring->desc) 1747 return -ENOMEM; 1748 1749 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc, 1750 size, DMA_BIDIRECTIONAL); 1751 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) { 1752 ring->desc_dma_addr = 0; 1753 kfree(ring->desc); 1754 ring->desc = NULL; 1755 return -ENOMEM; 1756 } 1757 1758 return 0; 1759 } 1760 1761 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, 1762 struct hns3_desc_cb *cb) 1763 { 1764 int ret; 1765 1766 ret = hns3_alloc_buffer(ring, cb); 1767 if (ret) 1768 goto out; 1769 1770 ret = hns3_map_buffer(ring, cb); 1771 if (ret) 1772 goto out_with_buf; 1773 1774 return 0; 1775 1776 out_with_buf: 1777 hns3_free_buffer(ring, cb); 1778 out: 1779 return ret; 1780 } 1781 1782 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) 1783 { 1784 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); 1785 1786 if (ret) 1787 return ret; 1788 1789 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 1790 1791 return 0; 1792 } 1793 1794 /* Allocate memory for raw pkg, and map with dma */ 1795 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) 1796 { 1797 int i, j, ret; 1798 1799 for (i = 0; i < ring->desc_num; i++) { 1800 ret = hns3_alloc_buffer_attach(ring, i); 1801 if (ret) 1802 goto out_buffer_fail; 1803 } 1804 1805 return 0; 1806 1807 out_buffer_fail: 1808 for (j = i - 1; j >= 0; j--) 1809 hns3_free_buffer_detach(ring, j); 1810 return ret; 1811 } 1812 1813 /* detach a in-used buffer and replace with a reserved one */ 1814 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, 1815 struct hns3_desc_cb *res_cb) 1816 { 1817 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 1818 ring->desc_cb[i] = *res_cb; 1819 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 1820 } 1821 1822 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) 1823 { 1824 ring->desc_cb[i].reuse_flag = 0; 1825 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma 1826 + ring->desc_cb[i].page_offset); 1827 } 1828 1829 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, 1830 int *pkts) 1831 { 1832 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; 1833 1834 (*pkts) += (desc_cb->type == DESC_TYPE_SKB); 1835 (*bytes) += desc_cb->length; 1836 /* desc_cb will be cleaned, after hnae_free_buffer_detach*/ 1837 hns3_free_buffer_detach(ring, ring->next_to_clean); 1838 1839 ring_ptr_move_fw(ring, next_to_clean); 1840 } 1841 1842 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) 1843 { 1844 int u = ring->next_to_use; 1845 int c = ring->next_to_clean; 1846 1847 if (unlikely(h > ring->desc_num)) 1848 return 0; 1849 1850 return u > c ? (h > c && h <= u) : (h > c || h <= u); 1851 } 1852 1853 bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) 1854 { 1855 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 1856 struct netdev_queue *dev_queue; 1857 int bytes, pkts; 1858 int head; 1859 1860 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); 1861 rmb(); /* Make sure head is ready before touch any data */ 1862 1863 if (is_ring_empty(ring) || head == ring->next_to_clean) 1864 return true; /* no data to poll */ 1865 1866 if (!is_valid_clean_head(ring, head)) { 1867 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, 1868 ring->next_to_use, ring->next_to_clean); 1869 1870 u64_stats_update_begin(&ring->syncp); 1871 ring->stats.io_err_cnt++; 1872 u64_stats_update_end(&ring->syncp); 1873 return true; 1874 } 1875 1876 bytes = 0; 1877 pkts = 0; 1878 while (head != ring->next_to_clean && budget) { 1879 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); 1880 /* Issue prefetch for next Tx descriptor */ 1881 prefetch(&ring->desc_cb[ring->next_to_clean]); 1882 budget--; 1883 } 1884 1885 ring->tqp_vector->tx_group.total_bytes += bytes; 1886 ring->tqp_vector->tx_group.total_packets += pkts; 1887 1888 u64_stats_update_begin(&ring->syncp); 1889 ring->stats.tx_bytes += bytes; 1890 ring->stats.tx_pkts += pkts; 1891 u64_stats_update_end(&ring->syncp); 1892 1893 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 1894 netdev_tx_completed_queue(dev_queue, pkts, bytes); 1895 1896 if (unlikely(pkts && netif_carrier_ok(netdev) && 1897 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { 1898 /* Make sure that anybody stopping the queue after this 1899 * sees the new next_to_clean. 1900 */ 1901 smp_mb(); 1902 if (netif_tx_queue_stopped(dev_queue)) { 1903 netif_tx_wake_queue(dev_queue); 1904 ring->stats.restart_queue++; 1905 } 1906 } 1907 1908 return !!budget; 1909 } 1910 1911 static int hns3_desc_unused(struct hns3_enet_ring *ring) 1912 { 1913 int ntc = ring->next_to_clean; 1914 int ntu = ring->next_to_use; 1915 1916 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; 1917 } 1918 1919 static void 1920 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) 1921 { 1922 struct hns3_desc_cb *desc_cb; 1923 struct hns3_desc_cb res_cbs; 1924 int i, ret; 1925 1926 for (i = 0; i < cleand_count; i++) { 1927 desc_cb = &ring->desc_cb[ring->next_to_use]; 1928 if (desc_cb->reuse_flag) { 1929 u64_stats_update_begin(&ring->syncp); 1930 ring->stats.reuse_pg_cnt++; 1931 u64_stats_update_end(&ring->syncp); 1932 1933 hns3_reuse_buffer(ring, ring->next_to_use); 1934 } else { 1935 ret = hns3_reserve_buffer_map(ring, &res_cbs); 1936 if (ret) { 1937 u64_stats_update_begin(&ring->syncp); 1938 ring->stats.sw_err_cnt++; 1939 u64_stats_update_end(&ring->syncp); 1940 1941 netdev_err(ring->tqp->handle->kinfo.netdev, 1942 "hnae reserve buffer map failed.\n"); 1943 break; 1944 } 1945 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 1946 } 1947 1948 ring_ptr_move_fw(ring, next_to_use); 1949 } 1950 1951 wmb(); /* Make all data has been write before submit */ 1952 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 1953 } 1954 1955 /* hns3_nic_get_headlen - determine size of header for LRO/GRO 1956 * @data: pointer to the start of the headers 1957 * @max: total length of section to find headers in 1958 * 1959 * This function is meant to determine the length of headers that will 1960 * be recognized by hardware for LRO, GRO, and RSC offloads. The main 1961 * motivation of doing this is to only perform one pull for IPv4 TCP 1962 * packets so that we can do basic things like calculating the gso_size 1963 * based on the average data per packet. 1964 */ 1965 static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag, 1966 unsigned int max_size) 1967 { 1968 unsigned char *network; 1969 u8 hlen; 1970 1971 /* This should never happen, but better safe than sorry */ 1972 if (max_size < ETH_HLEN) 1973 return max_size; 1974 1975 /* Initialize network frame pointer */ 1976 network = data; 1977 1978 /* Set first protocol and move network header forward */ 1979 network += ETH_HLEN; 1980 1981 /* Handle any vlan tag if present */ 1982 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S) 1983 == HNS3_RX_FLAG_VLAN_PRESENT) { 1984 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) 1985 return max_size; 1986 1987 network += VLAN_HLEN; 1988 } 1989 1990 /* Handle L3 protocols */ 1991 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) 1992 == HNS3_RX_FLAG_L3ID_IPV4) { 1993 if ((typeof(max_size))(network - data) > 1994 (max_size - sizeof(struct iphdr))) 1995 return max_size; 1996 1997 /* Access ihl as a u8 to avoid unaligned access on ia64 */ 1998 hlen = (network[0] & 0x0F) << 2; 1999 2000 /* Verify hlen meets minimum size requirements */ 2001 if (hlen < sizeof(struct iphdr)) 2002 return network - data; 2003 2004 /* Record next protocol if header is present */ 2005 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) 2006 == HNS3_RX_FLAG_L3ID_IPV6) { 2007 if ((typeof(max_size))(network - data) > 2008 (max_size - sizeof(struct ipv6hdr))) 2009 return max_size; 2010 2011 /* Record next protocol */ 2012 hlen = sizeof(struct ipv6hdr); 2013 } else { 2014 return network - data; 2015 } 2016 2017 /* Relocate pointer to start of L4 header */ 2018 network += hlen; 2019 2020 /* Finally sort out TCP/UDP */ 2021 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) 2022 == HNS3_RX_FLAG_L4ID_TCP) { 2023 if ((typeof(max_size))(network - data) > 2024 (max_size - sizeof(struct tcphdr))) 2025 return max_size; 2026 2027 /* Access doff as a u8 to avoid unaligned access on ia64 */ 2028 hlen = (network[12] & 0xF0) >> 2; 2029 2030 /* Verify hlen meets minimum size requirements */ 2031 if (hlen < sizeof(struct tcphdr)) 2032 return network - data; 2033 2034 network += hlen; 2035 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) 2036 == HNS3_RX_FLAG_L4ID_UDP) { 2037 if ((typeof(max_size))(network - data) > 2038 (max_size - sizeof(struct udphdr))) 2039 return max_size; 2040 2041 network += sizeof(struct udphdr); 2042 } 2043 2044 /* If everything has gone correctly network should be the 2045 * data section of the packet and will be the end of the header. 2046 * If not then it probably represents the end of the last recognized 2047 * header. 2048 */ 2049 if ((typeof(max_size))(network - data) < max_size) 2050 return network - data; 2051 else 2052 return max_size; 2053 } 2054 2055 static void hns3_nic_reuse_page(struct sk_buff *skb, int i, 2056 struct hns3_enet_ring *ring, int pull_len, 2057 struct hns3_desc_cb *desc_cb) 2058 { 2059 struct hns3_desc *desc; 2060 int truesize, size; 2061 int last_offset; 2062 bool twobufs; 2063 2064 twobufs = ((PAGE_SIZE < 8192) && 2065 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048); 2066 2067 desc = &ring->desc[ring->next_to_clean]; 2068 size = le16_to_cpu(desc->rx.size); 2069 2070 truesize = hnae_buf_size(ring); 2071 2072 if (!twobufs) 2073 last_offset = hnae_page_size(ring) - hnae_buf_size(ring); 2074 2075 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 2076 size - pull_len, truesize); 2077 2078 /* Avoid re-using remote pages,flag default unreuse */ 2079 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) 2080 return; 2081 2082 if (twobufs) { 2083 /* If we are only owner of page we can reuse it */ 2084 if (likely(page_count(desc_cb->priv) == 1)) { 2085 /* Flip page offset to other buffer */ 2086 desc_cb->page_offset ^= truesize; 2087 2088 desc_cb->reuse_flag = 1; 2089 /* bump ref count on page before it is given*/ 2090 get_page(desc_cb->priv); 2091 } 2092 return; 2093 } 2094 2095 /* Move offset up to the next cache line */ 2096 desc_cb->page_offset += truesize; 2097 2098 if (desc_cb->page_offset <= last_offset) { 2099 desc_cb->reuse_flag = 1; 2100 /* Bump ref count on page before it is given*/ 2101 get_page(desc_cb->priv); 2102 } 2103 } 2104 2105 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, 2106 struct hns3_desc *desc) 2107 { 2108 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2109 int l3_type, l4_type; 2110 u32 bd_base_info; 2111 int ol4_type; 2112 u32 l234info; 2113 2114 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2115 l234info = le32_to_cpu(desc->rx.l234_info); 2116 2117 skb->ip_summed = CHECKSUM_NONE; 2118 2119 skb_checksum_none_assert(skb); 2120 2121 if (!(netdev->features & NETIF_F_RXCSUM)) 2122 return; 2123 2124 /* check if hardware has done checksum */ 2125 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) 2126 return; 2127 2128 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) || 2129 hnae_get_bit(l234info, HNS3_RXD_L4E_B) || 2130 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) || 2131 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) { 2132 netdev_err(netdev, "L3/L4 error pkt\n"); 2133 u64_stats_update_begin(&ring->syncp); 2134 ring->stats.l3l4_csum_err++; 2135 u64_stats_update_end(&ring->syncp); 2136 2137 return; 2138 } 2139 2140 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, 2141 HNS3_RXD_L3ID_S); 2142 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, 2143 HNS3_RXD_L4ID_S); 2144 2145 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); 2146 switch (ol4_type) { 2147 case HNS3_OL4_TYPE_MAC_IN_UDP: 2148 case HNS3_OL4_TYPE_NVGRE: 2149 skb->csum_level = 1; 2150 case HNS3_OL4_TYPE_NO_TUN: 2151 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ 2152 if (l3_type == HNS3_L3_TYPE_IPV4 || 2153 (l3_type == HNS3_L3_TYPE_IPV6 && 2154 (l4_type == HNS3_L4_TYPE_UDP || 2155 l4_type == HNS3_L4_TYPE_TCP || 2156 l4_type == HNS3_L4_TYPE_SCTP))) 2157 skb->ip_summed = CHECKSUM_UNNECESSARY; 2158 break; 2159 } 2160 } 2161 2162 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) 2163 { 2164 napi_gro_receive(&ring->tqp_vector->napi, skb); 2165 } 2166 2167 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, 2168 struct sk_buff **out_skb, int *out_bnum) 2169 { 2170 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2171 struct hns3_desc_cb *desc_cb; 2172 struct hns3_desc *desc; 2173 struct sk_buff *skb; 2174 unsigned char *va; 2175 u32 bd_base_info; 2176 int pull_len; 2177 u32 l234info; 2178 int length; 2179 int bnum; 2180 2181 desc = &ring->desc[ring->next_to_clean]; 2182 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2183 2184 prefetch(desc); 2185 2186 length = le16_to_cpu(desc->rx.pkt_len); 2187 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2188 l234info = le32_to_cpu(desc->rx.l234_info); 2189 2190 /* Check valid BD */ 2191 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) 2192 return -EFAULT; 2193 2194 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; 2195 2196 /* Prefetch first cache line of first page 2197 * Idea is to cache few bytes of the header of the packet. Our L1 Cache 2198 * line size is 64B so need to prefetch twice to make it 128B. But in 2199 * actual we can have greater size of caches with 128B Level 1 cache 2200 * lines. In such a case, single fetch would suffice to cache in the 2201 * relevant part of the header. 2202 */ 2203 prefetch(va); 2204 #if L1_CACHE_BYTES < 128 2205 prefetch(va + L1_CACHE_BYTES); 2206 #endif 2207 2208 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, 2209 HNS3_RX_HEAD_SIZE); 2210 if (unlikely(!skb)) { 2211 netdev_err(netdev, "alloc rx skb fail\n"); 2212 2213 u64_stats_update_begin(&ring->syncp); 2214 ring->stats.sw_err_cnt++; 2215 u64_stats_update_end(&ring->syncp); 2216 2217 return -ENOMEM; 2218 } 2219 2220 prefetchw(skb->data); 2221 2222 /* Based on hw strategy, the tag offloaded will be stored at 2223 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 2224 * in one layer tag case. 2225 */ 2226 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 2227 u16 vlan_tag; 2228 2229 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2230 if (!(vlan_tag & VLAN_VID_MASK)) 2231 vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2232 if (vlan_tag & VLAN_VID_MASK) 2233 __vlan_hwaccel_put_tag(skb, 2234 htons(ETH_P_8021Q), 2235 vlan_tag); 2236 } 2237 2238 bnum = 1; 2239 if (length <= HNS3_RX_HEAD_SIZE) { 2240 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 2241 2242 /* We can reuse buffer as-is, just make sure it is local */ 2243 if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) 2244 desc_cb->reuse_flag = 1; 2245 else /* This page cannot be reused so discard it */ 2246 put_page(desc_cb->priv); 2247 2248 ring_ptr_move_fw(ring, next_to_clean); 2249 } else { 2250 u64_stats_update_begin(&ring->syncp); 2251 ring->stats.seg_pkt_cnt++; 2252 u64_stats_update_end(&ring->syncp); 2253 2254 pull_len = hns3_nic_get_headlen(va, l234info, 2255 HNS3_RX_HEAD_SIZE); 2256 memcpy(__skb_put(skb, pull_len), va, 2257 ALIGN(pull_len, sizeof(long))); 2258 2259 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); 2260 ring_ptr_move_fw(ring, next_to_clean); 2261 2262 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { 2263 desc = &ring->desc[ring->next_to_clean]; 2264 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2265 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2266 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); 2267 ring_ptr_move_fw(ring, next_to_clean); 2268 bnum++; 2269 } 2270 } 2271 2272 *out_bnum = bnum; 2273 2274 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { 2275 netdev_err(netdev, "no valid bd,%016llx,%016llx\n", 2276 ((u64 *)desc)[0], ((u64 *)desc)[1]); 2277 u64_stats_update_begin(&ring->syncp); 2278 ring->stats.non_vld_descs++; 2279 u64_stats_update_end(&ring->syncp); 2280 2281 dev_kfree_skb_any(skb); 2282 return -EINVAL; 2283 } 2284 2285 if (unlikely((!desc->rx.pkt_len) || 2286 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { 2287 netdev_err(netdev, "truncated pkt\n"); 2288 u64_stats_update_begin(&ring->syncp); 2289 ring->stats.err_pkt_len++; 2290 u64_stats_update_end(&ring->syncp); 2291 2292 dev_kfree_skb_any(skb); 2293 return -EFAULT; 2294 } 2295 2296 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { 2297 netdev_err(netdev, "L2 error pkt\n"); 2298 u64_stats_update_begin(&ring->syncp); 2299 ring->stats.l2_err++; 2300 u64_stats_update_end(&ring->syncp); 2301 2302 dev_kfree_skb_any(skb); 2303 return -EFAULT; 2304 } 2305 2306 u64_stats_update_begin(&ring->syncp); 2307 ring->stats.rx_pkts++; 2308 ring->stats.rx_bytes += skb->len; 2309 u64_stats_update_end(&ring->syncp); 2310 2311 ring->tqp_vector->rx_group.total_bytes += skb->len; 2312 2313 hns3_rx_checksum(ring, skb, desc); 2314 return 0; 2315 } 2316 2317 int hns3_clean_rx_ring( 2318 struct hns3_enet_ring *ring, int budget, 2319 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) 2320 { 2321 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 2322 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2323 int recv_pkts, recv_bds, clean_count, err; 2324 int unused_count = hns3_desc_unused(ring); 2325 struct sk_buff *skb = NULL; 2326 int num, bnum = 0; 2327 2328 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); 2329 rmb(); /* Make sure num taken effect before the other data is touched */ 2330 2331 recv_pkts = 0, recv_bds = 0, clean_count = 0; 2332 num -= unused_count; 2333 2334 while (recv_pkts < budget && recv_bds < num) { 2335 /* Reuse or realloc buffers */ 2336 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { 2337 hns3_nic_alloc_rx_buffers(ring, 2338 clean_count + unused_count); 2339 clean_count = 0; 2340 unused_count = hns3_desc_unused(ring); 2341 } 2342 2343 /* Poll one pkt */ 2344 err = hns3_handle_rx_bd(ring, &skb, &bnum); 2345 if (unlikely(!skb)) /* This fault cannot be repaired */ 2346 goto out; 2347 2348 recv_bds += bnum; 2349 clean_count += bnum; 2350 if (unlikely(err)) { /* Do jump the err */ 2351 recv_pkts++; 2352 continue; 2353 } 2354 2355 /* Do update ip stack process */ 2356 skb->protocol = eth_type_trans(skb, netdev); 2357 rx_fn(ring, skb); 2358 2359 recv_pkts++; 2360 } 2361 2362 out: 2363 /* Make all data has been write before submit */ 2364 if (clean_count + unused_count > 0) 2365 hns3_nic_alloc_rx_buffers(ring, 2366 clean_count + unused_count); 2367 2368 return recv_pkts; 2369 } 2370 2371 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) 2372 { 2373 struct hns3_enet_tqp_vector *tqp_vector = 2374 ring_group->ring->tqp_vector; 2375 enum hns3_flow_level_range new_flow_level; 2376 int packets_per_msecs; 2377 int bytes_per_msecs; 2378 u32 time_passed_ms; 2379 u16 new_int_gl; 2380 2381 if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies) 2382 return false; 2383 2384 if (ring_group->total_packets == 0) { 2385 ring_group->coal.int_gl = HNS3_INT_GL_50K; 2386 ring_group->coal.flow_level = HNS3_FLOW_LOW; 2387 return true; 2388 } 2389 2390 /* Simple throttlerate management 2391 * 0-10MB/s lower (50000 ints/s) 2392 * 10-20MB/s middle (20000 ints/s) 2393 * 20-1249MB/s high (18000 ints/s) 2394 * > 40000pps ultra (8000 ints/s) 2395 */ 2396 new_flow_level = ring_group->coal.flow_level; 2397 new_int_gl = ring_group->coal.int_gl; 2398 time_passed_ms = 2399 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); 2400 2401 if (!time_passed_ms) 2402 return false; 2403 2404 do_div(ring_group->total_packets, time_passed_ms); 2405 packets_per_msecs = ring_group->total_packets; 2406 2407 do_div(ring_group->total_bytes, time_passed_ms); 2408 bytes_per_msecs = ring_group->total_bytes; 2409 2410 #define HNS3_RX_LOW_BYTE_RATE 10000 2411 #define HNS3_RX_MID_BYTE_RATE 20000 2412 2413 switch (new_flow_level) { 2414 case HNS3_FLOW_LOW: 2415 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) 2416 new_flow_level = HNS3_FLOW_MID; 2417 break; 2418 case HNS3_FLOW_MID: 2419 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) 2420 new_flow_level = HNS3_FLOW_HIGH; 2421 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) 2422 new_flow_level = HNS3_FLOW_LOW; 2423 break; 2424 case HNS3_FLOW_HIGH: 2425 case HNS3_FLOW_ULTRA: 2426 default: 2427 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) 2428 new_flow_level = HNS3_FLOW_MID; 2429 break; 2430 } 2431 2432 #define HNS3_RX_ULTRA_PACKET_RATE 40 2433 2434 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && 2435 &tqp_vector->rx_group == ring_group) 2436 new_flow_level = HNS3_FLOW_ULTRA; 2437 2438 switch (new_flow_level) { 2439 case HNS3_FLOW_LOW: 2440 new_int_gl = HNS3_INT_GL_50K; 2441 break; 2442 case HNS3_FLOW_MID: 2443 new_int_gl = HNS3_INT_GL_20K; 2444 break; 2445 case HNS3_FLOW_HIGH: 2446 new_int_gl = HNS3_INT_GL_18K; 2447 break; 2448 case HNS3_FLOW_ULTRA: 2449 new_int_gl = HNS3_INT_GL_8K; 2450 break; 2451 default: 2452 break; 2453 } 2454 2455 ring_group->total_bytes = 0; 2456 ring_group->total_packets = 0; 2457 ring_group->coal.flow_level = new_flow_level; 2458 if (new_int_gl != ring_group->coal.int_gl) { 2459 ring_group->coal.int_gl = new_int_gl; 2460 return true; 2461 } 2462 return false; 2463 } 2464 2465 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) 2466 { 2467 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 2468 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 2469 bool rx_update, tx_update; 2470 2471 if (tqp_vector->int_adapt_down > 0) { 2472 tqp_vector->int_adapt_down--; 2473 return; 2474 } 2475 2476 if (rx_group->coal.gl_adapt_enable) { 2477 rx_update = hns3_get_new_int_gl(rx_group); 2478 if (rx_update) 2479 hns3_set_vector_coalesce_rx_gl(tqp_vector, 2480 rx_group->coal.int_gl); 2481 } 2482 2483 if (tx_group->coal.gl_adapt_enable) { 2484 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group); 2485 if (tx_update) 2486 hns3_set_vector_coalesce_tx_gl(tqp_vector, 2487 tx_group->coal.int_gl); 2488 } 2489 2490 tqp_vector->last_jiffies = jiffies; 2491 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; 2492 } 2493 2494 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 2495 { 2496 struct hns3_enet_ring *ring; 2497 int rx_pkt_total = 0; 2498 2499 struct hns3_enet_tqp_vector *tqp_vector = 2500 container_of(napi, struct hns3_enet_tqp_vector, napi); 2501 bool clean_complete = true; 2502 int rx_budget; 2503 2504 /* Since the actual Tx work is minimal, we can give the Tx a larger 2505 * budget and be more aggressive about cleaning up the Tx descriptors. 2506 */ 2507 hns3_for_each_ring(ring, tqp_vector->tx_group) { 2508 if (!hns3_clean_tx_ring(ring, budget)) 2509 clean_complete = false; 2510 } 2511 2512 /* make sure rx ring budget not smaller than 1 */ 2513 rx_budget = max(budget / tqp_vector->num_tqps, 1); 2514 2515 hns3_for_each_ring(ring, tqp_vector->rx_group) { 2516 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 2517 hns3_rx_skb); 2518 2519 if (rx_cleaned >= rx_budget) 2520 clean_complete = false; 2521 2522 rx_pkt_total += rx_cleaned; 2523 } 2524 2525 tqp_vector->rx_group.total_packets += rx_pkt_total; 2526 2527 if (!clean_complete) 2528 return budget; 2529 2530 napi_complete(napi); 2531 hns3_update_new_int_gl(tqp_vector); 2532 hns3_mask_vector_irq(tqp_vector, 1); 2533 2534 return rx_pkt_total; 2535 } 2536 2537 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 2538 struct hnae3_ring_chain_node *head) 2539 { 2540 struct pci_dev *pdev = tqp_vector->handle->pdev; 2541 struct hnae3_ring_chain_node *cur_chain = head; 2542 struct hnae3_ring_chain_node *chain; 2543 struct hns3_enet_ring *tx_ring; 2544 struct hns3_enet_ring *rx_ring; 2545 2546 tx_ring = tqp_vector->tx_group.ring; 2547 if (tx_ring) { 2548 cur_chain->tqp_index = tx_ring->tqp->tqp_index; 2549 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 2550 HNAE3_RING_TYPE_TX); 2551 hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2552 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); 2553 2554 cur_chain->next = NULL; 2555 2556 while (tx_ring->next) { 2557 tx_ring = tx_ring->next; 2558 2559 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), 2560 GFP_KERNEL); 2561 if (!chain) 2562 return -ENOMEM; 2563 2564 cur_chain->next = chain; 2565 chain->tqp_index = tx_ring->tqp->tqp_index; 2566 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, 2567 HNAE3_RING_TYPE_TX); 2568 hnae_set_field(chain->int_gl_idx, 2569 HNAE3_RING_GL_IDX_M, 2570 HNAE3_RING_GL_IDX_S, 2571 HNAE3_RING_GL_TX); 2572 2573 cur_chain = chain; 2574 } 2575 } 2576 2577 rx_ring = tqp_vector->rx_group.ring; 2578 if (!tx_ring && rx_ring) { 2579 cur_chain->next = NULL; 2580 cur_chain->tqp_index = rx_ring->tqp->tqp_index; 2581 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 2582 HNAE3_RING_TYPE_RX); 2583 hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2584 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 2585 2586 rx_ring = rx_ring->next; 2587 } 2588 2589 while (rx_ring) { 2590 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 2591 if (!chain) 2592 return -ENOMEM; 2593 2594 cur_chain->next = chain; 2595 chain->tqp_index = rx_ring->tqp->tqp_index; 2596 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, 2597 HNAE3_RING_TYPE_RX); 2598 hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2599 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 2600 2601 cur_chain = chain; 2602 2603 rx_ring = rx_ring->next; 2604 } 2605 2606 return 0; 2607 } 2608 2609 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 2610 struct hnae3_ring_chain_node *head) 2611 { 2612 struct pci_dev *pdev = tqp_vector->handle->pdev; 2613 struct hnae3_ring_chain_node *chain_tmp, *chain; 2614 2615 chain = head->next; 2616 2617 while (chain) { 2618 chain_tmp = chain->next; 2619 devm_kfree(&pdev->dev, chain); 2620 chain = chain_tmp; 2621 } 2622 } 2623 2624 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 2625 struct hns3_enet_ring *ring) 2626 { 2627 ring->next = group->ring; 2628 group->ring = ring; 2629 2630 group->count++; 2631 } 2632 2633 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 2634 { 2635 struct hnae3_ring_chain_node vector_ring_chain; 2636 struct hnae3_handle *h = priv->ae_handle; 2637 struct hns3_enet_tqp_vector *tqp_vector; 2638 int ret = 0; 2639 u16 i; 2640 2641 for (i = 0; i < priv->vector_num; i++) { 2642 tqp_vector = &priv->tqp_vector[i]; 2643 hns3_vector_gl_rl_init_hw(tqp_vector, priv); 2644 tqp_vector->num_tqps = 0; 2645 } 2646 2647 for (i = 0; i < h->kinfo.num_tqps; i++) { 2648 u16 vector_i = i % priv->vector_num; 2649 u16 tqp_num = h->kinfo.num_tqps; 2650 2651 tqp_vector = &priv->tqp_vector[vector_i]; 2652 2653 hns3_add_ring_to_group(&tqp_vector->tx_group, 2654 priv->ring_data[i].ring); 2655 2656 hns3_add_ring_to_group(&tqp_vector->rx_group, 2657 priv->ring_data[i + tqp_num].ring); 2658 2659 priv->ring_data[i].ring->tqp_vector = tqp_vector; 2660 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; 2661 tqp_vector->num_tqps++; 2662 } 2663 2664 for (i = 0; i < priv->vector_num; i++) { 2665 tqp_vector = &priv->tqp_vector[i]; 2666 2667 tqp_vector->rx_group.total_bytes = 0; 2668 tqp_vector->rx_group.total_packets = 0; 2669 tqp_vector->tx_group.total_bytes = 0; 2670 tqp_vector->tx_group.total_packets = 0; 2671 tqp_vector->handle = h; 2672 2673 ret = hns3_get_vector_ring_chain(tqp_vector, 2674 &vector_ring_chain); 2675 if (ret) 2676 return ret; 2677 2678 ret = h->ae_algo->ops->map_ring_to_vector(h, 2679 tqp_vector->vector_irq, &vector_ring_chain); 2680 2681 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 2682 2683 if (ret) 2684 return ret; 2685 2686 netif_napi_add(priv->netdev, &tqp_vector->napi, 2687 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 2688 } 2689 2690 return 0; 2691 } 2692 2693 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 2694 { 2695 struct hnae3_handle *h = priv->ae_handle; 2696 struct hns3_enet_tqp_vector *tqp_vector; 2697 struct hnae3_vector_info *vector; 2698 struct pci_dev *pdev = h->pdev; 2699 u16 tqp_num = h->kinfo.num_tqps; 2700 u16 vector_num; 2701 int ret = 0; 2702 u16 i; 2703 2704 /* RSS size, cpu online and vector_num should be the same */ 2705 /* Should consider 2p/4p later */ 2706 vector_num = min_t(u16, num_online_cpus(), tqp_num); 2707 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), 2708 GFP_KERNEL); 2709 if (!vector) 2710 return -ENOMEM; 2711 2712 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 2713 2714 priv->vector_num = vector_num; 2715 priv->tqp_vector = (struct hns3_enet_tqp_vector *) 2716 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 2717 GFP_KERNEL); 2718 if (!priv->tqp_vector) { 2719 ret = -ENOMEM; 2720 goto out; 2721 } 2722 2723 for (i = 0; i < priv->vector_num; i++) { 2724 tqp_vector = &priv->tqp_vector[i]; 2725 tqp_vector->idx = i; 2726 tqp_vector->mask_addr = vector[i].io_addr; 2727 tqp_vector->vector_irq = vector[i].vector; 2728 hns3_vector_gl_rl_init(tqp_vector, priv); 2729 } 2730 2731 out: 2732 devm_kfree(&pdev->dev, vector); 2733 return ret; 2734 } 2735 2736 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) 2737 { 2738 group->ring = NULL; 2739 group->count = 0; 2740 } 2741 2742 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 2743 { 2744 struct hnae3_ring_chain_node vector_ring_chain; 2745 struct hnae3_handle *h = priv->ae_handle; 2746 struct hns3_enet_tqp_vector *tqp_vector; 2747 int i, ret; 2748 2749 for (i = 0; i < priv->vector_num; i++) { 2750 tqp_vector = &priv->tqp_vector[i]; 2751 2752 ret = hns3_get_vector_ring_chain(tqp_vector, 2753 &vector_ring_chain); 2754 if (ret) 2755 return ret; 2756 2757 ret = h->ae_algo->ops->unmap_ring_from_vector(h, 2758 tqp_vector->vector_irq, &vector_ring_chain); 2759 if (ret) 2760 return ret; 2761 2762 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 2763 if (ret) 2764 return ret; 2765 2766 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 2767 2768 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { 2769 (void)irq_set_affinity_hint( 2770 priv->tqp_vector[i].vector_irq, 2771 NULL); 2772 free_irq(priv->tqp_vector[i].vector_irq, 2773 &priv->tqp_vector[i]); 2774 } 2775 2776 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; 2777 hns3_clear_ring_group(&tqp_vector->rx_group); 2778 hns3_clear_ring_group(&tqp_vector->tx_group); 2779 netif_napi_del(&priv->tqp_vector[i].napi); 2780 } 2781 2782 return 0; 2783 } 2784 2785 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 2786 { 2787 struct hnae3_handle *h = priv->ae_handle; 2788 struct pci_dev *pdev = h->pdev; 2789 int i, ret; 2790 2791 for (i = 0; i < priv->vector_num; i++) { 2792 struct hns3_enet_tqp_vector *tqp_vector; 2793 2794 tqp_vector = &priv->tqp_vector[i]; 2795 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 2796 if (ret) 2797 return ret; 2798 } 2799 2800 devm_kfree(&pdev->dev, priv->tqp_vector); 2801 return 0; 2802 } 2803 2804 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 2805 int ring_type) 2806 { 2807 struct hns3_nic_ring_data *ring_data = priv->ring_data; 2808 int queue_num = priv->ae_handle->kinfo.num_tqps; 2809 struct pci_dev *pdev = priv->ae_handle->pdev; 2810 struct hns3_enet_ring *ring; 2811 2812 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); 2813 if (!ring) 2814 return -ENOMEM; 2815 2816 if (ring_type == HNAE3_RING_TYPE_TX) { 2817 ring_data[q->tqp_index].ring = ring; 2818 ring_data[q->tqp_index].queue_index = q->tqp_index; 2819 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; 2820 } else { 2821 ring_data[q->tqp_index + queue_num].ring = ring; 2822 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; 2823 ring->io_base = q->io_base; 2824 } 2825 2826 hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 2827 2828 ring->tqp = q; 2829 ring->desc = NULL; 2830 ring->desc_cb = NULL; 2831 ring->dev = priv->dev; 2832 ring->desc_dma_addr = 0; 2833 ring->buf_size = q->buf_size; 2834 ring->desc_num = q->desc_num; 2835 ring->next_to_use = 0; 2836 ring->next_to_clean = 0; 2837 2838 return 0; 2839 } 2840 2841 static int hns3_queue_to_ring(struct hnae3_queue *tqp, 2842 struct hns3_nic_priv *priv) 2843 { 2844 int ret; 2845 2846 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 2847 if (ret) 2848 return ret; 2849 2850 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 2851 if (ret) 2852 return ret; 2853 2854 return 0; 2855 } 2856 2857 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 2858 { 2859 struct hnae3_handle *h = priv->ae_handle; 2860 struct pci_dev *pdev = h->pdev; 2861 int i, ret; 2862 2863 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps * 2864 sizeof(*priv->ring_data) * 2, 2865 GFP_KERNEL); 2866 if (!priv->ring_data) 2867 return -ENOMEM; 2868 2869 for (i = 0; i < h->kinfo.num_tqps; i++) { 2870 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); 2871 if (ret) 2872 goto err; 2873 } 2874 2875 return 0; 2876 err: 2877 devm_kfree(&pdev->dev, priv->ring_data); 2878 return ret; 2879 } 2880 2881 static void hns3_put_ring_config(struct hns3_nic_priv *priv) 2882 { 2883 struct hnae3_handle *h = priv->ae_handle; 2884 int i; 2885 2886 for (i = 0; i < h->kinfo.num_tqps; i++) { 2887 devm_kfree(priv->dev, priv->ring_data[i].ring); 2888 devm_kfree(priv->dev, 2889 priv->ring_data[i + h->kinfo.num_tqps].ring); 2890 } 2891 devm_kfree(priv->dev, priv->ring_data); 2892 } 2893 2894 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) 2895 { 2896 int ret; 2897 2898 if (ring->desc_num <= 0 || ring->buf_size <= 0) 2899 return -EINVAL; 2900 2901 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), 2902 GFP_KERNEL); 2903 if (!ring->desc_cb) { 2904 ret = -ENOMEM; 2905 goto out; 2906 } 2907 2908 ret = hns3_alloc_desc(ring); 2909 if (ret) 2910 goto out_with_desc_cb; 2911 2912 if (!HNAE3_IS_TX_RING(ring)) { 2913 ret = hns3_alloc_ring_buffers(ring); 2914 if (ret) 2915 goto out_with_desc; 2916 } 2917 2918 return 0; 2919 2920 out_with_desc: 2921 hns3_free_desc(ring); 2922 out_with_desc_cb: 2923 kfree(ring->desc_cb); 2924 ring->desc_cb = NULL; 2925 out: 2926 return ret; 2927 } 2928 2929 static void hns3_fini_ring(struct hns3_enet_ring *ring) 2930 { 2931 hns3_free_desc(ring); 2932 kfree(ring->desc_cb); 2933 ring->desc_cb = NULL; 2934 ring->next_to_clean = 0; 2935 ring->next_to_use = 0; 2936 } 2937 2938 static int hns3_buf_size2type(u32 buf_size) 2939 { 2940 int bd_size_type; 2941 2942 switch (buf_size) { 2943 case 512: 2944 bd_size_type = HNS3_BD_SIZE_512_TYPE; 2945 break; 2946 case 1024: 2947 bd_size_type = HNS3_BD_SIZE_1024_TYPE; 2948 break; 2949 case 2048: 2950 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 2951 break; 2952 case 4096: 2953 bd_size_type = HNS3_BD_SIZE_4096_TYPE; 2954 break; 2955 default: 2956 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 2957 } 2958 2959 return bd_size_type; 2960 } 2961 2962 static void hns3_init_ring_hw(struct hns3_enet_ring *ring) 2963 { 2964 dma_addr_t dma = ring->desc_dma_addr; 2965 struct hnae3_queue *q = ring->tqp; 2966 2967 if (!HNAE3_IS_TX_RING(ring)) { 2968 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, 2969 (u32)dma); 2970 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, 2971 (u32)((dma >> 31) >> 1)); 2972 2973 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, 2974 hns3_buf_size2type(ring->buf_size)); 2975 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, 2976 ring->desc_num / 8 - 1); 2977 2978 } else { 2979 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, 2980 (u32)dma); 2981 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 2982 (u32)((dma >> 31) >> 1)); 2983 2984 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG, 2985 hns3_buf_size2type(ring->buf_size)); 2986 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 2987 ring->desc_num / 8 - 1); 2988 } 2989 } 2990 2991 int hns3_init_all_ring(struct hns3_nic_priv *priv) 2992 { 2993 struct hnae3_handle *h = priv->ae_handle; 2994 int ring_num = h->kinfo.num_tqps * 2; 2995 int i, j; 2996 int ret; 2997 2998 for (i = 0; i < ring_num; i++) { 2999 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); 3000 if (ret) { 3001 dev_err(priv->dev, 3002 "Alloc ring memory fail! ret=%d\n", ret); 3003 goto out_when_alloc_ring_memory; 3004 } 3005 3006 hns3_init_ring_hw(priv->ring_data[i].ring); 3007 3008 u64_stats_init(&priv->ring_data[i].ring->syncp); 3009 } 3010 3011 return 0; 3012 3013 out_when_alloc_ring_memory: 3014 for (j = i - 1; j >= 0; j--) 3015 hns3_fini_ring(priv->ring_data[j].ring); 3016 3017 return -ENOMEM; 3018 } 3019 3020 int hns3_uninit_all_ring(struct hns3_nic_priv *priv) 3021 { 3022 struct hnae3_handle *h = priv->ae_handle; 3023 int i; 3024 3025 for (i = 0; i < h->kinfo.num_tqps; i++) { 3026 if (h->ae_algo->ops->reset_queue) 3027 h->ae_algo->ops->reset_queue(h, i); 3028 3029 hns3_fini_ring(priv->ring_data[i].ring); 3030 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); 3031 } 3032 return 0; 3033 } 3034 3035 /* Set mac addr if it is configured. or leave it to the AE driver */ 3036 static void hns3_init_mac_addr(struct net_device *netdev, bool init) 3037 { 3038 struct hns3_nic_priv *priv = netdev_priv(netdev); 3039 struct hnae3_handle *h = priv->ae_handle; 3040 u8 mac_addr_temp[ETH_ALEN]; 3041 3042 if (h->ae_algo->ops->get_mac_addr && init) { 3043 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 3044 ether_addr_copy(netdev->dev_addr, mac_addr_temp); 3045 } 3046 3047 /* Check if the MAC address is valid, if not get a random one */ 3048 if (!is_valid_ether_addr(netdev->dev_addr)) { 3049 eth_hw_addr_random(netdev); 3050 dev_warn(priv->dev, "using random MAC address %pM\n", 3051 netdev->dev_addr); 3052 } 3053 3054 if (h->ae_algo->ops->set_mac_addr) 3055 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 3056 3057 } 3058 3059 static void hns3_nic_set_priv_ops(struct net_device *netdev) 3060 { 3061 struct hns3_nic_priv *priv = netdev_priv(netdev); 3062 3063 if ((netdev->features & NETIF_F_TSO) || 3064 (netdev->features & NETIF_F_TSO6)) { 3065 priv->ops.fill_desc = hns3_fill_desc_tso; 3066 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; 3067 } else { 3068 priv->ops.fill_desc = hns3_fill_desc; 3069 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; 3070 } 3071 } 3072 3073 static int hns3_client_init(struct hnae3_handle *handle) 3074 { 3075 struct pci_dev *pdev = handle->pdev; 3076 struct hns3_nic_priv *priv; 3077 struct net_device *netdev; 3078 int ret; 3079 3080 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), 3081 hns3_get_max_available_channels(handle)); 3082 if (!netdev) 3083 return -ENOMEM; 3084 3085 priv = netdev_priv(netdev); 3086 priv->dev = &pdev->dev; 3087 priv->netdev = netdev; 3088 priv->ae_handle = handle; 3089 priv->ae_handle->reset_level = HNAE3_NONE_RESET; 3090 priv->ae_handle->last_reset_time = jiffies; 3091 priv->tx_timeout_count = 0; 3092 3093 handle->kinfo.netdev = netdev; 3094 handle->priv = (void *)priv; 3095 3096 hns3_init_mac_addr(netdev, true); 3097 3098 hns3_set_default_feature(netdev); 3099 3100 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; 3101 netdev->priv_flags |= IFF_UNICAST_FLT; 3102 netdev->netdev_ops = &hns3_nic_netdev_ops; 3103 SET_NETDEV_DEV(netdev, &pdev->dev); 3104 hns3_ethtool_set_ops(netdev); 3105 hns3_nic_set_priv_ops(netdev); 3106 3107 /* Carrier off reporting is important to ethtool even BEFORE open */ 3108 netif_carrier_off(netdev); 3109 3110 ret = hns3_get_ring_config(priv); 3111 if (ret) { 3112 ret = -ENOMEM; 3113 goto out_get_ring_cfg; 3114 } 3115 3116 ret = hns3_nic_alloc_vector_data(priv); 3117 if (ret) { 3118 ret = -ENOMEM; 3119 goto out_alloc_vector_data; 3120 } 3121 3122 ret = hns3_nic_init_vector_data(priv); 3123 if (ret) { 3124 ret = -ENOMEM; 3125 goto out_init_vector_data; 3126 } 3127 3128 ret = hns3_init_all_ring(priv); 3129 if (ret) { 3130 ret = -ENOMEM; 3131 goto out_init_ring_data; 3132 } 3133 3134 ret = register_netdev(netdev); 3135 if (ret) { 3136 dev_err(priv->dev, "probe register netdev fail!\n"); 3137 goto out_reg_netdev_fail; 3138 } 3139 3140 hns3_dcbnl_setup(handle); 3141 3142 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ 3143 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 3144 3145 return ret; 3146 3147 out_reg_netdev_fail: 3148 out_init_ring_data: 3149 (void)hns3_nic_uninit_vector_data(priv); 3150 out_init_vector_data: 3151 hns3_nic_dealloc_vector_data(priv); 3152 out_alloc_vector_data: 3153 priv->ring_data = NULL; 3154 out_get_ring_cfg: 3155 priv->ae_handle = NULL; 3156 free_netdev(netdev); 3157 return ret; 3158 } 3159 3160 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) 3161 { 3162 struct net_device *netdev = handle->kinfo.netdev; 3163 struct hns3_nic_priv *priv = netdev_priv(netdev); 3164 int ret; 3165 3166 if (netdev->reg_state != NETREG_UNINITIALIZED) 3167 unregister_netdev(netdev); 3168 3169 ret = hns3_nic_uninit_vector_data(priv); 3170 if (ret) 3171 netdev_err(netdev, "uninit vector error\n"); 3172 3173 ret = hns3_nic_dealloc_vector_data(priv); 3174 if (ret) 3175 netdev_err(netdev, "dealloc vector error\n"); 3176 3177 ret = hns3_uninit_all_ring(priv); 3178 if (ret) 3179 netdev_err(netdev, "uninit ring error\n"); 3180 3181 hns3_put_ring_config(priv); 3182 3183 priv->ring_data = NULL; 3184 3185 free_netdev(netdev); 3186 } 3187 3188 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) 3189 { 3190 struct net_device *netdev = handle->kinfo.netdev; 3191 3192 if (!netdev) 3193 return; 3194 3195 if (linkup) { 3196 netif_carrier_on(netdev); 3197 netif_tx_wake_all_queues(netdev); 3198 netdev_info(netdev, "link up\n"); 3199 } else { 3200 netif_carrier_off(netdev); 3201 netif_tx_stop_all_queues(netdev); 3202 netdev_info(netdev, "link down\n"); 3203 } 3204 } 3205 3206 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) 3207 { 3208 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3209 struct net_device *ndev = kinfo->netdev; 3210 bool if_running; 3211 int ret; 3212 u8 i; 3213 3214 if (tc > HNAE3_MAX_TC) 3215 return -EINVAL; 3216 3217 if (!ndev) 3218 return -ENODEV; 3219 3220 if_running = netif_running(ndev); 3221 3222 ret = netdev_set_num_tc(ndev, tc); 3223 if (ret) 3224 return ret; 3225 3226 if (if_running) { 3227 (void)hns3_nic_net_stop(ndev); 3228 msleep(100); 3229 } 3230 3231 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? 3232 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; 3233 if (ret) 3234 goto err_out; 3235 3236 if (tc <= 1) { 3237 netdev_reset_tc(ndev); 3238 goto out; 3239 } 3240 3241 for (i = 0; i < HNAE3_MAX_TC; i++) { 3242 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; 3243 3244 if (tc_info->enable) 3245 netdev_set_tc_queue(ndev, 3246 tc_info->tc, 3247 tc_info->tqp_count, 3248 tc_info->tqp_offset); 3249 } 3250 3251 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { 3252 netdev_set_prio_tc_map(ndev, i, 3253 kinfo->prio_tc[i]); 3254 } 3255 3256 out: 3257 ret = hns3_nic_set_real_num_queue(ndev); 3258 3259 err_out: 3260 if (if_running) 3261 (void)hns3_nic_net_open(ndev); 3262 3263 return ret; 3264 } 3265 3266 static void hns3_recover_hw_addr(struct net_device *ndev) 3267 { 3268 struct netdev_hw_addr_list *list; 3269 struct netdev_hw_addr *ha, *tmp; 3270 3271 /* go through and sync uc_addr entries to the device */ 3272 list = &ndev->uc; 3273 list_for_each_entry_safe(ha, tmp, &list->list, list) 3274 hns3_nic_uc_sync(ndev, ha->addr); 3275 3276 /* go through and sync mc_addr entries to the device */ 3277 list = &ndev->mc; 3278 list_for_each_entry_safe(ha, tmp, &list->list, list) 3279 hns3_nic_mc_sync(ndev, ha->addr); 3280 } 3281 3282 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) 3283 { 3284 if (!HNAE3_IS_TX_RING(ring)) 3285 return; 3286 3287 while (ring->next_to_clean != ring->next_to_use) { 3288 hns3_free_buffer_detach(ring, ring->next_to_clean); 3289 ring_ptr_move_fw(ring, next_to_clean); 3290 } 3291 } 3292 3293 static void hns3_clear_rx_ring(struct hns3_enet_ring *ring) 3294 { 3295 if (HNAE3_IS_TX_RING(ring)) 3296 return; 3297 3298 while (ring->next_to_use != ring->next_to_clean) { 3299 /* When a buffer is not reused, it's memory has been 3300 * freed in hns3_handle_rx_bd or will be freed by 3301 * stack, so only need to unmap the buffer here. 3302 */ 3303 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 3304 hns3_unmap_buffer(ring, 3305 &ring->desc_cb[ring->next_to_use]); 3306 ring->desc_cb[ring->next_to_use].dma = 0; 3307 } 3308 3309 ring_ptr_move_fw(ring, next_to_use); 3310 } 3311 } 3312 3313 static void hns3_clear_all_ring(struct hnae3_handle *h) 3314 { 3315 struct net_device *ndev = h->kinfo.netdev; 3316 struct hns3_nic_priv *priv = netdev_priv(ndev); 3317 u32 i; 3318 3319 for (i = 0; i < h->kinfo.num_tqps; i++) { 3320 struct netdev_queue *dev_queue; 3321 struct hns3_enet_ring *ring; 3322 3323 ring = priv->ring_data[i].ring; 3324 hns3_clear_tx_ring(ring); 3325 dev_queue = netdev_get_tx_queue(ndev, 3326 priv->ring_data[i].queue_index); 3327 netdev_tx_reset_queue(dev_queue); 3328 3329 ring = priv->ring_data[i + h->kinfo.num_tqps].ring; 3330 hns3_clear_rx_ring(ring); 3331 } 3332 } 3333 3334 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) 3335 { 3336 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3337 struct net_device *ndev = kinfo->netdev; 3338 3339 if (!netif_running(ndev)) 3340 return -EIO; 3341 3342 return hns3_nic_net_stop(ndev); 3343 } 3344 3345 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) 3346 { 3347 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3348 int ret = 0; 3349 3350 if (netif_running(kinfo->netdev)) { 3351 ret = hns3_nic_net_up(kinfo->netdev); 3352 if (ret) { 3353 netdev_err(kinfo->netdev, 3354 "hns net up fail, ret=%d!\n", ret); 3355 return ret; 3356 } 3357 handle->last_reset_time = jiffies; 3358 } 3359 3360 return ret; 3361 } 3362 3363 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) 3364 { 3365 struct net_device *netdev = handle->kinfo.netdev; 3366 struct hns3_nic_priv *priv = netdev_priv(netdev); 3367 int ret; 3368 3369 hns3_init_mac_addr(netdev, false); 3370 hns3_nic_set_rx_mode(netdev); 3371 hns3_recover_hw_addr(netdev); 3372 3373 /* Hardware table is only clear when pf resets */ 3374 if (!(handle->flags & HNAE3_SUPPORT_VF)) 3375 hns3_restore_vlan(netdev); 3376 3377 /* Carrier off reporting is important to ethtool even BEFORE open */ 3378 netif_carrier_off(netdev); 3379 3380 ret = hns3_get_ring_config(priv); 3381 if (ret) 3382 return ret; 3383 3384 ret = hns3_nic_init_vector_data(priv); 3385 if (ret) 3386 return ret; 3387 3388 ret = hns3_init_all_ring(priv); 3389 if (ret) { 3390 hns3_nic_uninit_vector_data(priv); 3391 priv->ring_data = NULL; 3392 } 3393 3394 return ret; 3395 } 3396 3397 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) 3398 { 3399 struct net_device *netdev = handle->kinfo.netdev; 3400 struct hns3_nic_priv *priv = netdev_priv(netdev); 3401 int ret; 3402 3403 hns3_clear_all_ring(handle); 3404 3405 ret = hns3_nic_uninit_vector_data(priv); 3406 if (ret) { 3407 netdev_err(netdev, "uninit vector error\n"); 3408 return ret; 3409 } 3410 3411 ret = hns3_uninit_all_ring(priv); 3412 if (ret) 3413 netdev_err(netdev, "uninit ring error\n"); 3414 3415 hns3_put_ring_config(priv); 3416 3417 priv->ring_data = NULL; 3418 3419 return ret; 3420 } 3421 3422 static int hns3_reset_notify(struct hnae3_handle *handle, 3423 enum hnae3_reset_notify_type type) 3424 { 3425 int ret = 0; 3426 3427 switch (type) { 3428 case HNAE3_UP_CLIENT: 3429 ret = hns3_reset_notify_up_enet(handle); 3430 break; 3431 case HNAE3_DOWN_CLIENT: 3432 ret = hns3_reset_notify_down_enet(handle); 3433 break; 3434 case HNAE3_INIT_CLIENT: 3435 ret = hns3_reset_notify_init_enet(handle); 3436 break; 3437 case HNAE3_UNINIT_CLIENT: 3438 ret = hns3_reset_notify_uninit_enet(handle); 3439 break; 3440 default: 3441 break; 3442 } 3443 3444 return ret; 3445 } 3446 3447 static void hns3_restore_coal(struct hns3_nic_priv *priv, 3448 struct hns3_enet_coalesce *tx, 3449 struct hns3_enet_coalesce *rx) 3450 { 3451 u16 vector_num = priv->vector_num; 3452 int i; 3453 3454 for (i = 0; i < vector_num; i++) { 3455 memcpy(&priv->tqp_vector[i].tx_group.coal, tx, 3456 sizeof(struct hns3_enet_coalesce)); 3457 memcpy(&priv->tqp_vector[i].rx_group.coal, rx, 3458 sizeof(struct hns3_enet_coalesce)); 3459 } 3460 } 3461 3462 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num, 3463 struct hns3_enet_coalesce *tx, 3464 struct hns3_enet_coalesce *rx) 3465 { 3466 struct hns3_nic_priv *priv = netdev_priv(netdev); 3467 struct hnae3_handle *h = hns3_get_handle(netdev); 3468 int ret; 3469 3470 ret = h->ae_algo->ops->set_channels(h, new_tqp_num); 3471 if (ret) 3472 return ret; 3473 3474 ret = hns3_get_ring_config(priv); 3475 if (ret) 3476 return ret; 3477 3478 ret = hns3_nic_alloc_vector_data(priv); 3479 if (ret) 3480 goto err_alloc_vector; 3481 3482 hns3_restore_coal(priv, tx, rx); 3483 3484 ret = hns3_nic_init_vector_data(priv); 3485 if (ret) 3486 goto err_uninit_vector; 3487 3488 ret = hns3_init_all_ring(priv); 3489 if (ret) 3490 goto err_put_ring; 3491 3492 return 0; 3493 3494 err_put_ring: 3495 hns3_put_ring_config(priv); 3496 err_uninit_vector: 3497 hns3_nic_uninit_vector_data(priv); 3498 err_alloc_vector: 3499 hns3_nic_dealloc_vector_data(priv); 3500 return ret; 3501 } 3502 3503 static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num) 3504 { 3505 return (new_tqp_num / num_tc) * num_tc; 3506 } 3507 3508 int hns3_set_channels(struct net_device *netdev, 3509 struct ethtool_channels *ch) 3510 { 3511 struct hns3_nic_priv *priv = netdev_priv(netdev); 3512 struct hnae3_handle *h = hns3_get_handle(netdev); 3513 struct hnae3_knic_private_info *kinfo = &h->kinfo; 3514 struct hns3_enet_coalesce tx_coal, rx_coal; 3515 bool if_running = netif_running(netdev); 3516 u32 new_tqp_num = ch->combined_count; 3517 u16 org_tqp_num; 3518 int ret; 3519 3520 if (ch->rx_count || ch->tx_count) 3521 return -EINVAL; 3522 3523 if (new_tqp_num > hns3_get_max_available_channels(h) || 3524 new_tqp_num < kinfo->num_tc) { 3525 dev_err(&netdev->dev, 3526 "Change tqps fail, the tqp range is from %d to %d", 3527 kinfo->num_tc, 3528 hns3_get_max_available_channels(h)); 3529 return -EINVAL; 3530 } 3531 3532 new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num); 3533 if (kinfo->num_tqps == new_tqp_num) 3534 return 0; 3535 3536 if (if_running) 3537 hns3_nic_net_stop(netdev); 3538 3539 hns3_clear_all_ring(h); 3540 3541 ret = hns3_nic_uninit_vector_data(priv); 3542 if (ret) { 3543 dev_err(&netdev->dev, 3544 "Unbind vector with tqp fail, nothing is changed"); 3545 goto open_netdev; 3546 } 3547 3548 /* Changing the tqp num may also change the vector num, 3549 * ethtool only support setting and querying one coal 3550 * configuation for now, so save the vector 0' coal 3551 * configuation here in order to restore it. 3552 */ 3553 memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal, 3554 sizeof(struct hns3_enet_coalesce)); 3555 memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal, 3556 sizeof(struct hns3_enet_coalesce)); 3557 3558 hns3_nic_dealloc_vector_data(priv); 3559 3560 hns3_uninit_all_ring(priv); 3561 hns3_put_ring_config(priv); 3562 3563 org_tqp_num = h->kinfo.num_tqps; 3564 ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal); 3565 if (ret) { 3566 ret = hns3_modify_tqp_num(netdev, org_tqp_num, 3567 &tx_coal, &rx_coal); 3568 if (ret) { 3569 /* If revert to old tqp failed, fatal error occurred */ 3570 dev_err(&netdev->dev, 3571 "Revert to old tqp num fail, ret=%d", ret); 3572 return ret; 3573 } 3574 dev_info(&netdev->dev, 3575 "Change tqp num fail, Revert to old tqp num"); 3576 } 3577 3578 open_netdev: 3579 if (if_running) 3580 hns3_nic_net_open(netdev); 3581 3582 return ret; 3583 } 3584 3585 static const struct hnae3_client_ops client_ops = { 3586 .init_instance = hns3_client_init, 3587 .uninit_instance = hns3_client_uninit, 3588 .link_status_change = hns3_link_status_change, 3589 .setup_tc = hns3_client_setup_tc, 3590 .reset_notify = hns3_reset_notify, 3591 }; 3592 3593 /* hns3_init_module - Driver registration routine 3594 * hns3_init_module is the first routine called when the driver is 3595 * loaded. All it does is register with the PCI subsystem. 3596 */ 3597 static int __init hns3_init_module(void) 3598 { 3599 int ret; 3600 3601 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); 3602 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); 3603 3604 client.type = HNAE3_CLIENT_KNIC; 3605 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", 3606 hns3_driver_name); 3607 3608 client.ops = &client_ops; 3609 3610 ret = hnae3_register_client(&client); 3611 if (ret) 3612 return ret; 3613 3614 ret = pci_register_driver(&hns3_driver); 3615 if (ret) 3616 hnae3_unregister_client(&client); 3617 3618 return ret; 3619 } 3620 module_init(hns3_init_module); 3621 3622 /* hns3_exit_module - Driver exit cleanup routine 3623 * hns3_exit_module is called just before the driver is removed 3624 * from memory. 3625 */ 3626 static void __exit hns3_exit_module(void) 3627 { 3628 pci_unregister_driver(&hns3_driver); 3629 hnae3_unregister_client(&client); 3630 } 3631 module_exit(hns3_exit_module); 3632 3633 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); 3634 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3635 MODULE_LICENSE("GPL"); 3636 MODULE_ALIAS("pci:hns-nic"); 3637