1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/dma-mapping.h> 5 #include <linux/etherdevice.h> 6 #include <linux/interrupt.h> 7 #ifdef CONFIG_RFS_ACCEL 8 #include <linux/cpu_rmap.h> 9 #endif 10 #include <linux/if_vlan.h> 11 #include <linux/irq.h> 12 #include <linux/ip.h> 13 #include <linux/ipv6.h> 14 #include <linux/module.h> 15 #include <linux/pci.h> 16 #include <linux/aer.h> 17 #include <linux/skbuff.h> 18 #include <linux/sctp.h> 19 #include <net/gre.h> 20 #include <net/ip6_checksum.h> 21 #include <net/pkt_cls.h> 22 #include <net/tcp.h> 23 #include <net/vxlan.h> 24 #include <net/geneve.h> 25 26 #include "hnae3.h" 27 #include "hns3_enet.h" 28 /* All hns3 tracepoints are defined by the include below, which 29 * must be included exactly once across the whole kernel with 30 * CREATE_TRACE_POINTS defined 31 */ 32 #define CREATE_TRACE_POINTS 33 #include "hns3_trace.h" 34 35 #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) 36 #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) 37 38 #define hns3_rl_err(fmt, ...) \ 39 do { \ 40 if (net_ratelimit()) \ 41 netdev_err(fmt, ##__VA_ARGS__); \ 42 } while (0) 43 44 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); 45 46 static const char hns3_driver_name[] = "hns3"; 47 static const char hns3_driver_string[] = 48 "Hisilicon Ethernet Network Driver for Hip08 Family"; 49 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; 50 static struct hnae3_client client; 51 52 static int debug = -1; 53 module_param(debug, int, 0); 54 MODULE_PARM_DESC(debug, " Network interface message level setting"); 55 56 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 57 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) 58 59 #define HNS3_INNER_VLAN_TAG 1 60 #define HNS3_OUTER_VLAN_TAG 2 61 62 #define HNS3_MIN_TX_LEN 33U 63 64 /* hns3_pci_tbl - PCI Device ID Table 65 * 66 * Last entry must be all 0s 67 * 68 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 69 * Class, Class Mask, private data (not used) } 70 */ 71 static const struct pci_device_id hns3_pci_tbl[] = { 72 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 73 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 74 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 75 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 76 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 77 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 79 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 81 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 83 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 85 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 88 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 89 /* required last entry */ 90 {0, } 91 }; 92 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); 93 94 static irqreturn_t hns3_irq_handle(int irq, void *vector) 95 { 96 struct hns3_enet_tqp_vector *tqp_vector = vector; 97 98 napi_schedule_irqoff(&tqp_vector->napi); 99 100 return IRQ_HANDLED; 101 } 102 103 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) 104 { 105 struct hns3_enet_tqp_vector *tqp_vectors; 106 unsigned int i; 107 108 for (i = 0; i < priv->vector_num; i++) { 109 tqp_vectors = &priv->tqp_vector[i]; 110 111 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) 112 continue; 113 114 /* clear the affinity mask 
		 */
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "TxRx", txrx_int_idx++);
			txrx_int_idx++;
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Rx", rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Tx", tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			hns3_nic_uninit_irq(priv);
			return ret;
		}

		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);
	enable_irq(tqp_vector->vector_irq);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* This defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second. GL and RL are two ways to achieve interrupt coalescing.
	 */

	if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
	    !tqp_vector->rx_group.coal.adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}
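
/* A minimal illustrative sketch (not called anywhere in the driver) of how
 * the per-vector coalescing helpers fit together: the GL setters program
 * the gap limiter for each direction and the RL setter programs the rate
 * limiter shared by the vector.  The function name and the __maybe_unused
 * annotation are illustration-only additions; the prototypes are assumed
 * to come from hns3_enet.h.
 */
static void __maybe_unused
hns3_coalesce_sketch(struct hns3_enet_tqp_vector *tqp_vector,
		     u32 tx_gl_usecs, u32 rx_gl_usecs, u32 rl_usecs)
{
	/* gap limiter: one value per direction, in microseconds */
	hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_gl_usecs);
	hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_gl_usecs);

	/* rate limiter: shared by the tx and rx side of this vector */
	hns3_set_vector_coalesce_rl(tqp_vector, rl_usecs);
}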

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->rx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->tx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
}

void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
}

static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;

	/* Initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 * 3. QL (Interrupt Quantity Limiter)
	 *
	 * Default: enable interrupt coalescing self-adaptive and GL
	 */
	tx_coal->adapt_enable = 1;
	rx_coal->adapt_enable = 1;

	tx_coal->int_gl = HNS3_INT_GL_50K;
	rx_coal->int_gl = HNS3_INT_GL_50K;

	rx_coal->flow_level = HNS3_FLOW_LOW;
	tx_coal->flow_level = HNS3_FLOW_LOW;

	/* For device version V3 and above, GL can be configured in 1 us
	 * unit, so use the 1 us unit.
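	 *
	 * When unit_1us is set, hns3_set_vector_coalesce_rx_gl() and
	 * hns3_set_vector_coalesce_tx_gl() above write the microsecond
	 * value together with HNS3_INT_GL_1US instead of converting it
	 * with hns3_gl_usec_to_reg().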
287 */ 288 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { 289 tx_coal->unit_1us = 1; 290 rx_coal->unit_1us = 1; 291 } 292 293 if (ae_dev->dev_specs.int_ql_max) { 294 tx_coal->ql_enable = 1; 295 rx_coal->ql_enable = 1; 296 tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; 297 rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; 298 tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; 299 rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; 300 } 301 } 302 303 static void 304 hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector, 305 struct hns3_nic_priv *priv) 306 { 307 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; 308 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; 309 struct hnae3_handle *h = priv->ae_handle; 310 311 hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); 312 hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); 313 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); 314 315 if (tx_coal->ql_enable) 316 hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); 317 318 if (rx_coal->ql_enable) 319 hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); 320 } 321 322 static int hns3_nic_set_real_num_queue(struct net_device *netdev) 323 { 324 struct hnae3_handle *h = hns3_get_handle(netdev); 325 struct hnae3_knic_private_info *kinfo = &h->kinfo; 326 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; 327 int i, ret; 328 329 if (kinfo->num_tc <= 1) { 330 netdev_reset_tc(netdev); 331 } else { 332 ret = netdev_set_num_tc(netdev, kinfo->num_tc); 333 if (ret) { 334 netdev_err(netdev, 335 "netdev_set_num_tc fail, ret=%d!\n", ret); 336 return ret; 337 } 338 339 for (i = 0; i < HNAE3_MAX_TC; i++) { 340 if (!kinfo->tc_info[i].enable) 341 continue; 342 343 netdev_set_tc_queue(netdev, 344 kinfo->tc_info[i].tc, 345 kinfo->tc_info[i].tqp_count, 346 kinfo->tc_info[i].tqp_offset); 347 } 348 } 349 350 ret = netif_set_real_num_tx_queues(netdev, queue_size); 351 if (ret) { 352 netdev_err(netdev, 353 "netif_set_real_num_tx_queues fail, ret=%d!\n", ret); 354 return ret; 355 } 356 357 ret = netif_set_real_num_rx_queues(netdev, queue_size); 358 if (ret) { 359 netdev_err(netdev, 360 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); 361 return ret; 362 } 363 364 return 0; 365 } 366 367 static u16 hns3_get_max_available_channels(struct hnae3_handle *h) 368 { 369 u16 alloc_tqps, max_rss_size, rss_size; 370 371 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); 372 rss_size = alloc_tqps / h->kinfo.num_tc; 373 374 return min_t(u16, rss_size, max_rss_size); 375 } 376 377 static void hns3_tqp_enable(struct hnae3_queue *tqp) 378 { 379 u32 rcb_reg; 380 381 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 382 rcb_reg |= BIT(HNS3_RING_EN_B); 383 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 384 } 385 386 static void hns3_tqp_disable(struct hnae3_queue *tqp) 387 { 388 u32 rcb_reg; 389 390 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 391 rcb_reg &= ~BIT(HNS3_RING_EN_B); 392 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 393 } 394 395 static void hns3_free_rx_cpu_rmap(struct net_device *netdev) 396 { 397 #ifdef CONFIG_RFS_ACCEL 398 free_irq_cpu_rmap(netdev->rx_cpu_rmap); 399 netdev->rx_cpu_rmap = NULL; 400 #endif 401 } 402 403 static int hns3_set_rx_cpu_rmap(struct net_device *netdev) 404 { 405 #ifdef CONFIG_RFS_ACCEL 406 struct hns3_nic_priv *priv = netdev_priv(netdev); 407 struct hns3_enet_tqp_vector *tqp_vector; 408 int i, ret; 409 410 if (!netdev->rx_cpu_rmap) { 411 netdev->rx_cpu_rmap = 
alloc_irq_cpu_rmap(priv->vector_num); 412 if (!netdev->rx_cpu_rmap) 413 return -ENOMEM; 414 } 415 416 for (i = 0; i < priv->vector_num; i++) { 417 tqp_vector = &priv->tqp_vector[i]; 418 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, 419 tqp_vector->vector_irq); 420 if (ret) { 421 hns3_free_rx_cpu_rmap(netdev); 422 return ret; 423 } 424 } 425 #endif 426 return 0; 427 } 428 429 static int hns3_nic_net_up(struct net_device *netdev) 430 { 431 struct hns3_nic_priv *priv = netdev_priv(netdev); 432 struct hnae3_handle *h = priv->ae_handle; 433 int i, j; 434 int ret; 435 436 ret = hns3_nic_reset_all_ring(h); 437 if (ret) 438 return ret; 439 440 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 441 442 /* enable the vectors */ 443 for (i = 0; i < priv->vector_num; i++) 444 hns3_vector_enable(&priv->tqp_vector[i]); 445 446 /* enable rcb */ 447 for (j = 0; j < h->kinfo.num_tqps; j++) 448 hns3_tqp_enable(h->kinfo.tqp[j]); 449 450 /* start the ae_dev */ 451 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; 452 if (ret) { 453 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 454 while (j--) 455 hns3_tqp_disable(h->kinfo.tqp[j]); 456 457 for (j = i - 1; j >= 0; j--) 458 hns3_vector_disable(&priv->tqp_vector[j]); 459 } 460 461 return ret; 462 } 463 464 static void hns3_config_xps(struct hns3_nic_priv *priv) 465 { 466 int i; 467 468 for (i = 0; i < priv->vector_num; i++) { 469 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; 470 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; 471 472 while (ring) { 473 int ret; 474 475 ret = netif_set_xps_queue(priv->netdev, 476 &tqp_vector->affinity_mask, 477 ring->tqp->tqp_index); 478 if (ret) 479 netdev_warn(priv->netdev, 480 "set xps queue failed: %d", ret); 481 482 ring = ring->next; 483 } 484 } 485 } 486 487 static int hns3_nic_net_open(struct net_device *netdev) 488 { 489 struct hns3_nic_priv *priv = netdev_priv(netdev); 490 struct hnae3_handle *h = hns3_get_handle(netdev); 491 struct hnae3_knic_private_info *kinfo; 492 int i, ret; 493 494 if (hns3_nic_resetting(netdev)) 495 return -EBUSY; 496 497 netif_carrier_off(netdev); 498 499 ret = hns3_nic_set_real_num_queue(netdev); 500 if (ret) 501 return ret; 502 503 ret = hns3_nic_net_up(netdev); 504 if (ret) { 505 netdev_err(netdev, "net up fail, ret=%d!\n", ret); 506 return ret; 507 } 508 509 kinfo = &h->kinfo; 510 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 511 netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]); 512 513 if (h->ae_algo->ops->set_timer_task) 514 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); 515 516 hns3_config_xps(priv); 517 518 netif_dbg(h, drv, netdev, "net open\n"); 519 520 return 0; 521 } 522 523 static void hns3_reset_tx_queue(struct hnae3_handle *h) 524 { 525 struct net_device *ndev = h->kinfo.netdev; 526 struct hns3_nic_priv *priv = netdev_priv(ndev); 527 struct netdev_queue *dev_queue; 528 u32 i; 529 530 for (i = 0; i < h->kinfo.num_tqps; i++) { 531 dev_queue = netdev_get_tx_queue(ndev, 532 priv->ring[i].queue_index); 533 netdev_tx_reset_queue(dev_queue); 534 } 535 } 536 537 static void hns3_nic_net_down(struct net_device *netdev) 538 { 539 struct hns3_nic_priv *priv = netdev_priv(netdev); 540 struct hnae3_handle *h = hns3_get_handle(netdev); 541 const struct hnae3_ae_ops *ops; 542 int i; 543 544 /* disable vectors */ 545 for (i = 0; i < priv->vector_num; i++) 546 hns3_vector_disable(&priv->tqp_vector[i]); 547 548 /* disable rcb */ 549 for (i = 0; i < h->kinfo.num_tqps; i++) 550 hns3_tqp_disable(h->kinfo.tqp[i]); 551 552 /* stop ae_dev */ 553 ops = 
priv->ae_handle->ae_algo->ops; 554 if (ops->stop) 555 ops->stop(priv->ae_handle); 556 557 /* delay ring buffer clearing to hns3_reset_notify_uninit_enet 558 * during reset process, because driver may not be able 559 * to disable the ring through firmware when downing the netdev. 560 */ 561 if (!hns3_nic_resetting(netdev)) 562 hns3_clear_all_ring(priv->ae_handle, false); 563 564 hns3_reset_tx_queue(priv->ae_handle); 565 } 566 567 static int hns3_nic_net_stop(struct net_device *netdev) 568 { 569 struct hns3_nic_priv *priv = netdev_priv(netdev); 570 struct hnae3_handle *h = hns3_get_handle(netdev); 571 572 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 573 return 0; 574 575 netif_dbg(h, drv, netdev, "net stop\n"); 576 577 if (h->ae_algo->ops->set_timer_task) 578 h->ae_algo->ops->set_timer_task(priv->ae_handle, false); 579 580 netif_tx_stop_all_queues(netdev); 581 netif_carrier_off(netdev); 582 583 hns3_nic_net_down(netdev); 584 585 return 0; 586 } 587 588 static int hns3_nic_uc_sync(struct net_device *netdev, 589 const unsigned char *addr) 590 { 591 struct hnae3_handle *h = hns3_get_handle(netdev); 592 593 if (h->ae_algo->ops->add_uc_addr) 594 return h->ae_algo->ops->add_uc_addr(h, addr); 595 596 return 0; 597 } 598 599 static int hns3_nic_uc_unsync(struct net_device *netdev, 600 const unsigned char *addr) 601 { 602 struct hnae3_handle *h = hns3_get_handle(netdev); 603 604 /* need ignore the request of removing device address, because 605 * we store the device address and other addresses of uc list 606 * in the function's mac filter list. 607 */ 608 if (ether_addr_equal(addr, netdev->dev_addr)) 609 return 0; 610 611 if (h->ae_algo->ops->rm_uc_addr) 612 return h->ae_algo->ops->rm_uc_addr(h, addr); 613 614 return 0; 615 } 616 617 static int hns3_nic_mc_sync(struct net_device *netdev, 618 const unsigned char *addr) 619 { 620 struct hnae3_handle *h = hns3_get_handle(netdev); 621 622 if (h->ae_algo->ops->add_mc_addr) 623 return h->ae_algo->ops->add_mc_addr(h, addr); 624 625 return 0; 626 } 627 628 static int hns3_nic_mc_unsync(struct net_device *netdev, 629 const unsigned char *addr) 630 { 631 struct hnae3_handle *h = hns3_get_handle(netdev); 632 633 if (h->ae_algo->ops->rm_mc_addr) 634 return h->ae_algo->ops->rm_mc_addr(h, addr); 635 636 return 0; 637 } 638 639 static u8 hns3_get_netdev_flags(struct net_device *netdev) 640 { 641 u8 flags = 0; 642 643 if (netdev->flags & IFF_PROMISC) { 644 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; 645 } else { 646 flags |= HNAE3_VLAN_FLTR; 647 if (netdev->flags & IFF_ALLMULTI) 648 flags |= HNAE3_USER_MPE; 649 } 650 651 return flags; 652 } 653 654 static void hns3_nic_set_rx_mode(struct net_device *netdev) 655 { 656 struct hnae3_handle *h = hns3_get_handle(netdev); 657 u8 new_flags; 658 659 new_flags = hns3_get_netdev_flags(netdev); 660 661 __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); 662 __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync); 663 664 /* User mode Promisc mode enable and vlan filtering is disabled to 665 * let all packets in. 
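	 *
	 * When IFF_PROMISC is set, hns3_get_netdev_flags() above does not
	 * set HNAE3_VLAN_FLTR, so VLAN filtering is switched off together
	 * with enabling unicast/multicast/broadcast promiscuous mode.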
666 */ 667 h->netdev_flags = new_flags; 668 hns3_request_update_promisc_mode(h); 669 } 670 671 void hns3_request_update_promisc_mode(struct hnae3_handle *handle) 672 { 673 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; 674 675 if (ops->request_update_promisc_mode) 676 ops->request_update_promisc_mode(handle); 677 } 678 679 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) 680 { 681 struct hns3_nic_priv *priv = netdev_priv(netdev); 682 struct hnae3_handle *h = priv->ae_handle; 683 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); 684 bool last_state; 685 686 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 && 687 h->ae_algo->ops->enable_vlan_filter) { 688 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false; 689 if (enable != last_state) { 690 netdev_info(netdev, 691 "%s vlan filter\n", 692 enable ? "enable" : "disable"); 693 h->ae_algo->ops->enable_vlan_filter(h, enable); 694 } 695 } 696 } 697 698 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs, 699 u16 *mss, u32 *type_cs_vlan_tso) 700 { 701 u32 l4_offset, hdr_len; 702 union l3_hdr_info l3; 703 union l4_hdr_info l4; 704 u32 l4_paylen; 705 int ret; 706 707 if (!skb_is_gso(skb)) 708 return 0; 709 710 ret = skb_cow_head(skb, 0); 711 if (unlikely(ret < 0)) 712 return ret; 713 714 l3.hdr = skb_network_header(skb); 715 l4.hdr = skb_transport_header(skb); 716 717 /* Software should clear the IPv4's checksum field when tso is 718 * needed. 719 */ 720 if (l3.v4->version == 4) 721 l3.v4->check = 0; 722 723 /* tunnel packet */ 724 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 725 SKB_GSO_GRE_CSUM | 726 SKB_GSO_UDP_TUNNEL | 727 SKB_GSO_UDP_TUNNEL_CSUM)) { 728 /* reset l3&l4 pointers from outer to inner headers */ 729 l3.hdr = skb_inner_network_header(skb); 730 l4.hdr = skb_inner_transport_header(skb); 731 732 /* Software should clear the IPv4's checksum field when 733 * tso is needed. 
734 */ 735 if (l3.v4->version == 4) 736 l3.v4->check = 0; 737 } 738 739 /* normal or tunnel packet */ 740 l4_offset = l4.hdr - skb->data; 741 742 /* remove payload length from inner pseudo checksum when tso */ 743 l4_paylen = skb->len - l4_offset; 744 745 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 746 hdr_len = sizeof(*l4.udp) + l4_offset; 747 csum_replace_by_diff(&l4.udp->check, 748 (__force __wsum)htonl(l4_paylen)); 749 } else { 750 hdr_len = (l4.tcp->doff << 2) + l4_offset; 751 csum_replace_by_diff(&l4.tcp->check, 752 (__force __wsum)htonl(l4_paylen)); 753 } 754 755 /* find the txbd field values */ 756 *paylen_fdop_ol4cs = skb->len - hdr_len; 757 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); 758 759 /* offload outer UDP header checksum */ 760 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) 761 hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1); 762 763 /* get MSS for TSO */ 764 *mss = skb_shinfo(skb)->gso_size; 765 766 trace_hns3_tso(skb); 767 768 return 0; 769 } 770 771 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, 772 u8 *il4_proto) 773 { 774 union l3_hdr_info l3; 775 unsigned char *l4_hdr; 776 unsigned char *exthdr; 777 u8 l4_proto_tmp; 778 __be16 frag_off; 779 780 /* find outer header point */ 781 l3.hdr = skb_network_header(skb); 782 l4_hdr = skb_transport_header(skb); 783 784 if (skb->protocol == htons(ETH_P_IPV6)) { 785 exthdr = l3.hdr + sizeof(*l3.v6); 786 l4_proto_tmp = l3.v6->nexthdr; 787 if (l4_hdr != exthdr) 788 ipv6_skip_exthdr(skb, exthdr - skb->data, 789 &l4_proto_tmp, &frag_off); 790 } else if (skb->protocol == htons(ETH_P_IP)) { 791 l4_proto_tmp = l3.v4->protocol; 792 } else { 793 return -EINVAL; 794 } 795 796 *ol4_proto = l4_proto_tmp; 797 798 /* tunnel packet */ 799 if (!skb->encapsulation) { 800 *il4_proto = 0; 801 return 0; 802 } 803 804 /* find inner header point */ 805 l3.hdr = skb_inner_network_header(skb); 806 l4_hdr = skb_inner_transport_header(skb); 807 808 if (l3.v6->version == 6) { 809 exthdr = l3.hdr + sizeof(*l3.v6); 810 l4_proto_tmp = l3.v6->nexthdr; 811 if (l4_hdr != exthdr) 812 ipv6_skip_exthdr(skb, exthdr - skb->data, 813 &l4_proto_tmp, &frag_off); 814 } else if (l3.v4->version == 4) { 815 l4_proto_tmp = l3.v4->protocol; 816 } 817 818 *il4_proto = l4_proto_tmp; 819 820 return 0; 821 } 822 823 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL 824 * and it is udp packet, which has a dest port as the IANA assigned. 825 * the hardware is expected to do the checksum offload, but the 826 * hardware will not do the checksum offload when udp dest port is 827 * 4789 or 6081. 828 */ 829 static bool hns3_tunnel_csum_bug(struct sk_buff *skb) 830 { 831 struct hns3_nic_priv *priv = netdev_priv(skb->dev); 832 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); 833 union l4_hdr_info l4; 834 835 /* device version above V3(include V3), the hardware can 836 * do this checksum offload. 
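	 *
	 * For older versions, the code below falls back to
	 * skb_checksum_help() when an un-encapsulated UDP packet is sent
	 * to the VXLAN port (4789) or the GENEVE port (6081), since the
	 * hardware will not checksum it.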
837 */ 838 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 839 return false; 840 841 l4.hdr = skb_transport_header(skb); 842 843 if (!(!skb->encapsulation && 844 (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || 845 l4.udp->dest == htons(GENEVE_UDP_PORT)))) 846 return false; 847 848 skb_checksum_help(skb); 849 850 return true; 851 } 852 853 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 854 u32 *ol_type_vlan_len_msec) 855 { 856 u32 l2_len, l3_len, l4_len; 857 unsigned char *il2_hdr; 858 union l3_hdr_info l3; 859 union l4_hdr_info l4; 860 861 l3.hdr = skb_network_header(skb); 862 l4.hdr = skb_transport_header(skb); 863 864 /* compute OL2 header size, defined in 2 Bytes */ 865 l2_len = l3.hdr - skb->data; 866 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); 867 868 /* compute OL3 header size, defined in 4 Bytes */ 869 l3_len = l4.hdr - l3.hdr; 870 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); 871 872 il2_hdr = skb_inner_mac_header(skb); 873 /* compute OL4 header size, defined in 4 Bytes */ 874 l4_len = il2_hdr - l4.hdr; 875 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); 876 877 /* define outer network header type */ 878 if (skb->protocol == htons(ETH_P_IP)) { 879 if (skb_is_gso(skb)) 880 hns3_set_field(*ol_type_vlan_len_msec, 881 HNS3_TXD_OL3T_S, 882 HNS3_OL3T_IPV4_CSUM); 883 else 884 hns3_set_field(*ol_type_vlan_len_msec, 885 HNS3_TXD_OL3T_S, 886 HNS3_OL3T_IPV4_NO_CSUM); 887 888 } else if (skb->protocol == htons(ETH_P_IPV6)) { 889 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, 890 HNS3_OL3T_IPV6); 891 } 892 893 if (ol4_proto == IPPROTO_UDP) 894 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 895 HNS3_TUN_MAC_IN_UDP); 896 else if (ol4_proto == IPPROTO_GRE) 897 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 898 HNS3_TUN_NVGRE); 899 } 900 901 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 902 u8 il4_proto, u32 *type_cs_vlan_tso, 903 u32 *ol_type_vlan_len_msec) 904 { 905 unsigned char *l2_hdr = skb->data; 906 u32 l4_proto = ol4_proto; 907 union l4_hdr_info l4; 908 union l3_hdr_info l3; 909 u32 l2_len, l3_len; 910 911 l4.hdr = skb_transport_header(skb); 912 l3.hdr = skb_network_header(skb); 913 914 /* handle encapsulation skb */ 915 if (skb->encapsulation) { 916 /* If this is a not UDP/GRE encapsulation skb */ 917 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { 918 /* drop the skb tunnel packet if hardware don't support, 919 * because hardware can't calculate csum when TSO. 920 */ 921 if (skb_is_gso(skb)) 922 return -EDOM; 923 924 /* the stack computes the IP header already, 925 * driver calculate l4 checksum when not TSO. 926 */ 927 skb_checksum_help(skb); 928 return 0; 929 } 930 931 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); 932 933 /* switch to inner header */ 934 l2_hdr = skb_inner_mac_header(skb); 935 l3.hdr = skb_inner_network_header(skb); 936 l4.hdr = skb_inner_transport_header(skb); 937 l4_proto = il4_proto; 938 } 939 940 if (l3.v4->version == 4) { 941 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 942 HNS3_L3T_IPV4); 943 944 /* the stack computes the IP header already, the only time we 945 * need the hardware to recompute it is in the case of TSO. 
946 */ 947 if (skb_is_gso(skb)) 948 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); 949 } else if (l3.v6->version == 6) { 950 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 951 HNS3_L3T_IPV6); 952 } 953 954 /* compute inner(/normal) L2 header size, defined in 2 Bytes */ 955 l2_len = l3.hdr - l2_hdr; 956 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); 957 958 /* compute inner(/normal) L3 header size, defined in 4 Bytes */ 959 l3_len = l4.hdr - l3.hdr; 960 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); 961 962 /* compute inner(/normal) L4 header size, defined in 4 Bytes */ 963 switch (l4_proto) { 964 case IPPROTO_TCP: 965 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 966 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 967 HNS3_L4T_TCP); 968 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 969 l4.tcp->doff); 970 break; 971 case IPPROTO_UDP: 972 if (hns3_tunnel_csum_bug(skb)) 973 break; 974 975 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 976 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 977 HNS3_L4T_UDP); 978 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 979 (sizeof(struct udphdr) >> 2)); 980 break; 981 case IPPROTO_SCTP: 982 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 983 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 984 HNS3_L4T_SCTP); 985 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 986 (sizeof(struct sctphdr) >> 2)); 987 break; 988 default: 989 /* drop the skb tunnel packet if hardware don't support, 990 * because hardware can't calculate csum when TSO. 991 */ 992 if (skb_is_gso(skb)) 993 return -EDOM; 994 995 /* the stack computes the IP header already, 996 * driver calculate l4 checksum when not TSO. 997 */ 998 skb_checksum_help(skb); 999 return 0; 1000 } 1001 1002 return 0; 1003 } 1004 1005 static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, 1006 struct sk_buff *skb) 1007 { 1008 struct hnae3_handle *handle = tx_ring->tqp->handle; 1009 struct hnae3_ae_dev *ae_dev; 1010 struct vlan_ethhdr *vhdr; 1011 int rc; 1012 1013 if (!(skb->protocol == htons(ETH_P_8021Q) || 1014 skb_vlan_tag_present(skb))) 1015 return 0; 1016 1017 /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert 1018 * VLAN enabled, only one VLAN header is allowed in skb, otherwise it 1019 * will cause RAS error. 1020 */ 1021 ae_dev = pci_get_drvdata(handle->pdev); 1022 if (unlikely(skb_vlan_tagged_multi(skb) && 1023 ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && 1024 handle->port_base_vlan_state == 1025 HNAE3_PORT_BASE_VLAN_ENABLE)) 1026 return -EINVAL; 1027 1028 if (skb->protocol == htons(ETH_P_8021Q) && 1029 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 1030 /* When HW VLAN acceleration is turned off, and the stack 1031 * sets the protocol to 802.1q, the driver just need to 1032 * set the protocol to the encapsulated ethertype. 1033 */ 1034 skb->protocol = vlan_get_protocol(skb); 1035 return 0; 1036 } 1037 1038 if (skb_vlan_tag_present(skb)) { 1039 /* Based on hw strategy, use out_vtag in two layer tag case, 1040 * and use inner_vtag in one tag case. 
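		 *
		 * Concretely, in the code below: if skb->protocol is 802.1Q
		 * (the frame already carries a tag) and port based VLAN is
		 * disabled, the offloaded tag from skb_vlan_tag_get() is
		 * placed in the outer VLAN field; otherwise it is placed in
		 * the inner VLAN field.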
1041 */ 1042 if (skb->protocol == htons(ETH_P_8021Q) && 1043 handle->port_base_vlan_state == 1044 HNAE3_PORT_BASE_VLAN_DISABLE) 1045 rc = HNS3_OUTER_VLAN_TAG; 1046 else 1047 rc = HNS3_INNER_VLAN_TAG; 1048 1049 skb->protocol = vlan_get_protocol(skb); 1050 return rc; 1051 } 1052 1053 rc = skb_cow_head(skb, 0); 1054 if (unlikely(rc < 0)) 1055 return rc; 1056 1057 vhdr = (struct vlan_ethhdr *)skb->data; 1058 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) 1059 & VLAN_PRIO_MASK); 1060 1061 skb->protocol = vlan_get_protocol(skb); 1062 return 0; 1063 } 1064 1065 /* check if the hardware is capable of checksum offloading */ 1066 static bool hns3_check_hw_tx_csum(struct sk_buff *skb) 1067 { 1068 struct hns3_nic_priv *priv = netdev_priv(skb->dev); 1069 1070 /* Kindly note, due to backward compatibility of the TX descriptor, 1071 * HW checksum of the non-IP packets and GSO packets is handled at 1072 * different place in the following code 1073 */ 1074 if (skb->csum_not_inet || skb_is_gso(skb) || 1075 !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) 1076 return false; 1077 1078 return true; 1079 } 1080 1081 static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, 1082 struct sk_buff *skb, struct hns3_desc *desc) 1083 { 1084 u32 ol_type_vlan_len_msec = 0; 1085 u32 paylen_ol4cs = skb->len; 1086 u32 type_cs_vlan_tso = 0; 1087 u16 mss_hw_csum = 0; 1088 u16 inner_vtag = 0; 1089 u16 out_vtag = 0; 1090 int ret; 1091 1092 ret = hns3_handle_vtags(ring, skb); 1093 if (unlikely(ret < 0)) { 1094 u64_stats_update_begin(&ring->syncp); 1095 ring->stats.tx_vlan_err++; 1096 u64_stats_update_end(&ring->syncp); 1097 return ret; 1098 } else if (ret == HNS3_INNER_VLAN_TAG) { 1099 inner_vtag = skb_vlan_tag_get(skb); 1100 inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1101 VLAN_PRIO_MASK; 1102 hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); 1103 } else if (ret == HNS3_OUTER_VLAN_TAG) { 1104 out_vtag = skb_vlan_tag_get(skb); 1105 out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1106 VLAN_PRIO_MASK; 1107 hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, 1108 1); 1109 } 1110 1111 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1112 u8 ol4_proto, il4_proto; 1113 1114 if (hns3_check_hw_tx_csum(skb)) { 1115 /* set checksum start and offset, defined in 2 Bytes */ 1116 hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, 1117 skb_checksum_start_offset(skb) >> 1); 1118 hns3_set_field(ol_type_vlan_len_msec, 1119 HNS3_TXD_CSUM_OFFSET_S, 1120 skb->csum_offset >> 1); 1121 mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); 1122 goto out_hw_tx_csum; 1123 } 1124 1125 skb_reset_mac_len(skb); 1126 1127 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); 1128 if (unlikely(ret < 0)) { 1129 u64_stats_update_begin(&ring->syncp); 1130 ring->stats.tx_l4_proto_err++; 1131 u64_stats_update_end(&ring->syncp); 1132 return ret; 1133 } 1134 1135 ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, 1136 &type_cs_vlan_tso, 1137 &ol_type_vlan_len_msec); 1138 if (unlikely(ret < 0)) { 1139 u64_stats_update_begin(&ring->syncp); 1140 ring->stats.tx_l2l3l4_err++; 1141 u64_stats_update_end(&ring->syncp); 1142 return ret; 1143 } 1144 1145 ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum, 1146 &type_cs_vlan_tso); 1147 if (unlikely(ret < 0)) { 1148 u64_stats_update_begin(&ring->syncp); 1149 ring->stats.tx_tso_err++; 1150 u64_stats_update_end(&ring->syncp); 1151 return ret; 1152 } 1153 } 1154 1155 out_hw_tx_csum: 1156 /* Set txbd */ 1157 desc->tx.ol_type_vlan_len_msec = 1158 cpu_to_le32(ol_type_vlan_len_msec); 1159 
desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); 1160 desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs); 1161 desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum); 1162 desc->tx.vlan_tag = cpu_to_le16(inner_vtag); 1163 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); 1164 1165 return 0; 1166 } 1167 1168 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, 1169 unsigned int size, enum hns_desc_type type) 1170 { 1171 #define HNS3_LIKELY_BD_NUM 1 1172 1173 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 1174 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1175 struct device *dev = ring_to_dev(ring); 1176 skb_frag_t *frag; 1177 unsigned int frag_buf_num; 1178 int k, sizeoflast; 1179 dma_addr_t dma; 1180 1181 if (type == DESC_TYPE_FRAGLIST_SKB || 1182 type == DESC_TYPE_SKB) { 1183 struct sk_buff *skb = (struct sk_buff *)priv; 1184 1185 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); 1186 } else { 1187 frag = (skb_frag_t *)priv; 1188 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); 1189 } 1190 1191 if (unlikely(dma_mapping_error(dev, dma))) { 1192 u64_stats_update_begin(&ring->syncp); 1193 ring->stats.sw_err_cnt++; 1194 u64_stats_update_end(&ring->syncp); 1195 return -ENOMEM; 1196 } 1197 1198 desc_cb->priv = priv; 1199 desc_cb->length = size; 1200 desc_cb->dma = dma; 1201 desc_cb->type = type; 1202 1203 if (likely(size <= HNS3_MAX_BD_SIZE)) { 1204 desc->addr = cpu_to_le64(dma); 1205 desc->tx.send_size = cpu_to_le16(size); 1206 desc->tx.bdtp_fe_sc_vld_ra_ri = 1207 cpu_to_le16(BIT(HNS3_TXD_VLD_B)); 1208 1209 trace_hns3_tx_desc(ring, ring->next_to_use); 1210 ring_ptr_move_fw(ring, next_to_use); 1211 return HNS3_LIKELY_BD_NUM; 1212 } 1213 1214 frag_buf_num = hns3_tx_bd_count(size); 1215 sizeoflast = size % HNS3_MAX_BD_SIZE; 1216 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 1217 1218 /* When frag size is bigger than hardware limit, split this frag */ 1219 for (k = 0; k < frag_buf_num; k++) { 1220 /* now, fill the descriptor */ 1221 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); 1222 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? 
1223 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); 1224 desc->tx.bdtp_fe_sc_vld_ra_ri = 1225 cpu_to_le16(BIT(HNS3_TXD_VLD_B)); 1226 1227 trace_hns3_tx_desc(ring, ring->next_to_use); 1228 /* move ring pointer to next */ 1229 ring_ptr_move_fw(ring, next_to_use); 1230 1231 desc = &ring->desc[ring->next_to_use]; 1232 } 1233 1234 return frag_buf_num; 1235 } 1236 1237 static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, 1238 unsigned int bd_num) 1239 { 1240 unsigned int size; 1241 int i; 1242 1243 size = skb_headlen(skb); 1244 while (size > HNS3_MAX_BD_SIZE) { 1245 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1246 size -= HNS3_MAX_BD_SIZE; 1247 1248 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1249 return bd_num; 1250 } 1251 1252 if (size) { 1253 bd_size[bd_num++] = size; 1254 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1255 return bd_num; 1256 } 1257 1258 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1259 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1260 size = skb_frag_size(frag); 1261 if (!size) 1262 continue; 1263 1264 while (size > HNS3_MAX_BD_SIZE) { 1265 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1266 size -= HNS3_MAX_BD_SIZE; 1267 1268 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1269 return bd_num; 1270 } 1271 1272 bd_size[bd_num++] = size; 1273 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1274 return bd_num; 1275 } 1276 1277 return bd_num; 1278 } 1279 1280 static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size, 1281 u8 max_non_tso_bd_num) 1282 { 1283 struct sk_buff *frag_skb; 1284 unsigned int bd_num = 0; 1285 1286 /* If the total len is within the max bd limit */ 1287 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) && 1288 skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) 1289 return skb_shinfo(skb)->nr_frags + 1U; 1290 1291 /* The below case will always be linearized, return 1292 * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized. 1293 */ 1294 if (unlikely(skb->len > HNS3_MAX_TSO_SIZE || 1295 (!skb_is_gso(skb) && skb->len > 1296 HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num)))) 1297 return HNS3_MAX_TSO_BD_NUM + 1U; 1298 1299 bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); 1300 1301 if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) 1302 return bd_num; 1303 1304 skb_walk_frags(skb, frag_skb) { 1305 bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num); 1306 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1307 return bd_num; 1308 } 1309 1310 return bd_num; 1311 } 1312 1313 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) 1314 { 1315 if (!skb->encapsulation) 1316 return skb_transport_offset(skb) + tcp_hdrlen(skb); 1317 1318 return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); 1319 } 1320 1321 /* HW need every continuous max_non_tso_bd_num buffer data to be larger 1322 * than MSS, we simplify it by ensuring skb_headlen + the first continuous 1323 * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss, 1324 * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger 1325 * than MSS except the last max_non_tso_bd_num - 1 frags. 
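 *
 * For example, with a hypothetical max_non_tso_bd_num of 8, an MSS of
 * 4000 and a 100 byte GSO header, hns3_skb_need_linearized() below
 * requires the first 8 BD sizes to sum to at least 4100, and every
 * subsequent window of 7 consecutive BD sizes (sliding one BD at a time
 * and never reaching the last BD) to sum to at least 4000; if either
 * check fails, the caller linearizes the skb.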
1326 */ 1327 static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, 1328 unsigned int bd_num, u8 max_non_tso_bd_num) 1329 { 1330 unsigned int tot_len = 0; 1331 int i; 1332 1333 for (i = 0; i < max_non_tso_bd_num - 1U; i++) 1334 tot_len += bd_size[i]; 1335 1336 /* ensure the first max_non_tso_bd_num frags is greater than 1337 * mss + header 1338 */ 1339 if (tot_len + bd_size[max_non_tso_bd_num - 1U] < 1340 skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) 1341 return true; 1342 1343 /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater 1344 * than mss except the last one. 1345 */ 1346 for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { 1347 tot_len -= bd_size[i]; 1348 tot_len += bd_size[i + max_non_tso_bd_num - 1U]; 1349 1350 if (tot_len < skb_shinfo(skb)->gso_size) 1351 return true; 1352 } 1353 1354 return false; 1355 } 1356 1357 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) 1358 { 1359 int i; 1360 1361 for (i = 0; i < MAX_SKB_FRAGS; i++) 1362 size[i] = skb_frag_size(&shinfo->frags[i]); 1363 } 1364 1365 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, 1366 struct net_device *netdev, 1367 struct sk_buff *skb) 1368 { 1369 struct hns3_nic_priv *priv = netdev_priv(netdev); 1370 u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; 1371 unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; 1372 unsigned int bd_num; 1373 1374 bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num); 1375 if (unlikely(bd_num > max_non_tso_bd_num)) { 1376 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && 1377 !hns3_skb_need_linearized(skb, bd_size, bd_num, 1378 max_non_tso_bd_num)) { 1379 trace_hns3_over_max_bd(skb); 1380 goto out; 1381 } 1382 1383 if (__skb_linearize(skb)) 1384 return -ENOMEM; 1385 1386 bd_num = hns3_tx_bd_count(skb->len); 1387 if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || 1388 (!skb_is_gso(skb) && 1389 bd_num > max_non_tso_bd_num)) { 1390 trace_hns3_over_max_bd(skb); 1391 return -ENOMEM; 1392 } 1393 1394 u64_stats_update_begin(&ring->syncp); 1395 ring->stats.tx_copy++; 1396 u64_stats_update_end(&ring->syncp); 1397 } 1398 1399 out: 1400 if (likely(ring_space(ring) >= bd_num)) 1401 return bd_num; 1402 1403 netif_stop_subqueue(netdev, ring->queue_index); 1404 smp_mb(); /* Memory barrier before checking ring_space */ 1405 1406 /* Start queue in case hns3_clean_tx_ring has just made room 1407 * available and has not seen the queue stopped state performed 1408 * by netif_stop_subqueue above. 
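	 *
	 * Without the smp_mb() above, the ring_space() re-check below could
	 * be observed before the stopped state becomes visible; a completion
	 * that frees descriptors in that window would still see the queue
	 * running, skip waking it, and the queue could stay stopped even
	 * though there is room again.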
1409 */ 1410 if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && 1411 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 1412 netif_start_subqueue(netdev, ring->queue_index); 1413 return bd_num; 1414 } 1415 1416 return -EBUSY; 1417 } 1418 1419 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) 1420 { 1421 struct device *dev = ring_to_dev(ring); 1422 unsigned int i; 1423 1424 for (i = 0; i < ring->desc_num; i++) { 1425 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1426 1427 memset(desc, 0, sizeof(*desc)); 1428 1429 /* check if this is where we started */ 1430 if (ring->next_to_use == next_to_use_orig) 1431 break; 1432 1433 /* rollback one */ 1434 ring_ptr_move_bw(ring, next_to_use); 1435 1436 if (!ring->desc_cb[ring->next_to_use].dma) 1437 continue; 1438 1439 /* unmap the descriptor dma address */ 1440 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB || 1441 ring->desc_cb[ring->next_to_use].type == 1442 DESC_TYPE_FRAGLIST_SKB) 1443 dma_unmap_single(dev, 1444 ring->desc_cb[ring->next_to_use].dma, 1445 ring->desc_cb[ring->next_to_use].length, 1446 DMA_TO_DEVICE); 1447 else if (ring->desc_cb[ring->next_to_use].length) 1448 dma_unmap_page(dev, 1449 ring->desc_cb[ring->next_to_use].dma, 1450 ring->desc_cb[ring->next_to_use].length, 1451 DMA_TO_DEVICE); 1452 1453 ring->desc_cb[ring->next_to_use].length = 0; 1454 ring->desc_cb[ring->next_to_use].dma = 0; 1455 ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; 1456 } 1457 } 1458 1459 static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, 1460 struct sk_buff *skb, enum hns_desc_type type) 1461 { 1462 unsigned int size = skb_headlen(skb); 1463 int i, ret, bd_num = 0; 1464 1465 if (size) { 1466 ret = hns3_fill_desc(ring, skb, size, type); 1467 if (unlikely(ret < 0)) 1468 return ret; 1469 1470 bd_num += ret; 1471 } 1472 1473 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1474 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1475 1476 size = skb_frag_size(frag); 1477 if (!size) 1478 continue; 1479 1480 ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE); 1481 if (unlikely(ret < 0)) 1482 return ret; 1483 1484 bd_num += ret; 1485 } 1486 1487 return bd_num; 1488 } 1489 1490 static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, 1491 bool doorbell) 1492 { 1493 ring->pending_buf += num; 1494 1495 if (!doorbell) { 1496 u64_stats_update_begin(&ring->syncp); 1497 ring->stats.tx_more++; 1498 u64_stats_update_end(&ring->syncp); 1499 return; 1500 } 1501 1502 if (!ring->pending_buf) 1503 return; 1504 1505 writel(ring->pending_buf, 1506 ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); 1507 ring->pending_buf = 0; 1508 WRITE_ONCE(ring->last_to_use, ring->next_to_use); 1509 } 1510 1511 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 1512 { 1513 struct hns3_nic_priv *priv = netdev_priv(netdev); 1514 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; 1515 struct netdev_queue *dev_queue; 1516 int pre_ntu, next_to_use_head; 1517 struct sk_buff *frag_skb; 1518 int bd_num = 0; 1519 bool doorbell; 1520 int ret; 1521 1522 /* Hardware can only handle short frames above 32 bytes */ 1523 if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { 1524 hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); 1525 return NETDEV_TX_OK; 1526 } 1527 1528 /* Prefetch the data used later */ 1529 prefetch(skb->data); 1530 1531 ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); 1532 if (unlikely(ret <= 0)) { 1533 if (ret == -EBUSY) { 1534 u64_stats_update_begin(&ring->syncp); 1535 
ring->stats.tx_busy++; 1536 u64_stats_update_end(&ring->syncp); 1537 hns3_tx_doorbell(ring, 0, true); 1538 return NETDEV_TX_BUSY; 1539 } else if (ret == -ENOMEM) { 1540 u64_stats_update_begin(&ring->syncp); 1541 ring->stats.sw_err_cnt++; 1542 u64_stats_update_end(&ring->syncp); 1543 } 1544 1545 hns3_rl_err(netdev, "xmit error: %d!\n", ret); 1546 goto out_err_tx_ok; 1547 } 1548 1549 next_to_use_head = ring->next_to_use; 1550 1551 ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]); 1552 if (unlikely(ret < 0)) 1553 goto fill_err; 1554 1555 ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); 1556 if (unlikely(ret < 0)) 1557 goto fill_err; 1558 1559 bd_num += ret; 1560 1561 skb_walk_frags(skb, frag_skb) { 1562 ret = hns3_fill_skb_to_desc(ring, frag_skb, 1563 DESC_TYPE_FRAGLIST_SKB); 1564 if (unlikely(ret < 0)) 1565 goto fill_err; 1566 1567 bd_num += ret; 1568 } 1569 1570 pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : 1571 (ring->desc_num - 1); 1572 ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= 1573 cpu_to_le16(BIT(HNS3_TXD_FE_B)); 1574 trace_hns3_tx_desc(ring, pre_ntu); 1575 1576 /* Complete translate all packets */ 1577 dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); 1578 doorbell = __netdev_tx_sent_queue(dev_queue, skb->len, 1579 netdev_xmit_more()); 1580 hns3_tx_doorbell(ring, bd_num, doorbell); 1581 1582 return NETDEV_TX_OK; 1583 1584 fill_err: 1585 hns3_clear_desc(ring, next_to_use_head); 1586 1587 out_err_tx_ok: 1588 dev_kfree_skb_any(skb); 1589 hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); 1590 return NETDEV_TX_OK; 1591 } 1592 1593 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) 1594 { 1595 struct hnae3_handle *h = hns3_get_handle(netdev); 1596 struct sockaddr *mac_addr = p; 1597 int ret; 1598 1599 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) 1600 return -EADDRNOTAVAIL; 1601 1602 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { 1603 netdev_info(netdev, "already using mac address %pM\n", 1604 mac_addr->sa_data); 1605 return 0; 1606 } 1607 1608 /* For VF device, if there is a perm_addr, then the user will not 1609 * be allowed to change the address. 
	 */
	if (!hns3_is_phys_func(h->pdev) &&
	    !is_zero_ether_addr(netdev->perm_addr)) {
		netdev_err(netdev, "has permanent MAC %pM, user MAC %pM is not allowed\n",
			   netdev->perm_addr, mac_addr->sa_data);
		return -EPERM;
	}

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}

static int hns3_nic_do_ioctl(struct net_device *netdev,
			     struct ifreq *ifr, int cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!h->ae_algo->ops->do_ioctl)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
}

static int hns3_nic_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	bool enable;
	int ret;

	if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
		enable = !!(features & NETIF_F_GRO_HW);
		ret = h->ae_algo->ops->set_gro_en(h, enable);
		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
		enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
		ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
		enable = !!(features & NETIF_F_NTUPLE);
		h->ae_algo->ops->enable_fd(h, enable);
	}

	netdev->features = features;
	return 0;
}

static netdev_features_t hns3_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
#define HNS3_MAX_HDR_LEN 480U
#define HNS3_MAX_L4_HDR_LEN 60U

	size_t len;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	if (skb->encapsulation)
		len = skb_inner_transport_header(skb) - skb->data;
	else
		len = skb_transport_header(skb) - skb->data;

	/* Assume the L4 header is 60 bytes, as TCP is the only protocol
	 * with a flexible header length, and its max len is 60 bytes.
	 */
	len += HNS3_MAX_L4_HDR_LEN;

	/* Hardware only supports checksum on the skb with a max header
	 * len of 480 bytes.
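	 *
	 * For example, an encapsulated skb whose inner transport header
	 * starts 120 bytes into the packet gives len = 120 + 60 = 180,
	 * well under the 480 byte limit, so the checksum and GSO features
	 * are kept.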
1700 */ 1701 if (len > HNS3_MAX_HDR_LEN) 1702 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 1703 1704 return features; 1705 } 1706 1707 static void hns3_nic_get_stats64(struct net_device *netdev, 1708 struct rtnl_link_stats64 *stats) 1709 { 1710 struct hns3_nic_priv *priv = netdev_priv(netdev); 1711 int queue_num = priv->ae_handle->kinfo.num_tqps; 1712 struct hnae3_handle *handle = priv->ae_handle; 1713 struct hns3_enet_ring *ring; 1714 u64 rx_length_errors = 0; 1715 u64 rx_crc_errors = 0; 1716 u64 rx_multicast = 0; 1717 unsigned int start; 1718 u64 tx_errors = 0; 1719 u64 rx_errors = 0; 1720 unsigned int idx; 1721 u64 tx_bytes = 0; 1722 u64 rx_bytes = 0; 1723 u64 tx_pkts = 0; 1724 u64 rx_pkts = 0; 1725 u64 tx_drop = 0; 1726 u64 rx_drop = 0; 1727 1728 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 1729 return; 1730 1731 handle->ae_algo->ops->update_stats(handle, &netdev->stats); 1732 1733 for (idx = 0; idx < queue_num; idx++) { 1734 /* fetch the tx stats */ 1735 ring = &priv->ring[idx]; 1736 do { 1737 start = u64_stats_fetch_begin_irq(&ring->syncp); 1738 tx_bytes += ring->stats.tx_bytes; 1739 tx_pkts += ring->stats.tx_pkts; 1740 tx_drop += ring->stats.sw_err_cnt; 1741 tx_drop += ring->stats.tx_vlan_err; 1742 tx_drop += ring->stats.tx_l4_proto_err; 1743 tx_drop += ring->stats.tx_l2l3l4_err; 1744 tx_drop += ring->stats.tx_tso_err; 1745 tx_errors += ring->stats.sw_err_cnt; 1746 tx_errors += ring->stats.tx_vlan_err; 1747 tx_errors += ring->stats.tx_l4_proto_err; 1748 tx_errors += ring->stats.tx_l2l3l4_err; 1749 tx_errors += ring->stats.tx_tso_err; 1750 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1751 1752 /* fetch the rx stats */ 1753 ring = &priv->ring[idx + queue_num]; 1754 do { 1755 start = u64_stats_fetch_begin_irq(&ring->syncp); 1756 rx_bytes += ring->stats.rx_bytes; 1757 rx_pkts += ring->stats.rx_pkts; 1758 rx_drop += ring->stats.l2_err; 1759 rx_errors += ring->stats.l2_err; 1760 rx_errors += ring->stats.l3l4_csum_err; 1761 rx_crc_errors += ring->stats.l2_err; 1762 rx_multicast += ring->stats.rx_multicast; 1763 rx_length_errors += ring->stats.err_pkt_len; 1764 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1765 } 1766 1767 stats->tx_bytes = tx_bytes; 1768 stats->tx_packets = tx_pkts; 1769 stats->rx_bytes = rx_bytes; 1770 stats->rx_packets = rx_pkts; 1771 1772 stats->rx_errors = rx_errors; 1773 stats->multicast = rx_multicast; 1774 stats->rx_length_errors = rx_length_errors; 1775 stats->rx_crc_errors = rx_crc_errors; 1776 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 1777 1778 stats->tx_errors = tx_errors; 1779 stats->rx_dropped = rx_drop; 1780 stats->tx_dropped = tx_drop; 1781 stats->collisions = netdev->stats.collisions; 1782 stats->rx_over_errors = netdev->stats.rx_over_errors; 1783 stats->rx_frame_errors = netdev->stats.rx_frame_errors; 1784 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; 1785 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; 1786 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; 1787 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; 1788 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; 1789 stats->tx_window_errors = netdev->stats.tx_window_errors; 1790 stats->rx_compressed = netdev->stats.rx_compressed; 1791 stats->tx_compressed = netdev->stats.tx_compressed; 1792 } 1793 1794 static int hns3_setup_tc(struct net_device *netdev, void *type_data) 1795 { 1796 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 1797 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; 1798 struct 
hnae3_knic_private_info *kinfo; 1799 u8 tc = mqprio_qopt->qopt.num_tc; 1800 u16 mode = mqprio_qopt->mode; 1801 u8 hw = mqprio_qopt->qopt.hw; 1802 struct hnae3_handle *h; 1803 1804 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && 1805 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) 1806 return -EOPNOTSUPP; 1807 1808 if (tc > HNAE3_MAX_TC) 1809 return -EINVAL; 1810 1811 if (!netdev) 1812 return -EINVAL; 1813 1814 h = hns3_get_handle(netdev); 1815 kinfo = &h->kinfo; 1816 1817 netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); 1818 1819 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 1820 kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP; 1821 } 1822 1823 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 1824 void *type_data) 1825 { 1826 if (type != TC_SETUP_QDISC_MQPRIO) 1827 return -EOPNOTSUPP; 1828 1829 return hns3_setup_tc(dev, type_data); 1830 } 1831 1832 static int hns3_vlan_rx_add_vid(struct net_device *netdev, 1833 __be16 proto, u16 vid) 1834 { 1835 struct hnae3_handle *h = hns3_get_handle(netdev); 1836 int ret = -EIO; 1837 1838 if (h->ae_algo->ops->set_vlan_filter) 1839 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); 1840 1841 return ret; 1842 } 1843 1844 static int hns3_vlan_rx_kill_vid(struct net_device *netdev, 1845 __be16 proto, u16 vid) 1846 { 1847 struct hnae3_handle *h = hns3_get_handle(netdev); 1848 int ret = -EIO; 1849 1850 if (h->ae_algo->ops->set_vlan_filter) 1851 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); 1852 1853 return ret; 1854 } 1855 1856 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 1857 u8 qos, __be16 vlan_proto) 1858 { 1859 struct hnae3_handle *h = hns3_get_handle(netdev); 1860 int ret = -EIO; 1861 1862 netif_dbg(h, drv, netdev, 1863 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n", 1864 vf, vlan, qos, ntohs(vlan_proto)); 1865 1866 if (h->ae_algo->ops->set_vf_vlan_filter) 1867 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, 1868 qos, vlan_proto); 1869 1870 return ret; 1871 } 1872 1873 static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) 1874 { 1875 struct hnae3_handle *handle = hns3_get_handle(netdev); 1876 1877 if (hns3_nic_resetting(netdev)) 1878 return -EBUSY; 1879 1880 if (!handle->ae_algo->ops->set_vf_spoofchk) 1881 return -EOPNOTSUPP; 1882 1883 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); 1884 } 1885 1886 static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) 1887 { 1888 struct hnae3_handle *handle = hns3_get_handle(netdev); 1889 1890 if (!handle->ae_algo->ops->set_vf_trust) 1891 return -EOPNOTSUPP; 1892 1893 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); 1894 } 1895 1896 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) 1897 { 1898 struct hnae3_handle *h = hns3_get_handle(netdev); 1899 int ret; 1900 1901 if (hns3_nic_resetting(netdev)) 1902 return -EBUSY; 1903 1904 if (!h->ae_algo->ops->set_mtu) 1905 return -EOPNOTSUPP; 1906 1907 netif_dbg(h, drv, netdev, 1908 "change mtu from %u to %d\n", netdev->mtu, new_mtu); 1909 1910 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 1911 if (ret) 1912 netdev_err(netdev, "failed to change MTU in hardware %d\n", 1913 ret); 1914 else 1915 netdev->mtu = new_mtu; 1916 1917 return ret; 1918 } 1919 1920 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) 1921 { 1922 struct hns3_nic_priv *priv = netdev_priv(ndev); 1923 struct hnae3_handle *h = hns3_get_handle(ndev); 1924 struct 
hns3_enet_ring *tx_ring; 1925 struct napi_struct *napi; 1926 int timeout_queue = 0; 1927 int hw_head, hw_tail; 1928 int fbd_num, fbd_oft; 1929 int ebd_num, ebd_oft; 1930 int bd_num, bd_err; 1931 int ring_en, tc; 1932 int i; 1933 1934 /* Find the stopped queue the same way the stack does */ 1935 for (i = 0; i < ndev->num_tx_queues; i++) { 1936 struct netdev_queue *q; 1937 unsigned long trans_start; 1938 1939 q = netdev_get_tx_queue(ndev, i); 1940 trans_start = q->trans_start; 1941 if (netif_xmit_stopped(q) && 1942 time_after(jiffies, 1943 (trans_start + ndev->watchdog_timeo))) { 1944 timeout_queue = i; 1945 netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", 1946 q->state, 1947 jiffies_to_msecs(jiffies - trans_start)); 1948 break; 1949 } 1950 } 1951 1952 if (i == ndev->num_tx_queues) { 1953 netdev_info(ndev, 1954 "no netdev TX timeout queue found, timeout count: %llu\n", 1955 priv->tx_timeout_count); 1956 return false; 1957 } 1958 1959 priv->tx_timeout_count++; 1960 1961 tx_ring = &priv->ring[timeout_queue]; 1962 napi = &tx_ring->tqp_vector->napi; 1963 1964 netdev_info(ndev, 1965 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", 1966 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, 1967 tx_ring->next_to_clean, napi->state); 1968 1969 netdev_info(ndev, 1970 "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n", 1971 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, 1972 tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); 1973 1974 netdev_info(ndev, 1975 "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", 1976 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, 1977 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); 1978 1979 /* When mac received many pause frames continuous, it's unable to send 1980 * packets, which may cause tx timeout 1981 */ 1982 if (h->ae_algo->ops->get_mac_stats) { 1983 struct hns3_mac_stats mac_stats; 1984 1985 h->ae_algo->ops->get_mac_stats(h, &mac_stats); 1986 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", 1987 mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); 1988 } 1989 1990 hw_head = readl_relaxed(tx_ring->tqp->io_base + 1991 HNS3_RING_TX_RING_HEAD_REG); 1992 hw_tail = readl_relaxed(tx_ring->tqp->io_base + 1993 HNS3_RING_TX_RING_TAIL_REG); 1994 fbd_num = readl_relaxed(tx_ring->tqp->io_base + 1995 HNS3_RING_TX_RING_FBDNUM_REG); 1996 fbd_oft = readl_relaxed(tx_ring->tqp->io_base + 1997 HNS3_RING_TX_RING_OFFSET_REG); 1998 ebd_num = readl_relaxed(tx_ring->tqp->io_base + 1999 HNS3_RING_TX_RING_EBDNUM_REG); 2000 ebd_oft = readl_relaxed(tx_ring->tqp->io_base + 2001 HNS3_RING_TX_RING_EBD_OFFSET_REG); 2002 bd_num = readl_relaxed(tx_ring->tqp->io_base + 2003 HNS3_RING_TX_RING_BD_NUM_REG); 2004 bd_err = readl_relaxed(tx_ring->tqp->io_base + 2005 HNS3_RING_TX_RING_BD_ERR_REG); 2006 ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); 2007 tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); 2008 2009 netdev_info(ndev, 2010 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", 2011 bd_num, hw_head, hw_tail, bd_err, 2012 readl(tx_ring->tqp_vector->mask_addr)); 2013 netdev_info(ndev, 2014 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", 2015 ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); 2016 2017 return true; 2018 } 2019 2020 static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue) 2021 { 2022 struct hns3_nic_priv *priv = netdev_priv(ndev); 2023 struct 
hnae3_handle *h = priv->ae_handle; 2024 2025 if (!hns3_get_tx_timeo_queue_info(ndev)) 2026 return; 2027 2028 /* request the reset, and let the hclge to determine 2029 * which reset level should be done 2030 */ 2031 if (h->ae_algo->ops->reset_event) 2032 h->ae_algo->ops->reset_event(h->pdev, h); 2033 } 2034 2035 #ifdef CONFIG_RFS_ACCEL 2036 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 2037 u16 rxq_index, u32 flow_id) 2038 { 2039 struct hnae3_handle *h = hns3_get_handle(dev); 2040 struct flow_keys fkeys; 2041 2042 if (!h->ae_algo->ops->add_arfs_entry) 2043 return -EOPNOTSUPP; 2044 2045 if (skb->encapsulation) 2046 return -EPROTONOSUPPORT; 2047 2048 if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) 2049 return -EPROTONOSUPPORT; 2050 2051 if ((fkeys.basic.n_proto != htons(ETH_P_IP) && 2052 fkeys.basic.n_proto != htons(ETH_P_IPV6)) || 2053 (fkeys.basic.ip_proto != IPPROTO_TCP && 2054 fkeys.basic.ip_proto != IPPROTO_UDP)) 2055 return -EPROTONOSUPPORT; 2056 2057 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); 2058 } 2059 #endif 2060 2061 static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, 2062 struct ifla_vf_info *ivf) 2063 { 2064 struct hnae3_handle *h = hns3_get_handle(ndev); 2065 2066 if (!h->ae_algo->ops->get_vf_config) 2067 return -EOPNOTSUPP; 2068 2069 return h->ae_algo->ops->get_vf_config(h, vf, ivf); 2070 } 2071 2072 static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, 2073 int link_state) 2074 { 2075 struct hnae3_handle *h = hns3_get_handle(ndev); 2076 2077 if (!h->ae_algo->ops->set_vf_link_state) 2078 return -EOPNOTSUPP; 2079 2080 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); 2081 } 2082 2083 static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, 2084 int min_tx_rate, int max_tx_rate) 2085 { 2086 struct hnae3_handle *h = hns3_get_handle(ndev); 2087 2088 if (!h->ae_algo->ops->set_vf_rate) 2089 return -EOPNOTSUPP; 2090 2091 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, 2092 false); 2093 } 2094 2095 static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 2096 { 2097 struct hnae3_handle *h = hns3_get_handle(netdev); 2098 2099 if (!h->ae_algo->ops->set_vf_mac) 2100 return -EOPNOTSUPP; 2101 2102 if (is_multicast_ether_addr(mac)) { 2103 netdev_err(netdev, 2104 "Invalid MAC:%pM specified. 
Could not set MAC\n", 2105 mac); 2106 return -EINVAL; 2107 } 2108 2109 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); 2110 } 2111 2112 static const struct net_device_ops hns3_nic_netdev_ops = { 2113 .ndo_open = hns3_nic_net_open, 2114 .ndo_stop = hns3_nic_net_stop, 2115 .ndo_start_xmit = hns3_nic_net_xmit, 2116 .ndo_tx_timeout = hns3_nic_net_timeout, 2117 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 2118 .ndo_do_ioctl = hns3_nic_do_ioctl, 2119 .ndo_change_mtu = hns3_nic_change_mtu, 2120 .ndo_set_features = hns3_nic_set_features, 2121 .ndo_features_check = hns3_features_check, 2122 .ndo_get_stats64 = hns3_nic_get_stats64, 2123 .ndo_setup_tc = hns3_nic_setup_tc, 2124 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 2125 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 2126 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 2127 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 2128 .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, 2129 .ndo_set_vf_trust = hns3_set_vf_trust, 2130 #ifdef CONFIG_RFS_ACCEL 2131 .ndo_rx_flow_steer = hns3_rx_flow_steer, 2132 #endif 2133 .ndo_get_vf_config = hns3_nic_get_vf_config, 2134 .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, 2135 .ndo_set_vf_rate = hns3_nic_set_vf_rate, 2136 .ndo_set_vf_mac = hns3_nic_set_vf_mac, 2137 }; 2138 2139 bool hns3_is_phys_func(struct pci_dev *pdev) 2140 { 2141 u32 dev_id = pdev->device; 2142 2143 switch (dev_id) { 2144 case HNAE3_DEV_ID_GE: 2145 case HNAE3_DEV_ID_25GE: 2146 case HNAE3_DEV_ID_25GE_RDMA: 2147 case HNAE3_DEV_ID_25GE_RDMA_MACSEC: 2148 case HNAE3_DEV_ID_50GE_RDMA: 2149 case HNAE3_DEV_ID_50GE_RDMA_MACSEC: 2150 case HNAE3_DEV_ID_100G_RDMA_MACSEC: 2151 case HNAE3_DEV_ID_200G_RDMA: 2152 return true; 2153 case HNAE3_DEV_ID_VF: 2154 case HNAE3_DEV_ID_RDMA_DCB_PFC_VF: 2155 return false; 2156 default: 2157 dev_warn(&pdev->dev, "un-recognized pci device-id %u", 2158 dev_id); 2159 } 2160 2161 return false; 2162 } 2163 2164 static void hns3_disable_sriov(struct pci_dev *pdev) 2165 { 2166 /* If our VFs are assigned we cannot shut down SR-IOV 2167 * without causing issues, so just leave the hardware 2168 * available but disabled 2169 */ 2170 if (pci_vfs_assigned(pdev)) { 2171 dev_warn(&pdev->dev, 2172 "disabling driver while VFs are assigned\n"); 2173 return; 2174 } 2175 2176 pci_disable_sriov(pdev); 2177 } 2178 2179 /* hns3_probe - Device initialization routine 2180 * @pdev: PCI device information struct 2181 * @ent: entry in hns3_pci_tbl 2182 * 2183 * hns3_probe initializes a PF identified by a pci_dev structure. 2184 * The OS initialization, configuring of the PF private structure, 2185 * and a hardware reset occur. 
2186 * 2187 * Returns 0 on success, negative on failure 2188 */ 2189 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2190 { 2191 struct hnae3_ae_dev *ae_dev; 2192 int ret; 2193 2194 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); 2195 if (!ae_dev) 2196 return -ENOMEM; 2197 2198 ae_dev->pdev = pdev; 2199 ae_dev->flag = ent->driver_data; 2200 pci_set_drvdata(pdev, ae_dev); 2201 2202 ret = hnae3_register_ae_dev(ae_dev); 2203 if (ret) 2204 pci_set_drvdata(pdev, NULL); 2205 2206 return ret; 2207 } 2208 2209 /* hns3_remove - Device removal routine 2210 * @pdev: PCI device information struct 2211 */ 2212 static void hns3_remove(struct pci_dev *pdev) 2213 { 2214 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2215 2216 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) 2217 hns3_disable_sriov(pdev); 2218 2219 hnae3_unregister_ae_dev(ae_dev); 2220 pci_set_drvdata(pdev, NULL); 2221 } 2222 2223 /** 2224 * hns3_pci_sriov_configure 2225 * @pdev: pointer to a pci_dev structure 2226 * @num_vfs: number of VFs to allocate 2227 * 2228 * Enable or change the number of VFs. Called when the user updates the number 2229 * of VFs in sysfs. 2230 **/ 2231 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 2232 { 2233 int ret; 2234 2235 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { 2236 dev_warn(&pdev->dev, "Can not config SRIOV\n"); 2237 return -EINVAL; 2238 } 2239 2240 if (num_vfs) { 2241 ret = pci_enable_sriov(pdev, num_vfs); 2242 if (ret) 2243 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); 2244 else 2245 return num_vfs; 2246 } else if (!pci_vfs_assigned(pdev)) { 2247 pci_disable_sriov(pdev); 2248 } else { 2249 dev_warn(&pdev->dev, 2250 "Unable to free VFs because some are assigned to VMs.\n"); 2251 } 2252 2253 return 0; 2254 } 2255 2256 static void hns3_shutdown(struct pci_dev *pdev) 2257 { 2258 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2259 2260 hnae3_unregister_ae_dev(ae_dev); 2261 pci_set_drvdata(pdev, NULL); 2262 2263 if (system_state == SYSTEM_POWER_OFF) 2264 pci_set_power_state(pdev, PCI_D3hot); 2265 } 2266 2267 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, 2268 pci_channel_state_t state) 2269 { 2270 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2271 pci_ers_result_t ret; 2272 2273 dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); 2274 2275 if (state == pci_channel_io_perm_failure) 2276 return PCI_ERS_RESULT_DISCONNECT; 2277 2278 if (!ae_dev || !ae_dev->ops) { 2279 dev_err(&pdev->dev, 2280 "Can't recover - error happened before device initialized\n"); 2281 return PCI_ERS_RESULT_NONE; 2282 } 2283 2284 if (ae_dev->ops->handle_hw_ras_error) 2285 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); 2286 else 2287 return PCI_ERS_RESULT_NONE; 2288 2289 return ret; 2290 } 2291 2292 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) 2293 { 2294 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2295 const struct hnae3_ae_ops *ops; 2296 enum hnae3_reset_type reset_type; 2297 struct device *dev = &pdev->dev; 2298 2299 if (!ae_dev || !ae_dev->ops) 2300 return PCI_ERS_RESULT_NONE; 2301 2302 ops = ae_dev->ops; 2303 /* request the reset */ 2304 if (ops->reset_event && ops->get_reset_level && 2305 ops->set_default_reset_request) { 2306 if (ae_dev->hw_err_reset_req) { 2307 reset_type = ops->get_reset_level(ae_dev, 2308 &ae_dev->hw_err_reset_req); 2309 ops->set_default_reset_request(ae_dev, reset_type); 2310 dev_info(dev, "requesting reset due to PCI 
error\n"); 2311 ops->reset_event(pdev, NULL); 2312 } 2313 2314 return PCI_ERS_RESULT_RECOVERED; 2315 } 2316 2317 return PCI_ERS_RESULT_DISCONNECT; 2318 } 2319 2320 static void hns3_reset_prepare(struct pci_dev *pdev) 2321 { 2322 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2323 2324 dev_info(&pdev->dev, "FLR prepare\n"); 2325 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) 2326 ae_dev->ops->flr_prepare(ae_dev); 2327 } 2328 2329 static void hns3_reset_done(struct pci_dev *pdev) 2330 { 2331 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2332 2333 dev_info(&pdev->dev, "FLR done\n"); 2334 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) 2335 ae_dev->ops->flr_done(ae_dev); 2336 } 2337 2338 static const struct pci_error_handlers hns3_err_handler = { 2339 .error_detected = hns3_error_detected, 2340 .slot_reset = hns3_slot_reset, 2341 .reset_prepare = hns3_reset_prepare, 2342 .reset_done = hns3_reset_done, 2343 }; 2344 2345 static struct pci_driver hns3_driver = { 2346 .name = hns3_driver_name, 2347 .id_table = hns3_pci_tbl, 2348 .probe = hns3_probe, 2349 .remove = hns3_remove, 2350 .shutdown = hns3_shutdown, 2351 .sriov_configure = hns3_pci_sriov_configure, 2352 .err_handler = &hns3_err_handler, 2353 }; 2354 2355 /* set default feature to hns3 */ 2356 static void hns3_set_default_feature(struct net_device *netdev) 2357 { 2358 struct hnae3_handle *h = hns3_get_handle(netdev); 2359 struct pci_dev *pdev = h->pdev; 2360 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2361 2362 netdev->priv_flags |= IFF_UNICAST_FLT; 2363 2364 netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2365 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2366 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2367 NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; 2368 2369 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 2370 2371 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 2372 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2373 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2374 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2375 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2376 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; 2377 2378 netdev->vlan_features |= NETIF_F_RXCSUM | 2379 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | 2380 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2381 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2382 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; 2383 2384 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 2385 NETIF_F_HW_VLAN_CTAG_RX | 2386 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2387 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2388 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2389 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; 2390 2391 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2392 netdev->hw_features |= NETIF_F_GRO_HW; 2393 netdev->features |= NETIF_F_GRO_HW; 2394 2395 if (!(h->flags & HNAE3_SUPPORT_VF)) { 2396 netdev->hw_features |= NETIF_F_NTUPLE; 2397 netdev->features |= NETIF_F_NTUPLE; 2398 } 2399 } 2400 2401 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) { 2402 netdev->hw_features |= NETIF_F_GSO_UDP_L4; 2403 netdev->features |= NETIF_F_GSO_UDP_L4; 2404 netdev->vlan_features |= NETIF_F_GSO_UDP_L4; 2405 netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4; 2406 } 2407 2408 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) { 2409 netdev->hw_features |= NETIF_F_HW_CSUM; 2410 netdev->features |= NETIF_F_HW_CSUM; 2411 netdev->vlan_features |= NETIF_F_HW_CSUM; 2412 
netdev->hw_enc_features |= NETIF_F_HW_CSUM; 2413 } else { 2414 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2415 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2416 netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2417 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2418 } 2419 2420 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) { 2421 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2422 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2423 netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2424 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2425 } 2426 } 2427 2428 static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 2429 struct hns3_desc_cb *cb) 2430 { 2431 unsigned int order = hns3_page_order(ring); 2432 struct page *p; 2433 2434 p = dev_alloc_pages(order); 2435 if (!p) 2436 return -ENOMEM; 2437 2438 cb->priv = p; 2439 cb->page_offset = 0; 2440 cb->reuse_flag = 0; 2441 cb->buf = page_address(p); 2442 cb->length = hns3_page_size(ring); 2443 cb->type = DESC_TYPE_PAGE; 2444 page_ref_add(p, USHRT_MAX - 1); 2445 cb->pagecnt_bias = USHRT_MAX; 2446 2447 return 0; 2448 } 2449 2450 static void hns3_free_buffer(struct hns3_enet_ring *ring, 2451 struct hns3_desc_cb *cb, int budget) 2452 { 2453 if (cb->type == DESC_TYPE_SKB) 2454 napi_consume_skb(cb->priv, budget); 2455 else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias) 2456 __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); 2457 memset(cb, 0, sizeof(*cb)); 2458 } 2459 2460 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) 2461 { 2462 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 2463 cb->length, ring_to_dma_dir(ring)); 2464 2465 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) 2466 return -EIO; 2467 2468 return 0; 2469 } 2470 2471 static void hns3_unmap_buffer(struct hns3_enet_ring *ring, 2472 struct hns3_desc_cb *cb) 2473 { 2474 if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB) 2475 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 2476 ring_to_dma_dir(ring)); 2477 else if (cb->length) 2478 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 2479 ring_to_dma_dir(ring)); 2480 } 2481 2482 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) 2483 { 2484 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2485 ring->desc[i].addr = 0; 2486 } 2487 2488 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, 2489 int budget) 2490 { 2491 struct hns3_desc_cb *cb = &ring->desc_cb[i]; 2492 2493 if (!ring->desc_cb[i].dma) 2494 return; 2495 2496 hns3_buffer_detach(ring, i); 2497 hns3_free_buffer(ring, cb, budget); 2498 } 2499 2500 static void hns3_free_buffers(struct hns3_enet_ring *ring) 2501 { 2502 int i; 2503 2504 for (i = 0; i < ring->desc_num; i++) 2505 hns3_free_buffer_detach(ring, i, 0); 2506 } 2507 2508 /* free desc along with its attached buffer */ 2509 static void hns3_free_desc(struct hns3_enet_ring *ring) 2510 { 2511 int size = ring->desc_num * sizeof(ring->desc[0]); 2512 2513 hns3_free_buffers(ring); 2514 2515 if (ring->desc) { 2516 dma_free_coherent(ring_to_dev(ring), size, 2517 ring->desc, ring->desc_dma_addr); 2518 ring->desc = NULL; 2519 } 2520 } 2521 2522 static int hns3_alloc_desc(struct hns3_enet_ring *ring) 2523 { 2524 int size = ring->desc_num * sizeof(ring->desc[0]); 2525 2526 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, 2527 &ring->desc_dma_addr, GFP_KERNEL); 2528 if (!ring->desc) 2529 return -ENOMEM; 2530 2531 return 0; 2532 } 
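/* Descriptive note on the buffer helpers above and below (a summary of this
 * file's own code, not authoritative hardware documentation):
 *
 * hns3_alloc_buffer() grabs a compound page of hns3_page_order() order and
 * takes USHRT_MAX references on it up front, tracked in cb->pagecnt_bias,
 * so the RX path can hand out fragments without touching the page refcount
 * on every packet. hns3_map_buffer() DMA-maps that page for the ring's
 * direction, hns3_alloc_and_map_buffer() chains the two, and
 * hns3_alloc_and_attach_buffer() additionally writes the DMA address into
 * the descriptor. Teardown mirrors this: hns3_unmap_buffer() removes the
 * mapping and, for RX pages, hns3_free_buffer() returns the remaining
 * pagecnt_bias references via __page_frag_cache_drain().
 */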
2533 2534 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, 2535 struct hns3_desc_cb *cb) 2536 { 2537 int ret; 2538 2539 ret = hns3_alloc_buffer(ring, cb); 2540 if (ret) 2541 goto out; 2542 2543 ret = hns3_map_buffer(ring, cb); 2544 if (ret) 2545 goto out_with_buf; 2546 2547 return 0; 2548 2549 out_with_buf: 2550 hns3_free_buffer(ring, cb, 0); 2551 out: 2552 return ret; 2553 } 2554 2555 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) 2556 { 2557 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); 2558 2559 if (ret) 2560 return ret; 2561 2562 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2563 2564 return 0; 2565 } 2566 2567 /* Allocate memory for the raw packet buffers and map them for DMA */ 2568 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) 2569 { 2570 int i, j, ret; 2571 2572 for (i = 0; i < ring->desc_num; i++) { 2573 ret = hns3_alloc_and_attach_buffer(ring, i); 2574 if (ret) 2575 goto out_buffer_fail; 2576 } 2577 2578 return 0; 2579 2580 out_buffer_fail: 2581 for (j = i - 1; j >= 0; j--) 2582 hns3_free_buffer_detach(ring, j, 0); 2583 return ret; 2584 } 2585 2586 /* detach an in-use buffer and replace it with a reserved one */ 2587 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, 2588 struct hns3_desc_cb *res_cb) 2589 { 2590 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2591 ring->desc_cb[i] = *res_cb; 2592 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2593 ring->desc[i].rx.bd_base_info = 0; 2594 } 2595 2596 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) 2597 { 2598 ring->desc_cb[i].reuse_flag = 0; 2599 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + 2600 ring->desc_cb[i].page_offset); 2601 ring->desc[i].rx.bd_base_info = 0; 2602 2603 dma_sync_single_for_device(ring_to_dev(ring), 2604 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, 2605 hns3_buf_size(ring), 2606 DMA_FROM_DEVICE); 2607 } 2608 2609 static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, 2610 int *bytes, int *pkts, int budget) 2611 { 2612 /* Pairs with the ring->last_to_use update in hns3_tx_doorbell(); 2613 * smp_store_release() is not used in hns3_tx_doorbell() because 2614 * the doorbell write already provides the needed barrier. 2615 */ 2616 int ltu = smp_load_acquire(&ring->last_to_use); 2617 int ntc = ring->next_to_clean; 2618 struct hns3_desc_cb *desc_cb; 2619 bool reclaimed = false; 2620 struct hns3_desc *desc; 2621 2622 while (ltu != ntc) { 2623 desc = &ring->desc[ntc]; 2624 2625 if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & 2626 BIT(HNS3_TXD_VLD_B)) 2627 break; 2628 2629 desc_cb = &ring->desc_cb[ntc]; 2630 (*pkts) += (desc_cb->type == DESC_TYPE_SKB); 2631 (*bytes) += desc_cb->length; 2632 /* desc_cb will be cleaned after hns3_free_buffer_detach() */ 2633 hns3_free_buffer_detach(ring, ntc, budget); 2634 2635 if (++ntc == ring->desc_num) 2636 ntc = 0; 2637 2638 /* Issue prefetch for next Tx descriptor */ 2639 prefetch(&ring->desc_cb[ntc]); 2640 reclaimed = true; 2641 } 2642 2643 if (unlikely(!reclaimed)) 2644 return false; 2645 2646 /* This smp_store_release() pairs with smp_load_acquire() in 2647 * ring_space called by hns3_nic_net_xmit.
2648 */ 2649 smp_store_release(&ring->next_to_clean, ntc); 2650 return true; 2651 } 2652 2653 void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) 2654 { 2655 struct net_device *netdev = ring_to_netdev(ring); 2656 struct hns3_nic_priv *priv = netdev_priv(netdev); 2657 struct netdev_queue *dev_queue; 2658 int bytes, pkts; 2659 2660 bytes = 0; 2661 pkts = 0; 2662 2663 if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget))) 2664 return; 2665 2666 ring->tqp_vector->tx_group.total_bytes += bytes; 2667 ring->tqp_vector->tx_group.total_packets += pkts; 2668 2669 u64_stats_update_begin(&ring->syncp); 2670 ring->stats.tx_bytes += bytes; 2671 ring->stats.tx_pkts += pkts; 2672 u64_stats_update_end(&ring->syncp); 2673 2674 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 2675 netdev_tx_completed_queue(dev_queue, pkts, bytes); 2676 2677 if (unlikely(netif_carrier_ok(netdev) && 2678 ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { 2679 /* Make sure that anybody stopping the queue after this 2680 * sees the new next_to_clean. 2681 */ 2682 smp_mb(); 2683 if (netif_tx_queue_stopped(dev_queue) && 2684 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 2685 netif_tx_wake_queue(dev_queue); 2686 ring->stats.restart_queue++; 2687 } 2688 } 2689 } 2690 2691 static int hns3_desc_unused(struct hns3_enet_ring *ring) 2692 { 2693 int ntc = ring->next_to_clean; 2694 int ntu = ring->next_to_use; 2695 2696 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; 2697 } 2698 2699 static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, 2700 int cleand_count) 2701 { 2702 struct hns3_desc_cb *desc_cb; 2703 struct hns3_desc_cb res_cbs; 2704 int i, ret; 2705 2706 for (i = 0; i < cleand_count; i++) { 2707 desc_cb = &ring->desc_cb[ring->next_to_use]; 2708 if (desc_cb->reuse_flag) { 2709 u64_stats_update_begin(&ring->syncp); 2710 ring->stats.reuse_pg_cnt++; 2711 u64_stats_update_end(&ring->syncp); 2712 2713 hns3_reuse_buffer(ring, ring->next_to_use); 2714 } else { 2715 ret = hns3_alloc_and_map_buffer(ring, &res_cbs); 2716 if (ret) { 2717 u64_stats_update_begin(&ring->syncp); 2718 ring->stats.sw_err_cnt++; 2719 u64_stats_update_end(&ring->syncp); 2720 2721 hns3_rl_err(ring_to_netdev(ring), 2722 "alloc rx buffer failed: %d\n", 2723 ret); 2724 break; 2725 } 2726 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 2727 2728 u64_stats_update_begin(&ring->syncp); 2729 ring->stats.non_reuse_pg++; 2730 u64_stats_update_end(&ring->syncp); 2731 } 2732 2733 ring_ptr_move_fw(ring, next_to_use); 2734 } 2735 2736 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 2737 } 2738 2739 static bool hns3_page_is_reusable(struct page *page) 2740 { 2741 return page_to_nid(page) == numa_mem_id() && 2742 !page_is_pfmemalloc(page); 2743 } 2744 2745 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) 2746 { 2747 return (page_count(cb->priv) - cb->pagecnt_bias) == 1; 2748 } 2749 2750 static void hns3_nic_reuse_page(struct sk_buff *skb, int i, 2751 struct hns3_enet_ring *ring, int pull_len, 2752 struct hns3_desc_cb *desc_cb) 2753 { 2754 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 2755 int size = le16_to_cpu(desc->rx.size); 2756 u32 truesize = hns3_buf_size(ring); 2757 2758 desc_cb->pagecnt_bias--; 2759 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 2760 size - pull_len, truesize); 2761 2762 /* Avoid re-using remote pages, or the stack is still using the page 2763 * when page_offset rollback to zero, flag default unreuse 2764 */ 2765 if 
(unlikely(!hns3_page_is_reusable(desc_cb->priv)) || 2766 (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) { 2767 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); 2768 return; 2769 } 2770 2771 /* Move offset up to the next cache line */ 2772 desc_cb->page_offset += truesize; 2773 2774 if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) { 2775 desc_cb->reuse_flag = 1; 2776 } else if (hns3_can_reuse_page(desc_cb)) { 2777 desc_cb->reuse_flag = 1; 2778 desc_cb->page_offset = 0; 2779 } else if (desc_cb->pagecnt_bias) { 2780 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); 2781 return; 2782 } 2783 2784 if (unlikely(!desc_cb->pagecnt_bias)) { 2785 page_ref_add(desc_cb->priv, USHRT_MAX); 2786 desc_cb->pagecnt_bias = USHRT_MAX; 2787 } 2788 } 2789 2790 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) 2791 { 2792 __be16 type = skb->protocol; 2793 struct tcphdr *th; 2794 int depth = 0; 2795 2796 while (eth_type_vlan(type)) { 2797 struct vlan_hdr *vh; 2798 2799 if ((depth + VLAN_HLEN) > skb_headlen(skb)) 2800 return -EFAULT; 2801 2802 vh = (struct vlan_hdr *)(skb->data + depth); 2803 type = vh->h_vlan_encapsulated_proto; 2804 depth += VLAN_HLEN; 2805 } 2806 2807 skb_set_network_header(skb, depth); 2808 2809 if (type == htons(ETH_P_IP)) { 2810 const struct iphdr *iph = ip_hdr(skb); 2811 2812 depth += sizeof(struct iphdr); 2813 skb_set_transport_header(skb, depth); 2814 th = tcp_hdr(skb); 2815 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, 2816 iph->daddr, 0); 2817 } else if (type == htons(ETH_P_IPV6)) { 2818 const struct ipv6hdr *iph = ipv6_hdr(skb); 2819 2820 depth += sizeof(struct ipv6hdr); 2821 skb_set_transport_header(skb, depth); 2822 th = tcp_hdr(skb); 2823 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, 2824 &iph->daddr, 0); 2825 } else { 2826 hns3_rl_err(skb->dev, 2827 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", 2828 be16_to_cpu(type), depth); 2829 return -EFAULT; 2830 } 2831 2832 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 2833 if (th->cwr) 2834 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 2835 2836 if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) 2837 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; 2838 2839 skb->csum_start = (unsigned char *)th - skb->head; 2840 skb->csum_offset = offsetof(struct tcphdr, check); 2841 skb->ip_summed = CHECKSUM_PARTIAL; 2842 2843 trace_hns3_gro(skb); 2844 2845 return 0; 2846 } 2847 2848 static void hns3_checksum_complete(struct hns3_enet_ring *ring, 2849 struct sk_buff *skb, u32 l234info) 2850 { 2851 u32 lo, hi; 2852 2853 u64_stats_update_begin(&ring->syncp); 2854 ring->stats.csum_complete++; 2855 u64_stats_update_end(&ring->syncp); 2856 skb->ip_summed = CHECKSUM_COMPLETE; 2857 lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M, 2858 HNS3_RXD_L2_CSUM_L_S); 2859 hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M, 2860 HNS3_RXD_L2_CSUM_H_S); 2861 skb->csum = csum_unfold((__force __sum16)(lo | hi << 8)); 2862 } 2863 2864 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, 2865 u32 l234info, u32 bd_base_info, u32 ol_info) 2866 { 2867 struct net_device *netdev = ring_to_netdev(ring); 2868 int l3_type, l4_type; 2869 int ol4_type; 2870 2871 skb->ip_summed = CHECKSUM_NONE; 2872 2873 skb_checksum_none_assert(skb); 2874 2875 if (!(netdev->features & NETIF_F_RXCSUM)) 2876 return; 2877 2878 if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) { 2879 hns3_checksum_complete(ring, skb, l234info); 2880 return; 2881 } 2882 2883 /* check if hardware has done 
checksum */ 2884 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) 2885 return; 2886 2887 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | 2888 BIT(HNS3_RXD_OL3E_B) | 2889 BIT(HNS3_RXD_OL4E_B)))) { 2890 u64_stats_update_begin(&ring->syncp); 2891 ring->stats.l3l4_csum_err++; 2892 u64_stats_update_end(&ring->syncp); 2893 2894 return; 2895 } 2896 2897 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, 2898 HNS3_RXD_OL4ID_S); 2899 switch (ol4_type) { 2900 case HNS3_OL4_TYPE_MAC_IN_UDP: 2901 case HNS3_OL4_TYPE_NVGRE: 2902 skb->csum_level = 1; 2903 fallthrough; 2904 case HNS3_OL4_TYPE_NO_TUN: 2905 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, 2906 HNS3_RXD_L3ID_S); 2907 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, 2908 HNS3_RXD_L4ID_S); 2909 2910 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ 2911 if ((l3_type == HNS3_L3_TYPE_IPV4 || 2912 l3_type == HNS3_L3_TYPE_IPV6) && 2913 (l4_type == HNS3_L4_TYPE_UDP || 2914 l4_type == HNS3_L4_TYPE_TCP || 2915 l4_type == HNS3_L4_TYPE_SCTP)) 2916 skb->ip_summed = CHECKSUM_UNNECESSARY; 2917 break; 2918 default: 2919 break; 2920 } 2921 } 2922 2923 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) 2924 { 2925 if (skb_has_frag_list(skb)) 2926 napi_gro_flush(&ring->tqp_vector->napi, false); 2927 2928 napi_gro_receive(&ring->tqp_vector->napi, skb); 2929 } 2930 2931 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, 2932 struct hns3_desc *desc, u32 l234info, 2933 u16 *vlan_tag) 2934 { 2935 struct hnae3_handle *handle = ring->tqp->handle; 2936 struct pci_dev *pdev = ring->tqp->handle->pdev; 2937 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2938 2939 if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) { 2940 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2941 if (!(*vlan_tag & VLAN_VID_MASK)) 2942 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2943 2944 return (*vlan_tag != 0); 2945 } 2946 2947 #define HNS3_STRP_OUTER_VLAN 0x1 2948 #define HNS3_STRP_INNER_VLAN 0x2 2949 #define HNS3_STRP_BOTH 0x3 2950 2951 /* The hardware always inserts the stripped VLAN tag into the RX 2952 * descriptor when it removes the tag from the packet, so the driver 2953 * needs to determine which tag to report to the stack.
2954 */ 2955 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, 2956 HNS3_RXD_STRP_TAGP_S)) { 2957 case HNS3_STRP_OUTER_VLAN: 2958 if (handle->port_base_vlan_state != 2959 HNAE3_PORT_BASE_VLAN_DISABLE) 2960 return false; 2961 2962 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2963 return true; 2964 case HNS3_STRP_INNER_VLAN: 2965 if (handle->port_base_vlan_state != 2966 HNAE3_PORT_BASE_VLAN_DISABLE) 2967 return false; 2968 2969 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2970 return true; 2971 case HNS3_STRP_BOTH: 2972 if (handle->port_base_vlan_state == 2973 HNAE3_PORT_BASE_VLAN_DISABLE) 2974 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2975 else 2976 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2977 2978 return true; 2979 default: 2980 return false; 2981 } 2982 } 2983 2984 static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) 2985 { 2986 ring->desc[ring->next_to_clean].rx.bd_base_info &= 2987 cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); 2988 ring->next_to_clean += 1; 2989 2990 if (unlikely(ring->next_to_clean == ring->desc_num)) 2991 ring->next_to_clean = 0; 2992 } 2993 2994 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, 2995 unsigned char *va) 2996 { 2997 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; 2998 struct net_device *netdev = ring_to_netdev(ring); 2999 struct sk_buff *skb; 3000 3001 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); 3002 skb = ring->skb; 3003 if (unlikely(!skb)) { 3004 hns3_rl_err(netdev, "alloc rx skb fail\n"); 3005 3006 u64_stats_update_begin(&ring->syncp); 3007 ring->stats.sw_err_cnt++; 3008 u64_stats_update_end(&ring->syncp); 3009 3010 return -ENOMEM; 3011 } 3012 3013 trace_hns3_rx_desc(ring); 3014 prefetchw(skb->data); 3015 3016 ring->pending_buf = 1; 3017 ring->frag_num = 0; 3018 ring->tail_skb = NULL; 3019 if (length <= HNS3_RX_HEAD_SIZE) { 3020 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 3021 3022 /* We can reuse buffer as-is, just make sure it is local */ 3023 if (likely(hns3_page_is_reusable(desc_cb->priv))) 3024 desc_cb->reuse_flag = 1; 3025 else /* This page cannot be reused so discard it */ 3026 __page_frag_cache_drain(desc_cb->priv, 3027 desc_cb->pagecnt_bias); 3028 3029 hns3_rx_ring_move_fw(ring); 3030 return 0; 3031 } 3032 u64_stats_update_begin(&ring->syncp); 3033 ring->stats.seg_pkt_cnt++; 3034 u64_stats_update_end(&ring->syncp); 3035 3036 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); 3037 __skb_put(skb, ring->pull_len); 3038 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, 3039 desc_cb); 3040 hns3_rx_ring_move_fw(ring); 3041 3042 return 0; 3043 } 3044 3045 static int hns3_add_frag(struct hns3_enet_ring *ring) 3046 { 3047 struct sk_buff *skb = ring->skb; 3048 struct sk_buff *head_skb = skb; 3049 struct sk_buff *new_skb; 3050 struct hns3_desc_cb *desc_cb; 3051 struct hns3_desc *desc; 3052 u32 bd_base_info; 3053 3054 do { 3055 desc = &ring->desc[ring->next_to_clean]; 3056 desc_cb = &ring->desc_cb[ring->next_to_clean]; 3057 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3058 /* make sure HW write desc complete */ 3059 dma_rmb(); 3060 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) 3061 return -ENXIO; 3062 3063 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { 3064 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); 3065 if (unlikely(!new_skb)) { 3066 hns3_rl_err(ring_to_netdev(ring), 3067 "alloc rx fraglist skb fail\n"); 3068 return -ENXIO; 3069 } 3070 ring->frag_num = 0; 3071 3072 if (ring->tail_skb) { 3073 
ring->tail_skb->next = new_skb; 3074 ring->tail_skb = new_skb; 3075 } else { 3076 skb_shinfo(skb)->frag_list = new_skb; 3077 ring->tail_skb = new_skb; 3078 } 3079 } 3080 3081 if (ring->tail_skb) { 3082 head_skb->truesize += hns3_buf_size(ring); 3083 head_skb->data_len += le16_to_cpu(desc->rx.size); 3084 head_skb->len += le16_to_cpu(desc->rx.size); 3085 skb = ring->tail_skb; 3086 } 3087 3088 dma_sync_single_for_cpu(ring_to_dev(ring), 3089 desc_cb->dma + desc_cb->page_offset, 3090 hns3_buf_size(ring), 3091 DMA_FROM_DEVICE); 3092 3093 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); 3094 trace_hns3_rx_desc(ring); 3095 hns3_rx_ring_move_fw(ring); 3096 ring->pending_buf++; 3097 } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); 3098 3099 return 0; 3100 } 3101 3102 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, 3103 struct sk_buff *skb, u32 l234info, 3104 u32 bd_base_info, u32 ol_info) 3105 { 3106 u32 l3_type; 3107 3108 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, 3109 HNS3_RXD_GRO_SIZE_M, 3110 HNS3_RXD_GRO_SIZE_S); 3111 /* if there is no HW GRO, do not set gro params */ 3112 if (!skb_shinfo(skb)->gso_size) { 3113 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info); 3114 return 0; 3115 } 3116 3117 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, 3118 HNS3_RXD_GRO_COUNT_M, 3119 HNS3_RXD_GRO_COUNT_S); 3120 3121 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); 3122 if (l3_type == HNS3_L3_TYPE_IPV4) 3123 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 3124 else if (l3_type == HNS3_L3_TYPE_IPV6) 3125 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 3126 else 3127 return -EFAULT; 3128 3129 return hns3_gro_complete(skb, l234info); 3130 } 3131 3132 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, 3133 struct sk_buff *skb, u32 rss_hash) 3134 { 3135 struct hnae3_handle *handle = ring->tqp->handle; 3136 enum pkt_hash_types rss_type; 3137 3138 if (rss_hash) 3139 rss_type = handle->kinfo.rss_type; 3140 else 3141 rss_type = PKT_HASH_TYPE_NONE; 3142 3143 skb_set_hash(skb, rss_hash, rss_type); 3144 } 3145 3146 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) 3147 { 3148 struct net_device *netdev = ring_to_netdev(ring); 3149 enum hns3_pkt_l2t_type l2_frame_type; 3150 u32 bd_base_info, l234info, ol_info; 3151 struct hns3_desc *desc; 3152 unsigned int len; 3153 int pre_ntc, ret; 3154 3155 /* bdinfo handled below is only valid on the last BD of the 3156 * current packet, and ring->next_to_clean indicates the first 3157 * descriptor of next packet, so need - 1 below. 3158 */ 3159 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : 3160 (ring->desc_num - 1); 3161 desc = &ring->desc[pre_ntc]; 3162 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3163 l234info = le32_to_cpu(desc->rx.l234_info); 3164 ol_info = le32_to_cpu(desc->rx.ol_info); 3165 3166 /* Based on hw strategy, the tag offloaded will be stored at 3167 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 3168 * in one layer tag case. 
3169 */ 3170 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 3171 u16 vlan_tag; 3172 3173 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) 3174 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3175 vlan_tag); 3176 } 3177 3178 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | 3179 BIT(HNS3_RXD_L2E_B))))) { 3180 u64_stats_update_begin(&ring->syncp); 3181 if (l234info & BIT(HNS3_RXD_L2E_B)) 3182 ring->stats.l2_err++; 3183 else 3184 ring->stats.err_pkt_len++; 3185 u64_stats_update_end(&ring->syncp); 3186 3187 return -EFAULT; 3188 } 3189 3190 len = skb->len; 3191 3192 /* Do update ip stack process */ 3193 skb->protocol = eth_type_trans(skb, netdev); 3194 3195 /* This is needed in order to enable forwarding support */ 3196 ret = hns3_set_gro_and_checksum(ring, skb, l234info, 3197 bd_base_info, ol_info); 3198 if (unlikely(ret)) { 3199 u64_stats_update_begin(&ring->syncp); 3200 ring->stats.rx_err_cnt++; 3201 u64_stats_update_end(&ring->syncp); 3202 return ret; 3203 } 3204 3205 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, 3206 HNS3_RXD_DMAC_S); 3207 3208 u64_stats_update_begin(&ring->syncp); 3209 ring->stats.rx_pkts++; 3210 ring->stats.rx_bytes += len; 3211 3212 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) 3213 ring->stats.rx_multicast++; 3214 3215 u64_stats_update_end(&ring->syncp); 3216 3217 ring->tqp_vector->rx_group.total_bytes += len; 3218 3219 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); 3220 return 0; 3221 } 3222 3223 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) 3224 { 3225 struct sk_buff *skb = ring->skb; 3226 struct hns3_desc_cb *desc_cb; 3227 struct hns3_desc *desc; 3228 unsigned int length; 3229 u32 bd_base_info; 3230 int ret; 3231 3232 desc = &ring->desc[ring->next_to_clean]; 3233 desc_cb = &ring->desc_cb[ring->next_to_clean]; 3234 3235 prefetch(desc); 3236 3237 if (!skb) { 3238 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3239 3240 /* Check valid BD */ 3241 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) 3242 return -ENXIO; 3243 3244 dma_rmb(); 3245 length = le16_to_cpu(desc->rx.size); 3246 3247 ring->va = desc_cb->buf + desc_cb->page_offset; 3248 3249 dma_sync_single_for_cpu(ring_to_dev(ring), 3250 desc_cb->dma + desc_cb->page_offset, 3251 hns3_buf_size(ring), 3252 DMA_FROM_DEVICE); 3253 3254 /* Prefetch first cache line of first page. 3255 * Idea is to cache few bytes of the header of the packet. 3256 * Our L1 Cache line size is 64B so need to prefetch twice to make 3257 * it 128B. But in actual we can have greater size of caches with 3258 * 128B Level 1 cache lines. In such a case, single fetch would 3259 * suffice to cache in the relevant part of the header. 
3260 */ 3261 net_prefetch(ring->va); 3262 3263 ret = hns3_alloc_skb(ring, length, ring->va); 3264 skb = ring->skb; 3265 3266 if (ret < 0) /* alloc buffer fail */ 3267 return ret; 3268 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need to add frag */ 3269 ret = hns3_add_frag(ring); 3270 if (ret) 3271 return ret; 3272 } 3273 } else { 3274 ret = hns3_add_frag(ring); 3275 if (ret) 3276 return ret; 3277 } 3278 3279 /* As the head data may be changed when GRO is enabled, copy 3280 * the head data in after the rest of the packet has been received 3281 */ 3282 if (skb->len > HNS3_RX_HEAD_SIZE) 3283 memcpy(skb->data, ring->va, 3284 ALIGN(ring->pull_len, sizeof(long))); 3285 3286 ret = hns3_handle_bdinfo(ring, skb); 3287 if (unlikely(ret)) { 3288 dev_kfree_skb_any(skb); 3289 return ret; 3290 } 3291 3292 skb_record_rx_queue(skb, ring->tqp->tqp_index); 3293 return 0; 3294 } 3295 3296 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, 3297 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) 3298 { 3299 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 3300 int unused_count = hns3_desc_unused(ring); 3301 int recv_pkts = 0; 3302 int err; 3303 3304 unused_count -= ring->pending_buf; 3305 3306 while (recv_pkts < budget) { 3307 /* Reuse or realloc buffers */ 3308 if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { 3309 hns3_nic_alloc_rx_buffers(ring, unused_count); 3310 unused_count = hns3_desc_unused(ring) - 3311 ring->pending_buf; 3312 } 3313 3314 /* Poll one pkt */ 3315 err = hns3_handle_rx_bd(ring); 3316 /* FE not received for the packet yet, or failed to alloc skb */ 3317 if (unlikely(!ring->skb || err == -ENXIO)) { 3318 goto out; 3319 } else if (likely(!err)) { 3320 rx_fn(ring, ring->skb); 3321 recv_pkts++; 3322 } 3323 3324 unused_count += ring->pending_buf; 3325 ring->skb = NULL; 3326 ring->pending_buf = 0; 3327 } 3328 3329 out: 3330 /* Make sure all data has been written before submitting */ 3331 if (unused_count > 0) 3332 hns3_nic_alloc_rx_buffers(ring, unused_count); 3333 3334 return recv_pkts; 3335 } 3336 3337 static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group) 3338 { 3339 #define HNS3_RX_LOW_BYTE_RATE 10000 3340 #define HNS3_RX_MID_BYTE_RATE 20000 3341 #define HNS3_RX_ULTRA_PACKET_RATE 40 3342 3343 enum hns3_flow_level_range new_flow_level; 3344 struct hns3_enet_tqp_vector *tqp_vector; 3345 int packets_per_msecs, bytes_per_msecs; 3346 u32 time_passed_ms; 3347 3348 tqp_vector = ring_group->ring->tqp_vector; 3349 time_passed_ms = 3350 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); 3351 if (!time_passed_ms) 3352 return false; 3353 3354 do_div(ring_group->total_packets, time_passed_ms); 3355 packets_per_msecs = ring_group->total_packets; 3356 3357 do_div(ring_group->total_bytes, time_passed_ms); 3358 bytes_per_msecs = ring_group->total_bytes; 3359 3360 new_flow_level = ring_group->coal.flow_level; 3361 3362 /* Simple throttle rate management 3363 * 0-10MB/s lower (50000 ints/s) 3364 * 10-20MB/s middle (20000 ints/s) 3365 * 20-1249MB/s high (18000 ints/s) 3366 * > 40000pps ultra (8000 ints/s) 3367 */ 3368 switch (new_flow_level) { 3369 case HNS3_FLOW_LOW: 3370 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) 3371 new_flow_level = HNS3_FLOW_MID; 3372 break; 3373 case HNS3_FLOW_MID: 3374 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) 3375 new_flow_level = HNS3_FLOW_HIGH; 3376 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) 3377 new_flow_level = HNS3_FLOW_LOW; 3378 break; 3379 case HNS3_FLOW_HIGH: 3380 case HNS3_FLOW_ULTRA: 3381 default: 3382 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) 3383 new_flow_level =
HNS3_FLOW_MID; 3384 break; 3385 } 3386 3387 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && 3388 &tqp_vector->rx_group == ring_group) 3389 new_flow_level = HNS3_FLOW_ULTRA; 3390 3391 ring_group->total_bytes = 0; 3392 ring_group->total_packets = 0; 3393 ring_group->coal.flow_level = new_flow_level; 3394 3395 return true; 3396 } 3397 3398 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) 3399 { 3400 struct hns3_enet_tqp_vector *tqp_vector; 3401 u16 new_int_gl; 3402 3403 if (!ring_group->ring) 3404 return false; 3405 3406 tqp_vector = ring_group->ring->tqp_vector; 3407 if (!tqp_vector->last_jiffies) 3408 return false; 3409 3410 if (ring_group->total_packets == 0) { 3411 ring_group->coal.int_gl = HNS3_INT_GL_50K; 3412 ring_group->coal.flow_level = HNS3_FLOW_LOW; 3413 return true; 3414 } 3415 3416 if (!hns3_get_new_flow_lvl(ring_group)) 3417 return false; 3418 3419 new_int_gl = ring_group->coal.int_gl; 3420 switch (ring_group->coal.flow_level) { 3421 case HNS3_FLOW_LOW: 3422 new_int_gl = HNS3_INT_GL_50K; 3423 break; 3424 case HNS3_FLOW_MID: 3425 new_int_gl = HNS3_INT_GL_20K; 3426 break; 3427 case HNS3_FLOW_HIGH: 3428 new_int_gl = HNS3_INT_GL_18K; 3429 break; 3430 case HNS3_FLOW_ULTRA: 3431 new_int_gl = HNS3_INT_GL_8K; 3432 break; 3433 default: 3434 break; 3435 } 3436 3437 if (new_int_gl != ring_group->coal.int_gl) { 3438 ring_group->coal.int_gl = new_int_gl; 3439 return true; 3440 } 3441 return false; 3442 } 3443 3444 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) 3445 { 3446 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 3447 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 3448 bool rx_update, tx_update; 3449 3450 /* update param every 1000ms */ 3451 if (time_before(jiffies, 3452 tqp_vector->last_jiffies + msecs_to_jiffies(1000))) 3453 return; 3454 3455 if (rx_group->coal.adapt_enable) { 3456 rx_update = hns3_get_new_int_gl(rx_group); 3457 if (rx_update) 3458 hns3_set_vector_coalesce_rx_gl(tqp_vector, 3459 rx_group->coal.int_gl); 3460 } 3461 3462 if (tx_group->coal.adapt_enable) { 3463 tx_update = hns3_get_new_int_gl(tx_group); 3464 if (tx_update) 3465 hns3_set_vector_coalesce_tx_gl(tqp_vector, 3466 tx_group->coal.int_gl); 3467 } 3468 3469 tqp_vector->last_jiffies = jiffies; 3470 } 3471 3472 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 3473 { 3474 struct hns3_nic_priv *priv = netdev_priv(napi->dev); 3475 struct hns3_enet_ring *ring; 3476 int rx_pkt_total = 0; 3477 3478 struct hns3_enet_tqp_vector *tqp_vector = 3479 container_of(napi, struct hns3_enet_tqp_vector, napi); 3480 bool clean_complete = true; 3481 int rx_budget = budget; 3482 3483 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3484 napi_complete(napi); 3485 return 0; 3486 } 3487 3488 /* Since the actual Tx work is minimal, we can give the Tx a larger 3489 * budget and be more aggressive about cleaning up the Tx descriptors. 
3490 */ 3491 hns3_for_each_ring(ring, tqp_vector->tx_group) 3492 hns3_clean_tx_ring(ring, budget); 3493 3494 /* make sure rx ring budget not smaller than 1 */ 3495 if (tqp_vector->num_tqps > 1) 3496 rx_budget = max(budget / tqp_vector->num_tqps, 1); 3497 3498 hns3_for_each_ring(ring, tqp_vector->rx_group) { 3499 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 3500 hns3_rx_skb); 3501 3502 if (rx_cleaned >= rx_budget) 3503 clean_complete = false; 3504 3505 rx_pkt_total += rx_cleaned; 3506 } 3507 3508 tqp_vector->rx_group.total_packets += rx_pkt_total; 3509 3510 if (!clean_complete) 3511 return budget; 3512 3513 if (napi_complete(napi) && 3514 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3515 hns3_update_new_int_gl(tqp_vector); 3516 hns3_mask_vector_irq(tqp_vector, 1); 3517 } 3518 3519 return rx_pkt_total; 3520 } 3521 3522 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3523 struct hnae3_ring_chain_node *head) 3524 { 3525 struct pci_dev *pdev = tqp_vector->handle->pdev; 3526 struct hnae3_ring_chain_node *cur_chain = head; 3527 struct hnae3_ring_chain_node *chain; 3528 struct hns3_enet_ring *tx_ring; 3529 struct hns3_enet_ring *rx_ring; 3530 3531 tx_ring = tqp_vector->tx_group.ring; 3532 if (tx_ring) { 3533 cur_chain->tqp_index = tx_ring->tqp->tqp_index; 3534 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3535 HNAE3_RING_TYPE_TX); 3536 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3537 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); 3538 3539 cur_chain->next = NULL; 3540 3541 while (tx_ring->next) { 3542 tx_ring = tx_ring->next; 3543 3544 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), 3545 GFP_KERNEL); 3546 if (!chain) 3547 goto err_free_chain; 3548 3549 cur_chain->next = chain; 3550 chain->tqp_index = tx_ring->tqp->tqp_index; 3551 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3552 HNAE3_RING_TYPE_TX); 3553 hnae3_set_field(chain->int_gl_idx, 3554 HNAE3_RING_GL_IDX_M, 3555 HNAE3_RING_GL_IDX_S, 3556 HNAE3_RING_GL_TX); 3557 3558 cur_chain = chain; 3559 } 3560 } 3561 3562 rx_ring = tqp_vector->rx_group.ring; 3563 if (!tx_ring && rx_ring) { 3564 cur_chain->next = NULL; 3565 cur_chain->tqp_index = rx_ring->tqp->tqp_index; 3566 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3567 HNAE3_RING_TYPE_RX); 3568 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3569 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3570 3571 rx_ring = rx_ring->next; 3572 } 3573 3574 while (rx_ring) { 3575 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 3576 if (!chain) 3577 goto err_free_chain; 3578 3579 cur_chain->next = chain; 3580 chain->tqp_index = rx_ring->tqp->tqp_index; 3581 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3582 HNAE3_RING_TYPE_RX); 3583 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3584 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3585 3586 cur_chain = chain; 3587 3588 rx_ring = rx_ring->next; 3589 } 3590 3591 return 0; 3592 3593 err_free_chain: 3594 cur_chain = head->next; 3595 while (cur_chain) { 3596 chain = cur_chain->next; 3597 devm_kfree(&pdev->dev, cur_chain); 3598 cur_chain = chain; 3599 } 3600 head->next = NULL; 3601 3602 return -ENOMEM; 3603 } 3604 3605 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3606 struct hnae3_ring_chain_node *head) 3607 { 3608 struct pci_dev *pdev = tqp_vector->handle->pdev; 3609 struct hnae3_ring_chain_node *chain_tmp, *chain; 3610 3611 chain = head->next; 3612 3613 while (chain) { 3614 chain_tmp = chain->next; 3615 devm_kfree(&pdev->dev, chain); 3616 chain 
= chain_tmp; 3617 } 3618 } 3619 3620 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 3621 struct hns3_enet_ring *ring) 3622 { 3623 ring->next = group->ring; 3624 group->ring = ring; 3625 3626 group->count++; 3627 } 3628 3629 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) 3630 { 3631 struct pci_dev *pdev = priv->ae_handle->pdev; 3632 struct hns3_enet_tqp_vector *tqp_vector; 3633 int num_vectors = priv->vector_num; 3634 int numa_node; 3635 int vector_i; 3636 3637 numa_node = dev_to_node(&pdev->dev); 3638 3639 for (vector_i = 0; vector_i < num_vectors; vector_i++) { 3640 tqp_vector = &priv->tqp_vector[vector_i]; 3641 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), 3642 &tqp_vector->affinity_mask); 3643 } 3644 } 3645 3646 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 3647 { 3648 struct hnae3_ring_chain_node vector_ring_chain; 3649 struct hnae3_handle *h = priv->ae_handle; 3650 struct hns3_enet_tqp_vector *tqp_vector; 3651 int ret; 3652 int i; 3653 3654 hns3_nic_set_cpumask(priv); 3655 3656 for (i = 0; i < priv->vector_num; i++) { 3657 tqp_vector = &priv->tqp_vector[i]; 3658 hns3_vector_coalesce_init_hw(tqp_vector, priv); 3659 tqp_vector->num_tqps = 0; 3660 } 3661 3662 for (i = 0; i < h->kinfo.num_tqps; i++) { 3663 u16 vector_i = i % priv->vector_num; 3664 u16 tqp_num = h->kinfo.num_tqps; 3665 3666 tqp_vector = &priv->tqp_vector[vector_i]; 3667 3668 hns3_add_ring_to_group(&tqp_vector->tx_group, 3669 &priv->ring[i]); 3670 3671 hns3_add_ring_to_group(&tqp_vector->rx_group, 3672 &priv->ring[i + tqp_num]); 3673 3674 priv->ring[i].tqp_vector = tqp_vector; 3675 priv->ring[i + tqp_num].tqp_vector = tqp_vector; 3676 tqp_vector->num_tqps++; 3677 } 3678 3679 for (i = 0; i < priv->vector_num; i++) { 3680 tqp_vector = &priv->tqp_vector[i]; 3681 3682 tqp_vector->rx_group.total_bytes = 0; 3683 tqp_vector->rx_group.total_packets = 0; 3684 tqp_vector->tx_group.total_bytes = 0; 3685 tqp_vector->tx_group.total_packets = 0; 3686 tqp_vector->handle = h; 3687 3688 ret = hns3_get_vector_ring_chain(tqp_vector, 3689 &vector_ring_chain); 3690 if (ret) 3691 goto map_ring_fail; 3692 3693 ret = h->ae_algo->ops->map_ring_to_vector(h, 3694 tqp_vector->vector_irq, &vector_ring_chain); 3695 3696 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3697 3698 if (ret) 3699 goto map_ring_fail; 3700 3701 netif_napi_add(priv->netdev, &tqp_vector->napi, 3702 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 3703 } 3704 3705 return 0; 3706 3707 map_ring_fail: 3708 while (i--) 3709 netif_napi_del(&priv->tqp_vector[i].napi); 3710 3711 return ret; 3712 } 3713 3714 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 3715 { 3716 struct hnae3_handle *h = priv->ae_handle; 3717 struct hns3_enet_tqp_vector *tqp_vector; 3718 struct hnae3_vector_info *vector; 3719 struct pci_dev *pdev = h->pdev; 3720 u16 tqp_num = h->kinfo.num_tqps; 3721 u16 vector_num; 3722 int ret = 0; 3723 u16 i; 3724 3725 /* RSS size, cpu online and vector_num should be the same */ 3726 /* Should consider 2p/4p later */ 3727 vector_num = min_t(u16, num_online_cpus(), tqp_num); 3728 3729 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), 3730 GFP_KERNEL); 3731 if (!vector) 3732 return -ENOMEM; 3733 3734 /* save the actual available vector number */ 3735 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 3736 3737 priv->vector_num = vector_num; 3738 priv->tqp_vector = (struct hns3_enet_tqp_vector *) 3739 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 
3740 GFP_KERNEL); 3741 if (!priv->tqp_vector) { 3742 ret = -ENOMEM; 3743 goto out; 3744 } 3745 3746 for (i = 0; i < priv->vector_num; i++) { 3747 tqp_vector = &priv->tqp_vector[i]; 3748 tqp_vector->idx = i; 3749 tqp_vector->mask_addr = vector[i].io_addr; 3750 tqp_vector->vector_irq = vector[i].vector; 3751 hns3_vector_coalesce_init(tqp_vector, priv); 3752 } 3753 3754 out: 3755 devm_kfree(&pdev->dev, vector); 3756 return ret; 3757 } 3758 3759 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) 3760 { 3761 group->ring = NULL; 3762 group->count = 0; 3763 } 3764 3765 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 3766 { 3767 struct hnae3_ring_chain_node vector_ring_chain; 3768 struct hnae3_handle *h = priv->ae_handle; 3769 struct hns3_enet_tqp_vector *tqp_vector; 3770 int i; 3771 3772 for (i = 0; i < priv->vector_num; i++) { 3773 tqp_vector = &priv->tqp_vector[i]; 3774 3775 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) 3776 continue; 3777 3778 /* Since the mapping can be overwritten, if we fail to get the 3779 * chain between the vector and its rings, we still go on to 3780 * unmap and clean up the remaining vectors. 3781 */ 3782 if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) 3783 dev_warn(priv->dev, "failed to get ring chain\n"); 3784 3785 h->ae_algo->ops->unmap_ring_from_vector(h, 3786 tqp_vector->vector_irq, &vector_ring_chain); 3787 3788 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3789 3790 hns3_clear_ring_group(&tqp_vector->rx_group); 3791 hns3_clear_ring_group(&tqp_vector->tx_group); 3792 netif_napi_del(&priv->tqp_vector[i].napi); 3793 } 3794 } 3795 3796 static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 3797 { 3798 struct hnae3_handle *h = priv->ae_handle; 3799 struct pci_dev *pdev = h->pdev; 3800 int i, ret; 3801 3802 for (i = 0; i < priv->vector_num; i++) { 3803 struct hns3_enet_tqp_vector *tqp_vector; 3804 3805 tqp_vector = &priv->tqp_vector[i]; 3806 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 3807 if (ret) 3808 return; 3809 } 3810 3811 devm_kfree(&pdev->dev, priv->tqp_vector); 3812 } 3813 3814 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 3815 unsigned int ring_type) 3816 { 3817 int queue_num = priv->ae_handle->kinfo.num_tqps; 3818 struct hns3_enet_ring *ring; 3819 int desc_num; 3820 3821 if (ring_type == HNAE3_RING_TYPE_TX) { 3822 ring = &priv->ring[q->tqp_index]; 3823 desc_num = priv->ae_handle->kinfo.num_tx_desc; 3824 ring->queue_index = q->tqp_index; 3825 } else { 3826 ring = &priv->ring[q->tqp_index + queue_num]; 3827 desc_num = priv->ae_handle->kinfo.num_rx_desc; 3828 ring->queue_index = q->tqp_index; 3829 } 3830 3831 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 3832 3833 ring->tqp = q; 3834 ring->desc = NULL; 3835 ring->desc_cb = NULL; 3836 ring->dev = priv->dev; 3837 ring->desc_dma_addr = 0; 3838 ring->buf_size = q->buf_size; 3839 ring->desc_num = desc_num; 3840 ring->next_to_use = 0; 3841 ring->next_to_clean = 0; 3842 ring->last_to_use = 0; 3843 } 3844 3845 static void hns3_queue_to_ring(struct hnae3_queue *tqp, 3846 struct hns3_nic_priv *priv) 3847 { 3848 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 3849 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 3850 } 3851 3852 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 3853 { 3854 struct hnae3_handle *h = priv->ae_handle; 3855 struct pci_dev *pdev = h->pdev; 3856 int i; 3857 3858 priv->ring = devm_kzalloc(&pdev->dev, 3859 array3_size(h->kinfo.num_tqps, 3860
sizeof(*priv->ring), 2), 3861 GFP_KERNEL); 3862 if (!priv->ring) 3863 return -ENOMEM; 3864 3865 for (i = 0; i < h->kinfo.num_tqps; i++) 3866 hns3_queue_to_ring(h->kinfo.tqp[i], priv); 3867 3868 return 0; 3869 } 3870 3871 static void hns3_put_ring_config(struct hns3_nic_priv *priv) 3872 { 3873 if (!priv->ring) 3874 return; 3875 3876 devm_kfree(priv->dev, priv->ring); 3877 priv->ring = NULL; 3878 } 3879 3880 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) 3881 { 3882 int ret; 3883 3884 if (ring->desc_num <= 0 || ring->buf_size <= 0) 3885 return -EINVAL; 3886 3887 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, 3888 sizeof(ring->desc_cb[0]), GFP_KERNEL); 3889 if (!ring->desc_cb) { 3890 ret = -ENOMEM; 3891 goto out; 3892 } 3893 3894 ret = hns3_alloc_desc(ring); 3895 if (ret) 3896 goto out_with_desc_cb; 3897 3898 if (!HNAE3_IS_TX_RING(ring)) { 3899 ret = hns3_alloc_ring_buffers(ring); 3900 if (ret) 3901 goto out_with_desc; 3902 } 3903 3904 return 0; 3905 3906 out_with_desc: 3907 hns3_free_desc(ring); 3908 out_with_desc_cb: 3909 devm_kfree(ring_to_dev(ring), ring->desc_cb); 3910 ring->desc_cb = NULL; 3911 out: 3912 return ret; 3913 } 3914 3915 void hns3_fini_ring(struct hns3_enet_ring *ring) 3916 { 3917 hns3_free_desc(ring); 3918 devm_kfree(ring_to_dev(ring), ring->desc_cb); 3919 ring->desc_cb = NULL; 3920 ring->next_to_clean = 0; 3921 ring->next_to_use = 0; 3922 ring->last_to_use = 0; 3923 ring->pending_buf = 0; 3924 if (ring->skb) { 3925 dev_kfree_skb_any(ring->skb); 3926 ring->skb = NULL; 3927 } 3928 } 3929 3930 static int hns3_buf_size2type(u32 buf_size) 3931 { 3932 int bd_size_type; 3933 3934 switch (buf_size) { 3935 case 512: 3936 bd_size_type = HNS3_BD_SIZE_512_TYPE; 3937 break; 3938 case 1024: 3939 bd_size_type = HNS3_BD_SIZE_1024_TYPE; 3940 break; 3941 case 2048: 3942 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 3943 break; 3944 case 4096: 3945 bd_size_type = HNS3_BD_SIZE_4096_TYPE; 3946 break; 3947 default: 3948 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 3949 } 3950 3951 return bd_size_type; 3952 } 3953 3954 static void hns3_init_ring_hw(struct hns3_enet_ring *ring) 3955 { 3956 dma_addr_t dma = ring->desc_dma_addr; 3957 struct hnae3_queue *q = ring->tqp; 3958 3959 if (!HNAE3_IS_TX_RING(ring)) { 3960 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma); 3961 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, 3962 (u32)((dma >> 31) >> 1)); 3963 3964 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, 3965 hns3_buf_size2type(ring->buf_size)); 3966 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, 3967 ring->desc_num / 8 - 1); 3968 3969 } else { 3970 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, 3971 (u32)dma); 3972 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 3973 (u32)((dma >> 31) >> 1)); 3974 3975 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 3976 ring->desc_num / 8 - 1); 3977 } 3978 } 3979 3980 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) 3981 { 3982 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 3983 int i; 3984 3985 for (i = 0; i < HNAE3_MAX_TC; i++) { 3986 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; 3987 int j; 3988 3989 if (!tc_info->enable) 3990 continue; 3991 3992 for (j = 0; j < tc_info->tqp_count; j++) { 3993 struct hnae3_queue *q; 3994 3995 q = priv->ring[tc_info->tqp_offset + j].tqp; 3996 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, 3997 tc_info->tc); 3998 } 3999 } 4000 } 4001 4002 int hns3_init_all_ring(struct hns3_nic_priv *priv) 4003 { 4004 struct hnae3_handle *h = priv->ae_handle; 4005 int 
ring_num = h->kinfo.num_tqps * 2; 4006 int i, j; 4007 int ret; 4008 4009 for (i = 0; i < ring_num; i++) { 4010 ret = hns3_alloc_ring_memory(&priv->ring[i]); 4011 if (ret) { 4012 dev_err(priv->dev, 4013 "Alloc ring memory fail! ret=%d\n", ret); 4014 goto out_when_alloc_ring_memory; 4015 } 4016 4017 u64_stats_init(&priv->ring[i].syncp); 4018 } 4019 4020 return 0; 4021 4022 out_when_alloc_ring_memory: 4023 for (j = i - 1; j >= 0; j--) 4024 hns3_fini_ring(&priv->ring[j]); 4025 4026 return -ENOMEM; 4027 } 4028 4029 int hns3_uninit_all_ring(struct hns3_nic_priv *priv) 4030 { 4031 struct hnae3_handle *h = priv->ae_handle; 4032 int i; 4033 4034 for (i = 0; i < h->kinfo.num_tqps; i++) { 4035 hns3_fini_ring(&priv->ring[i]); 4036 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); 4037 } 4038 return 0; 4039 } 4040 4041 /* Set mac addr if it is configured. or leave it to the AE driver */ 4042 static int hns3_init_mac_addr(struct net_device *netdev) 4043 { 4044 struct hns3_nic_priv *priv = netdev_priv(netdev); 4045 struct hnae3_handle *h = priv->ae_handle; 4046 u8 mac_addr_temp[ETH_ALEN]; 4047 int ret = 0; 4048 4049 if (h->ae_algo->ops->get_mac_addr) 4050 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 4051 4052 /* Check if the MAC address is valid, if not get a random one */ 4053 if (!is_valid_ether_addr(mac_addr_temp)) { 4054 eth_hw_addr_random(netdev); 4055 dev_warn(priv->dev, "using random MAC address %pM\n", 4056 netdev->dev_addr); 4057 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { 4058 ether_addr_copy(netdev->dev_addr, mac_addr_temp); 4059 ether_addr_copy(netdev->perm_addr, mac_addr_temp); 4060 } else { 4061 return 0; 4062 } 4063 4064 if (h->ae_algo->ops->set_mac_addr) 4065 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 4066 4067 return ret; 4068 } 4069 4070 static int hns3_init_phy(struct net_device *netdev) 4071 { 4072 struct hnae3_handle *h = hns3_get_handle(netdev); 4073 int ret = 0; 4074 4075 if (h->ae_algo->ops->mac_connect_phy) 4076 ret = h->ae_algo->ops->mac_connect_phy(h); 4077 4078 return ret; 4079 } 4080 4081 static void hns3_uninit_phy(struct net_device *netdev) 4082 { 4083 struct hnae3_handle *h = hns3_get_handle(netdev); 4084 4085 if (h->ae_algo->ops->mac_disconnect_phy) 4086 h->ae_algo->ops->mac_disconnect_phy(h); 4087 } 4088 4089 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) 4090 { 4091 struct hnae3_handle *h = hns3_get_handle(netdev); 4092 4093 if (h->ae_algo->ops->del_all_fd_entries) 4094 h->ae_algo->ops->del_all_fd_entries(h, clear_list); 4095 } 4096 4097 static int hns3_client_start(struct hnae3_handle *handle) 4098 { 4099 if (!handle->ae_algo->ops->client_start) 4100 return 0; 4101 4102 return handle->ae_algo->ops->client_start(handle); 4103 } 4104 4105 static void hns3_client_stop(struct hnae3_handle *handle) 4106 { 4107 if (!handle->ae_algo->ops->client_stop) 4108 return; 4109 4110 handle->ae_algo->ops->client_stop(handle); 4111 } 4112 4113 static void hns3_info_show(struct hns3_nic_priv *priv) 4114 { 4115 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 4116 4117 dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); 4118 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); 4119 dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); 4120 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); 4121 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); 4122 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); 4123 
dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); 4124 dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); 4125 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); 4126 } 4127 4128 static int hns3_client_init(struct hnae3_handle *handle) 4129 { 4130 struct pci_dev *pdev = handle->pdev; 4131 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 4132 u16 alloc_tqps, max_rss_size; 4133 struct hns3_nic_priv *priv; 4134 struct net_device *netdev; 4135 int ret; 4136 4137 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, 4138 &max_rss_size); 4139 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); 4140 if (!netdev) 4141 return -ENOMEM; 4142 4143 priv = netdev_priv(netdev); 4144 priv->dev = &pdev->dev; 4145 priv->netdev = netdev; 4146 priv->ae_handle = handle; 4147 priv->tx_timeout_count = 0; 4148 priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; 4149 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 4150 4151 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); 4152 4153 handle->kinfo.netdev = netdev; 4154 handle->priv = (void *)priv; 4155 4156 hns3_init_mac_addr(netdev); 4157 4158 hns3_set_default_feature(netdev); 4159 4160 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; 4161 netdev->priv_flags |= IFF_UNICAST_FLT; 4162 netdev->netdev_ops = &hns3_nic_netdev_ops; 4163 SET_NETDEV_DEV(netdev, &pdev->dev); 4164 hns3_ethtool_set_ops(netdev); 4165 4166 /* Carrier off reporting is important to ethtool even BEFORE open */ 4167 netif_carrier_off(netdev); 4168 4169 ret = hns3_get_ring_config(priv); 4170 if (ret) { 4171 ret = -ENOMEM; 4172 goto out_get_ring_cfg; 4173 } 4174 4175 ret = hns3_nic_alloc_vector_data(priv); 4176 if (ret) { 4177 ret = -ENOMEM; 4178 goto out_alloc_vector_data; 4179 } 4180 4181 ret = hns3_nic_init_vector_data(priv); 4182 if (ret) { 4183 ret = -ENOMEM; 4184 goto out_init_vector_data; 4185 } 4186 4187 ret = hns3_init_all_ring(priv); 4188 if (ret) { 4189 ret = -ENOMEM; 4190 goto out_init_ring; 4191 } 4192 4193 ret = hns3_init_phy(netdev); 4194 if (ret) 4195 goto out_init_phy; 4196 4197 ret = register_netdev(netdev); 4198 if (ret) { 4199 dev_err(priv->dev, "probe register netdev fail!\n"); 4200 goto out_reg_netdev_fail; 4201 } 4202 4203 /* the device can work without cpu rmap, only aRFS needs it */ 4204 ret = hns3_set_rx_cpu_rmap(netdev); 4205 if (ret) 4206 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 4207 4208 ret = hns3_nic_init_irq(priv); 4209 if (ret) { 4210 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); 4211 hns3_free_rx_cpu_rmap(netdev); 4212 goto out_init_irq_fail; 4213 } 4214 4215 ret = hns3_client_start(handle); 4216 if (ret) { 4217 dev_err(priv->dev, "hns3_client_start fail! 
ret=%d\n", ret); 4218 goto out_client_start; 4219 } 4220 4221 hns3_dcbnl_setup(handle); 4222 4223 hns3_dbg_init(handle); 4224 4225 /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */ 4226 netdev->max_mtu = HNS3_MAX_MTU; 4227 4228 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) 4229 set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); 4230 4231 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 4232 4233 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 4234 set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); 4235 4236 if (netif_msg_drv(handle)) 4237 hns3_info_show(priv); 4238 4239 return ret; 4240 4241 out_client_start: 4242 hns3_free_rx_cpu_rmap(netdev); 4243 hns3_nic_uninit_irq(priv); 4244 out_init_irq_fail: 4245 unregister_netdev(netdev); 4246 out_reg_netdev_fail: 4247 hns3_uninit_phy(netdev); 4248 out_init_phy: 4249 hns3_uninit_all_ring(priv); 4250 out_init_ring: 4251 hns3_nic_uninit_vector_data(priv); 4252 out_init_vector_data: 4253 hns3_nic_dealloc_vector_data(priv); 4254 out_alloc_vector_data: 4255 priv->ring = NULL; 4256 out_get_ring_cfg: 4257 priv->ae_handle = NULL; 4258 free_netdev(netdev); 4259 return ret; 4260 } 4261 4262 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) 4263 { 4264 struct net_device *netdev = handle->kinfo.netdev; 4265 struct hns3_nic_priv *priv = netdev_priv(netdev); 4266 int ret; 4267 4268 if (netdev->reg_state != NETREG_UNINITIALIZED) 4269 unregister_netdev(netdev); 4270 4271 hns3_client_stop(handle); 4272 4273 hns3_uninit_phy(netdev); 4274 4275 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 4276 netdev_warn(netdev, "already uninitialized\n"); 4277 goto out_netdev_free; 4278 } 4279 4280 hns3_free_rx_cpu_rmap(netdev); 4281 4282 hns3_nic_uninit_irq(priv); 4283 4284 hns3_del_all_fd_rules(netdev, true); 4285 4286 hns3_clear_all_ring(handle, true); 4287 4288 hns3_nic_uninit_vector_data(priv); 4289 4290 hns3_nic_dealloc_vector_data(priv); 4291 4292 ret = hns3_uninit_all_ring(priv); 4293 if (ret) 4294 netdev_err(netdev, "uninit ring error\n"); 4295 4296 hns3_put_ring_config(priv); 4297 4298 out_netdev_free: 4299 hns3_dbg_uninit(handle); 4300 free_netdev(netdev); 4301 } 4302 4303 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) 4304 { 4305 struct net_device *netdev = handle->kinfo.netdev; 4306 4307 if (!netdev) 4308 return; 4309 4310 if (linkup) { 4311 netif_tx_wake_all_queues(netdev); 4312 netif_carrier_on(netdev); 4313 if (netif_msg_link(handle)) 4314 netdev_info(netdev, "link up\n"); 4315 } else { 4316 netif_carrier_off(netdev); 4317 netif_tx_stop_all_queues(netdev); 4318 if (netif_msg_link(handle)) 4319 netdev_info(netdev, "link down\n"); 4320 } 4321 } 4322 4323 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) 4324 { 4325 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 4326 struct net_device *ndev = kinfo->netdev; 4327 4328 if (tc > HNAE3_MAX_TC) 4329 return -EINVAL; 4330 4331 if (!ndev) 4332 return -ENODEV; 4333 4334 return hns3_nic_set_real_num_queue(ndev); 4335 } 4336 4337 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) 4338 { 4339 while (ring->next_to_clean != ring->next_to_use) { 4340 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; 4341 hns3_free_buffer_detach(ring, ring->next_to_clean, 0); 4342 ring_ptr_move_fw(ring, next_to_clean); 4343 } 4344 4345 ring->pending_buf = 0; 4346 } 4347 4348 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) 4349 { 4350 struct hns3_desc_cb res_cbs; 4351 int ret; 4352 4353 
while (ring->next_to_use != ring->next_to_clean) { 4354 /* When a buffer is not reused, its memory has been 4355 * freed in hns3_handle_rx_bd or will be freed by 4356 * stack, so we need to replace the buffer here. 4357 */ 4358 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 4359 ret = hns3_alloc_and_map_buffer(ring, &res_cbs); 4360 if (ret) { 4361 u64_stats_update_begin(&ring->syncp); 4362 ring->stats.sw_err_cnt++; 4363 u64_stats_update_end(&ring->syncp); 4364 /* if allocating a new buffer fails, exit directly 4365 * and re-clear it in the up flow. 4366 */ 4367 netdev_warn(ring_to_netdev(ring), 4368 "reserve buffer map failed, ret = %d\n", 4369 ret); 4370 return ret; 4371 } 4372 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 4373 } 4374 ring_ptr_move_fw(ring, next_to_use); 4375 } 4376 4377 /* Free the pending skb in rx ring */ 4378 if (ring->skb) { 4379 dev_kfree_skb_any(ring->skb); 4380 ring->skb = NULL; 4381 ring->pending_buf = 0; 4382 } 4383 4384 return 0; 4385 } 4386 4387 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) 4388 { 4389 while (ring->next_to_use != ring->next_to_clean) { 4390 /* When a buffer is not reused, its memory has been 4391 * freed in hns3_handle_rx_bd or will be freed by 4392 * stack, so we only need to unmap the buffer here. 4393 */ 4394 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 4395 hns3_unmap_buffer(ring, 4396 &ring->desc_cb[ring->next_to_use]); 4397 ring->desc_cb[ring->next_to_use].dma = 0; 4398 } 4399 4400 ring_ptr_move_fw(ring, next_to_use); 4401 } 4402 } 4403 4404 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force) 4405 { 4406 struct net_device *ndev = h->kinfo.netdev; 4407 struct hns3_nic_priv *priv = netdev_priv(ndev); 4408 u32 i; 4409 4410 for (i = 0; i < h->kinfo.num_tqps; i++) { 4411 struct hns3_enet_ring *ring; 4412 4413 ring = &priv->ring[i]; 4414 hns3_clear_tx_ring(ring); 4415 4416 ring = &priv->ring[i + h->kinfo.num_tqps]; 4417 /* Continue to clear other rings even if clearing some 4418 * rings failed. 4419 */ 4420 if (force) 4421 hns3_force_clear_rx_ring(ring); 4422 else 4423 hns3_clear_rx_ring(ring); 4424 } 4425 } 4426 4427 int hns3_nic_reset_all_ring(struct hnae3_handle *h) 4428 { 4429 struct net_device *ndev = h->kinfo.netdev; 4430 struct hns3_nic_priv *priv = netdev_priv(ndev); 4431 struct hns3_enet_ring *rx_ring; 4432 int i, j; 4433 int ret; 4434 4435 for (i = 0; i < h->kinfo.num_tqps; i++) { 4436 ret = h->ae_algo->ops->reset_queue(h, i); 4437 if (ret) 4438 return ret; 4439 4440 hns3_init_ring_hw(&priv->ring[i]); 4441 4442 /* We need to clear the tx ring here because the self test will 4443 * use the ring and will not do a down/up cycle first 4444 */ 4445 hns3_clear_tx_ring(&priv->ring[i]); 4446 priv->ring[i].next_to_clean = 0; 4447 priv->ring[i].next_to_use = 0; 4448 priv->ring[i].last_to_use = 0; 4449 4450 rx_ring = &priv->ring[i + h->kinfo.num_tqps]; 4451 hns3_init_ring_hw(rx_ring); 4452 ret = hns3_clear_rx_ring(rx_ring); 4453 if (ret) 4454 return ret; 4455 4456 /* We cannot know the hardware head and tail when this 4457 * function is called in the reset flow, so we reuse all desc.
4458 */ 4459 for (j = 0; j < rx_ring->desc_num; j++) 4460 hns3_reuse_buffer(rx_ring, j); 4461 4462 rx_ring->next_to_clean = 0; 4463 rx_ring->next_to_use = 0; 4464 } 4465 4466 hns3_init_tx_ring_tc(priv); 4467 4468 return 0; 4469 } 4470 4471 static void hns3_store_coal(struct hns3_nic_priv *priv) 4472 { 4473 /* ethtool only supports setting and querying one coalesce 4474 * configuration for now, so save vector 0's coalesce 4475 * configuration here in order to restore it. 4476 */ 4477 memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, 4478 sizeof(struct hns3_enet_coalesce)); 4479 memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal, 4480 sizeof(struct hns3_enet_coalesce)); 4481 } 4482 4483 static void hns3_restore_coal(struct hns3_nic_priv *priv) 4484 { 4485 u16 vector_num = priv->vector_num; 4486 int i; 4487 4488 for (i = 0; i < vector_num; i++) { 4489 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal, 4490 sizeof(struct hns3_enet_coalesce)); 4491 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal, 4492 sizeof(struct hns3_enet_coalesce)); 4493 } 4494 } 4495 4496 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) 4497 { 4498 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 4499 struct net_device *ndev = kinfo->netdev; 4500 struct hns3_nic_priv *priv = netdev_priv(ndev); 4501 4502 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) 4503 return 0; 4504 4505 if (!netif_running(ndev)) 4506 return 0; 4507 4508 return hns3_nic_net_stop(ndev); 4509 } 4510 4511 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) 4512 { 4513 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 4514 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); 4515 int ret = 0; 4516 4517 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 4518 4519 if (netif_running(kinfo->netdev)) { 4520 ret = hns3_nic_net_open(kinfo->netdev); 4521 if (ret) { 4522 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 4523 netdev_err(kinfo->netdev, 4524 "net up fail, ret=%d!\n", ret); 4525 return ret; 4526 } 4527 } 4528 4529 return ret; 4530 } 4531 4532 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) 4533 { 4534 struct net_device *netdev = handle->kinfo.netdev; 4535 struct hns3_nic_priv *priv = netdev_priv(netdev); 4536 int ret; 4537 4538 /* Carrier off reporting is important to ethtool even BEFORE open */ 4539 netif_carrier_off(netdev); 4540 4541 ret = hns3_get_ring_config(priv); 4542 if (ret) 4543 return ret; 4544 4545 ret = hns3_nic_alloc_vector_data(priv); 4546 if (ret) 4547 goto err_put_ring; 4548 4549 hns3_restore_coal(priv); 4550 4551 ret = hns3_nic_init_vector_data(priv); 4552 if (ret) 4553 goto err_dealloc_vector; 4554 4555 ret = hns3_init_all_ring(priv); 4556 if (ret) 4557 goto err_uninit_vector; 4558 4559 /* the device can work without cpu rmap, only aRFS needs it */ 4560 ret = hns3_set_rx_cpu_rmap(netdev); 4561 if (ret) 4562 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 4563 4564 ret = hns3_nic_init_irq(priv); 4565 if (ret) { 4566 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); 4567 hns3_free_rx_cpu_rmap(netdev); 4568 goto err_init_irq_fail; 4569 } 4570 4571 if (!hns3_is_phys_func(handle->pdev)) 4572 hns3_init_mac_addr(netdev); 4573 4574 ret = hns3_client_start(handle); 4575 if (ret) { 4576 dev_err(priv->dev, "hns3_client_start fail!
ret=%d\n", ret); 4577 goto err_client_start_fail; 4578 } 4579 4580 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 4581 4582 return ret; 4583 4584 err_client_start_fail: 4585 hns3_free_rx_cpu_rmap(netdev); 4586 hns3_nic_uninit_irq(priv); 4587 err_init_irq_fail: 4588 hns3_uninit_all_ring(priv); 4589 err_uninit_vector: 4590 hns3_nic_uninit_vector_data(priv); 4591 err_dealloc_vector: 4592 hns3_nic_dealloc_vector_data(priv); 4593 err_put_ring: 4594 hns3_put_ring_config(priv); 4595 4596 return ret; 4597 } 4598 4599 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) 4600 { 4601 struct net_device *netdev = handle->kinfo.netdev; 4602 struct hns3_nic_priv *priv = netdev_priv(netdev); 4603 int ret; 4604 4605 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 4606 netdev_warn(netdev, "already uninitialized\n"); 4607 return 0; 4608 } 4609 4610 hns3_free_rx_cpu_rmap(netdev); 4611 hns3_nic_uninit_irq(priv); 4612 hns3_clear_all_ring(handle, true); 4613 hns3_reset_tx_queue(priv->ae_handle); 4614 4615 hns3_nic_uninit_vector_data(priv); 4616 4617 hns3_store_coal(priv); 4618 4619 hns3_nic_dealloc_vector_data(priv); 4620 4621 ret = hns3_uninit_all_ring(priv); 4622 if (ret) 4623 netdev_err(netdev, "uninit ring error\n"); 4624 4625 hns3_put_ring_config(priv); 4626 4627 return ret; 4628 } 4629 4630 static int hns3_reset_notify(struct hnae3_handle *handle, 4631 enum hnae3_reset_notify_type type) 4632 { 4633 int ret = 0; 4634 4635 switch (type) { 4636 case HNAE3_UP_CLIENT: 4637 ret = hns3_reset_notify_up_enet(handle); 4638 break; 4639 case HNAE3_DOWN_CLIENT: 4640 ret = hns3_reset_notify_down_enet(handle); 4641 break; 4642 case HNAE3_INIT_CLIENT: 4643 ret = hns3_reset_notify_init_enet(handle); 4644 break; 4645 case HNAE3_UNINIT_CLIENT: 4646 ret = hns3_reset_notify_uninit_enet(handle); 4647 break; 4648 default: 4649 break; 4650 } 4651 4652 return ret; 4653 } 4654 4655 static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num, 4656 bool rxfh_configured) 4657 { 4658 int ret; 4659 4660 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, 4661 rxfh_configured); 4662 if (ret) { 4663 dev_err(&handle->pdev->dev, 4664 "Change tqp num(%u) fail.\n", new_tqp_num); 4665 return ret; 4666 } 4667 4668 ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT); 4669 if (ret) 4670 return ret; 4671 4672 ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT); 4673 if (ret) 4674 hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT); 4675 4676 return ret; 4677 } 4678 4679 int hns3_set_channels(struct net_device *netdev, 4680 struct ethtool_channels *ch) 4681 { 4682 struct hnae3_handle *h = hns3_get_handle(netdev); 4683 struct hnae3_knic_private_info *kinfo = &h->kinfo; 4684 bool rxfh_configured = netif_is_rxfh_configured(netdev); 4685 u32 new_tqp_num = ch->combined_count; 4686 u16 org_tqp_num; 4687 int ret; 4688 4689 if (hns3_nic_resetting(netdev)) 4690 return -EBUSY; 4691 4692 if (ch->rx_count || ch->tx_count) 4693 return -EINVAL; 4694 4695 if (new_tqp_num > hns3_get_max_available_channels(h) || 4696 new_tqp_num < 1) { 4697 dev_err(&netdev->dev, 4698 "Change tqps fail, the tqp range is from 1 to %u", 4699 hns3_get_max_available_channels(h)); 4700 return -EINVAL; 4701 } 4702 4703 if (kinfo->rss_size == new_tqp_num) 4704 return 0; 4705 4706 netif_dbg(h, drv, netdev, 4707 "set channels: tqp_num=%u, rxfh=%d\n", 4708 new_tqp_num, rxfh_configured); 4709 4710 ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); 4711 if (ret) 4712 return ret; 4713 4714 ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); 4715 if 
(ret) 4716 return ret; 4717 4718 org_tqp_num = h->kinfo.num_tqps; 4719 ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); 4720 if (ret) { 4721 int ret1; 4722 4723 netdev_warn(netdev, 4724 "Change channels fail, revert to old value\n"); 4725 ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); 4726 if (ret1) { 4727 netdev_err(netdev, 4728 "revert to old channel fail\n"); 4729 return ret1; 4730 } 4731 4732 return ret; 4733 } 4734 4735 return 0; 4736 } 4737 4738 static const struct hns3_hw_error_info hns3_hw_err[] = { 4739 { .type = HNAE3_PPU_POISON_ERROR, 4740 .msg = "PPU poison" }, 4741 { .type = HNAE3_CMDQ_ECC_ERROR, 4742 .msg = "IMP CMDQ error" }, 4743 { .type = HNAE3_IMP_RD_POISON_ERROR, 4744 .msg = "IMP RD poison" }, 4745 { .type = HNAE3_ROCEE_AXI_RESP_ERROR, 4746 .msg = "ROCEE AXI RESP error" }, 4747 }; 4748 4749 static void hns3_process_hw_error(struct hnae3_handle *handle, 4750 enum hnae3_hw_error_type type) 4751 { 4752 int i; 4753 4754 for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { 4755 if (hns3_hw_err[i].type == type) { 4756 dev_err(&handle->pdev->dev, "Detected %s!\n", 4757 hns3_hw_err[i].msg); 4758 break; 4759 } 4760 } 4761 } 4762 4763 static const struct hnae3_client_ops client_ops = { 4764 .init_instance = hns3_client_init, 4765 .uninit_instance = hns3_client_uninit, 4766 .link_status_change = hns3_link_status_change, 4767 .setup_tc = hns3_client_setup_tc, 4768 .reset_notify = hns3_reset_notify, 4769 .process_hw_error = hns3_process_hw_error, 4770 }; 4771 4772 /* hns3_init_module - Driver registration routine 4773 * hns3_init_module is the first routine called when the driver is 4774 * loaded. All it does is register with the PCI subsystem. 4775 */ 4776 static int __init hns3_init_module(void) 4777 { 4778 int ret; 4779 4780 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); 4781 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); 4782 4783 client.type = HNAE3_CLIENT_KNIC; 4784 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", 4785 hns3_driver_name); 4786 4787 client.ops = &client_ops; 4788 4789 INIT_LIST_HEAD(&client.node); 4790 4791 hns3_dbg_register_debugfs(hns3_driver_name); 4792 4793 ret = hnae3_register_client(&client); 4794 if (ret) 4795 goto err_reg_client; 4796 4797 ret = pci_register_driver(&hns3_driver); 4798 if (ret) 4799 goto err_reg_driver; 4800 4801 return ret; 4802 4803 err_reg_driver: 4804 hnae3_unregister_client(&client); 4805 err_reg_client: 4806 hns3_dbg_unregister_debugfs(); 4807 return ret; 4808 } 4809 module_init(hns3_init_module); 4810 4811 /* hns3_exit_module - Driver exit cleanup routine 4812 * hns3_exit_module is called just before the driver is removed 4813 * from memory. 4814 */ 4815 static void __exit hns3_exit_module(void) 4816 { 4817 pci_unregister_driver(&hns3_driver); 4818 hnae3_unregister_client(&client); 4819 hns3_dbg_unregister_debugfs(); 4820 } 4821 module_exit(hns3_exit_module); 4822 4823 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); 4824 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 4825 MODULE_LICENSE("GPL"); 4826 MODULE_ALIAS("pci:hns-nic"); 4827