Excerpts from the HiSilicon HNS3 Ethernet driver (hns3_enet.c); each entry gives the source line number, the code, and, where applicable, the containing function.
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/dma-mapping.h>
54 static int debug = -1;
79 /* hns3_pci_tbl - PCI Device ID Table
387 napi_schedule_irqoff(&tqp_vector->napi); in hns3_irq_handle()
388 tqp_vector->event_cnt++; in hns3_irq_handle()
398 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_uninit_irq()
399 tqp_vectors = &priv->tqp_vector[i]; in hns3_nic_uninit_irq()
401 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) in hns3_nic_uninit_irq()
405 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); in hns3_nic_uninit_irq()
408 free_irq(tqp_vectors->vector_irq, tqp_vectors); in hns3_nic_uninit_irq()
409 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; in hns3_nic_uninit_irq()
422 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_irq()
423 tqp_vectors = &priv->tqp_vector[i]; in hns3_nic_init_irq()
425 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) in hns3_nic_init_irq()
428 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { in hns3_nic_init_irq()
429 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
430 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
431 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
434 } else if (tqp_vectors->rx_group.ring) { in hns3_nic_init_irq()
435 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
436 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
437 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
439 } else if (tqp_vectors->tx_group.ring) { in hns3_nic_init_irq()
440 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
441 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
442 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
449 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; in hns3_nic_init_irq()
451 irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); in hns3_nic_init_irq()
452 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, in hns3_nic_init_irq()
453 tqp_vectors->name, tqp_vectors); in hns3_nic_init_irq()
455 netdev_err(priv->netdev, "request irq(%d) fail\n", in hns3_nic_init_irq()
456 tqp_vectors->vector_irq); in hns3_nic_init_irq()
461 irq_set_affinity_hint(tqp_vectors->vector_irq, in hns3_nic_init_irq()
462 &tqp_vectors->affinity_mask); in hns3_nic_init_irq()
464 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; in hns3_nic_init_irq()
473 writel(mask_en, tqp_vector->mask_addr); in hns3_mask_vector_irq()
478 napi_enable(&tqp_vector->napi); in hns3_vector_enable()
479 enable_irq(tqp_vector->vector_irq); in hns3_vector_enable()
490 disable_irq(tqp_vector->vector_irq); in hns3_vector_disable()
491 napi_disable(&tqp_vector->napi); in hns3_vector_disable()
492 cancel_work_sync(&tqp_vector->rx_group.dim.work); in hns3_vector_disable()
493 cancel_work_sync(&tqp_vector->tx_group.dim.work); in hns3_vector_disable()
502 * Rl defines rate of interrupts i.e. number of interrupts-per-second in hns3_set_vector_coalesce_rl()
505 if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable && in hns3_set_vector_coalesce_rl()
506 !tqp_vector->rx_group.coal.adapt_enable) in hns3_set_vector_coalesce_rl()
508 * 0-59 and the unit is 4. in hns3_set_vector_coalesce_rl()
512 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); in hns3_set_vector_coalesce_rl()
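A minimal sketch of the register encoding described in the comment above. This is only an illustration of the divide-by-4 and 0-59 clamp the comment mentions; the helper name is made up and is not the driver's conversion routine.

static unsigned int example_rl_usec_to_reg(unsigned int rl_usec)
{
        unsigned int rl_reg = rl_usec / 4;      /* "the unit is 4" */

        return rl_reg > 59 ? 59 : rl_reg;       /* hardware range is 0-59 */
}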
520 if (tqp_vector->rx_group.coal.unit_1us) in hns3_set_vector_coalesce_rx_gl()
525 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); in hns3_set_vector_coalesce_rx_gl()
533 if (tqp_vector->tx_group.coal.unit_1us) in hns3_set_vector_coalesce_tx_gl()
538 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); in hns3_set_vector_coalesce_tx_gl()
544 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET); in hns3_set_vector_coalesce_tx_ql()
550 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET); in hns3_set_vector_coalesce_rx_ql()
556 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_vector_coalesce_init()
557 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; in hns3_vector_coalesce_init()
558 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; in hns3_vector_coalesce_init()
559 struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal; in hns3_vector_coalesce_init()
560 struct hns3_enet_coalesce *prx_coal = &priv->rx_coal; in hns3_vector_coalesce_init()
562 tx_coal->adapt_enable = ptx_coal->adapt_enable; in hns3_vector_coalesce_init()
563 rx_coal->adapt_enable = prx_coal->adapt_enable; in hns3_vector_coalesce_init()
565 tx_coal->int_gl = ptx_coal->int_gl; in hns3_vector_coalesce_init()
566 rx_coal->int_gl = prx_coal->int_gl; in hns3_vector_coalesce_init()
568 rx_coal->flow_level = prx_coal->flow_level; in hns3_vector_coalesce_init()
569 tx_coal->flow_level = ptx_coal->flow_level; in hns3_vector_coalesce_init()
574 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { in hns3_vector_coalesce_init()
575 tx_coal->unit_1us = 1; in hns3_vector_coalesce_init()
576 rx_coal->unit_1us = 1; in hns3_vector_coalesce_init()
579 if (ae_dev->dev_specs.int_ql_max) { in hns3_vector_coalesce_init()
580 tx_coal->ql_enable = 1; in hns3_vector_coalesce_init()
581 rx_coal->ql_enable = 1; in hns3_vector_coalesce_init()
582 tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; in hns3_vector_coalesce_init()
583 rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; in hns3_vector_coalesce_init()
584 tx_coal->int_ql = ptx_coal->int_ql; in hns3_vector_coalesce_init()
585 rx_coal->int_ql = prx_coal->int_ql; in hns3_vector_coalesce_init()
593 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; in hns3_vector_coalesce_init_hw()
594 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; in hns3_vector_coalesce_init_hw()
595 struct hnae3_handle *h = priv->ae_handle; in hns3_vector_coalesce_init_hw()
597 hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); in hns3_vector_coalesce_init_hw()
598 hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); in hns3_vector_coalesce_init_hw()
599 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); in hns3_vector_coalesce_init_hw()
601 if (tx_coal->ql_enable) in hns3_vector_coalesce_init_hw()
602 hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); in hns3_vector_coalesce_init_hw()
604 if (rx_coal->ql_enable) in hns3_vector_coalesce_init_hw()
605 hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); in hns3_vector_coalesce_init_hw()
611 struct hnae3_knic_private_info *kinfo = &h->kinfo; in hns3_nic_set_real_num_queue()
612 struct hnae3_tc_info *tc_info = &kinfo->tc_info; in hns3_nic_set_real_num_queue()
613 unsigned int queue_size = kinfo->num_tqps; in hns3_nic_set_real_num_queue()
616 if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { in hns3_nic_set_real_num_queue()
619 ret = netdev_set_num_tc(netdev, tc_info->num_tc); in hns3_nic_set_real_num_queue()
626 for (i = 0; i < tc_info->num_tc; i++) in hns3_nic_set_real_num_queue()
627 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], in hns3_nic_set_real_num_queue()
628 tc_info->tqp_offset[i]); in hns3_nic_set_real_num_queue()
652 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); in hns3_get_max_available_channels()
653 rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; in hns3_get_max_available_channels()
679 free_irq_cpu_rmap(netdev->rx_cpu_rmap); in hns3_free_rx_cpu_rmap()
680 netdev->rx_cpu_rmap = NULL; in hns3_free_rx_cpu_rmap()
691 if (!netdev->rx_cpu_rmap) { in hns3_set_rx_cpu_rmap()
692 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); in hns3_set_rx_cpu_rmap()
693 if (!netdev->rx_cpu_rmap) in hns3_set_rx_cpu_rmap()
694 return -ENOMEM; in hns3_set_rx_cpu_rmap()
697 for (i = 0; i < priv->vector_num; i++) { in hns3_set_rx_cpu_rmap()
698 tqp_vector = &priv->tqp_vector[i]; in hns3_set_rx_cpu_rmap()
699 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, in hns3_set_rx_cpu_rmap()
700 tqp_vector->vector_irq); in hns3_set_rx_cpu_rmap()
713 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_net_up()
721 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_nic_net_up()
724 for (i = 0; i < priv->vector_num; i++) in hns3_nic_net_up()
725 hns3_vector_enable(&priv->tqp_vector[i]); in hns3_nic_net_up()
728 for (j = 0; j < h->kinfo.num_tqps; j++) in hns3_nic_net_up()
729 hns3_tqp_enable(h->kinfo.tqp[j]); in hns3_nic_net_up()
732 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; in hns3_nic_net_up()
734 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_nic_net_up()
735 while (j--) in hns3_nic_net_up()
736 hns3_tqp_disable(h->kinfo.tqp[j]); in hns3_nic_net_up()
738 for (j = i - 1; j >= 0; j--) in hns3_nic_net_up()
739 hns3_vector_disable(&priv->tqp_vector[j]); in hns3_nic_net_up()
749 for (i = 0; i < priv->vector_num; i++) { in hns3_config_xps()
750 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; in hns3_config_xps()
751 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; in hns3_config_xps()
756 ret = netif_set_xps_queue(priv->netdev, in hns3_config_xps()
757 &tqp_vector->affinity_mask, in hns3_config_xps()
758 ring->tqp->tqp_index); in hns3_config_xps()
760 netdev_warn(priv->netdev, in hns3_config_xps()
763 ring = ring->next; in hns3_config_xps()
776 return -EBUSY; in hns3_nic_net_open()
778 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_nic_net_open()
795 kinfo = &h->kinfo; in hns3_nic_net_open()
797 netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); in hns3_nic_net_open()
799 if (h->ae_algo->ops->set_timer_task) in hns3_nic_net_open()
800 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); in hns3_nic_net_open()
811 struct net_device *ndev = h->kinfo.netdev; in hns3_reset_tx_queue()
816 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_reset_tx_queue()
818 priv->ring[i].queue_index); in hns3_reset_tx_queue()
831 for (i = 0; i < priv->vector_num; i++) in hns3_nic_net_down()
832 hns3_vector_disable(&priv->tqp_vector[i]); in hns3_nic_net_down()
835 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_nic_net_down()
836 hns3_tqp_disable(h->kinfo.tqp[i]); in hns3_nic_net_down()
839 ops = priv->ae_handle->ae_algo->ops; in hns3_nic_net_down()
840 if (ops->stop) in hns3_nic_net_down()
841 ops->stop(priv->ae_handle); in hns3_nic_net_down()
848 hns3_clear_all_ring(priv->ae_handle, false); in hns3_nic_net_down()
850 hns3_reset_tx_queue(priv->ae_handle); in hns3_nic_net_down()
858 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_nic_net_stop()
863 if (h->ae_algo->ops->set_timer_task) in hns3_nic_net_stop()
864 h->ae_algo->ops->set_timer_task(priv->ae_handle, false); in hns3_nic_net_stop()
879 if (h->ae_algo->ops->add_uc_addr) in hns3_nic_uc_sync()
880 return h->ae_algo->ops->add_uc_addr(h, addr); in hns3_nic_uc_sync()
894 if (ether_addr_equal(addr, netdev->dev_addr)) in hns3_nic_uc_unsync()
897 if (h->ae_algo->ops->rm_uc_addr) in hns3_nic_uc_unsync()
898 return h->ae_algo->ops->rm_uc_addr(h, addr); in hns3_nic_uc_unsync()
908 if (h->ae_algo->ops->add_mc_addr) in hns3_nic_mc_sync()
909 return h->ae_algo->ops->add_mc_addr(h, addr); in hns3_nic_mc_sync()
919 if (h->ae_algo->ops->rm_mc_addr) in hns3_nic_mc_unsync()
920 return h->ae_algo->ops->rm_mc_addr(h, addr); in hns3_nic_mc_unsync()
929 if (netdev->flags & IFF_PROMISC) in hns3_get_netdev_flags()
931 else if (netdev->flags & IFF_ALLMULTI) in hns3_get_netdev_flags()
950 h->netdev_flags = new_flags; in hns3_nic_set_rx_mode()
956 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in hns3_request_update_promisc_mode()
958 if (ops->request_update_promisc_mode) in hns3_request_update_promisc_mode()
959 ops->request_update_promisc_mode(handle); in hns3_request_update_promisc_mode()
964 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_space()
970 ntc = smp_load_acquire(&tx_spare->last_to_clean); in hns3_tx_spare_space()
971 ntu = tx_spare->next_to_use; in hns3_tx_spare_space()
974 return ntc - ntu - 1; in hns3_tx_spare_space()
979 return max(ntc, tx_spare->len - ntu) - 1; in hns3_tx_spare_space()
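The two return paths above are the usual circular-buffer free-space computation. A self-contained restatement follows (illustrative names; one slot is kept free so that "full" and "empty" remain distinguishable):

static unsigned int ring_free_space(unsigned int ntc, unsigned int ntu,
                                    unsigned int len)
{
        /* Producer (ntu) has wrapped behind consumer (ntc): one
         * contiguous free region lies between them.
         */
        if (ntc > ntu)
                return ntc - ntu - 1;

        /* Otherwise the free space is split across the wrap point;
         * report the larger contiguous piece, as the driver does.
         */
        return (ntc > len - ntu ? ntc : len - ntu) - 1;
}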
984 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_update()
987 tx_spare->last_to_clean == tx_spare->next_to_clean) in hns3_tx_spare_update()
993 smp_store_release(&tx_spare->last_to_clean, in hns3_tx_spare_update()
994 tx_spare->next_to_clean); in hns3_tx_spare_update()
1001 u32 len = skb->len <= ring->tx_copybreak ? skb->len : in hns3_can_use_tx_bounce()
1004 if (len > ring->tx_copybreak) in hns3_can_use_tx_bounce()
1019 if (skb->len <= ring->tx_copybreak || !tx_sgl || in hns3_can_use_tx_sgl()
1021 skb_shinfo(skb)->nr_frags < tx_sgl)) in hns3_can_use_tx_sgl()
1034 u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; in hns3_init_tx_spare_buffer()
1072 tx_spare->dma = dma; in hns3_init_tx_spare_buffer()
1073 tx_spare->buf = page_address(page); in hns3_init_tx_spare_buffer()
1074 tx_spare->len = PAGE_SIZE << order; in hns3_init_tx_spare_buffer()
1075 ring->tx_spare = tx_spare; in hns3_init_tx_spare_buffer()
1083 ring->tqp->handle->kinfo.tx_spare_buf_size = 0; in hns3_init_tx_spare_buffer()
1093 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_alloc()
1094 u32 ntu = tx_spare->next_to_use; in hns3_tx_spare_alloc()
1102 if (ntu + size > tx_spare->len) { in hns3_tx_spare_alloc()
1103 *cb_len += (tx_spare->len - ntu); in hns3_tx_spare_alloc()
1107 tx_spare->next_to_use = ntu + size; in hns3_tx_spare_alloc()
1108 if (tx_spare->next_to_use == tx_spare->len) in hns3_tx_spare_alloc()
1109 tx_spare->next_to_use = 0; in hns3_tx_spare_alloc()
1111 *dma = tx_spare->dma + ntu; in hns3_tx_spare_alloc()
1113 return tx_spare->buf + ntu; in hns3_tx_spare_alloc()
1118 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_rollback()
1120 if (len > tx_spare->next_to_use) { in hns3_tx_spare_rollback()
1121 len -= tx_spare->next_to_use; in hns3_tx_spare_rollback()
1122 tx_spare->next_to_use = tx_spare->len - len; in hns3_tx_spare_rollback()
1124 tx_spare->next_to_use -= len; in hns3_tx_spare_rollback()
1131 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_reclaim_cb()
1132 u32 ntc = tx_spare->next_to_clean; in hns3_tx_spare_reclaim_cb()
1133 u32 len = cb->length; in hns3_tx_spare_reclaim_cb()
1135 tx_spare->next_to_clean += len; in hns3_tx_spare_reclaim_cb()
1137 if (tx_spare->next_to_clean >= tx_spare->len) { in hns3_tx_spare_reclaim_cb()
1138 tx_spare->next_to_clean -= tx_spare->len; in hns3_tx_spare_reclaim_cb()
1140 if (tx_spare->next_to_clean) { in hns3_tx_spare_reclaim_cb()
1142 len = tx_spare->next_to_clean; in hns3_tx_spare_reclaim_cb()
1148 * the tx buffer to do the dma sync or sg unmapping after in hns3_tx_spare_reclaim_cb()
1149 * tx_spare->next_to_clean is moved forward. in hns3_tx_spare_reclaim_cb()
1151 if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) { in hns3_tx_spare_reclaim_cb()
1152 dma_addr_t dma = tx_spare->dma + ntc; in hns3_tx_spare_reclaim_cb()
1157 struct sg_table *sgt = tx_spare->buf + ntc; in hns3_tx_spare_reclaim_cb()
1159 dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, in hns3_tx_spare_reclaim_cb()
1186 if (l3.v4->version == 4) in hns3_set_tso()
1187 l3.v4->check = 0; in hns3_set_tso()
1190 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in hns3_set_tso()
1201 if (l3.v4->version == 4) in hns3_set_tso()
1202 l3.v4->check = 0; in hns3_set_tso()
1206 l4_offset = l4.hdr - skb->data; in hns3_set_tso()
1209 l4_paylen = skb->len - l4_offset; in hns3_set_tso()
1211 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in hns3_set_tso()
1213 csum_replace_by_diff(&l4.udp->check, in hns3_set_tso()
1216 hdr_len = (l4.tcp->doff << 2) + l4_offset; in hns3_set_tso()
1217 csum_replace_by_diff(&l4.tcp->check, in hns3_set_tso()
1221 *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len; in hns3_set_tso()
1224 *paylen_fdop_ol4cs = skb->len - hdr_len; in hns3_set_tso()
1228 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) in hns3_set_tso()
1232 *mss = skb_shinfo(skb)->gso_size; in hns3_set_tso()
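A worked restatement of the TSO byte accounting above, using made-up numbers: a 4396-byte skb with a 52-byte header and MSS 1448 splits into three segments, so the wire total is skb->len plus two extra copies of the header.

static unsigned long long tso_wire_bytes(unsigned int skb_len,
                                         unsigned int hdr_len,
                                         unsigned int gso_segs)
{
        /* Each segment after the first repeats the protocol headers. */
        return (unsigned long long)(gso_segs - 1) * hdr_len + skb_len;
}

/* tso_wire_bytes(4396, 52, 3) == 4500, while the hardware payload length
 * is skb->len - hdr_len == 4344, matching the paylen computation above.
 */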
1252 if (skb->protocol == htons(ETH_P_IPV6)) { in hns3_get_l4_protocol()
1254 l4_proto_tmp = l3.v6->nexthdr; in hns3_get_l4_protocol()
1256 ipv6_skip_exthdr(skb, exthdr - skb->data, in hns3_get_l4_protocol()
1258 } else if (skb->protocol == htons(ETH_P_IP)) { in hns3_get_l4_protocol()
1259 l4_proto_tmp = l3.v4->protocol; in hns3_get_l4_protocol()
1261 return -EINVAL; in hns3_get_l4_protocol()
1267 if (!skb->encapsulation) { in hns3_get_l4_protocol()
1276 if (l3.v6->version == 6) { in hns3_get_l4_protocol()
1278 l4_proto_tmp = l3.v6->nexthdr; in hns3_get_l4_protocol()
1280 ipv6_skip_exthdr(skb, exthdr - skb->data, in hns3_get_l4_protocol()
1282 } else if (l3.v4->version == 4) { in hns3_get_l4_protocol()
1283 l4_proto_tmp = l3.v4->protocol; in hns3_get_l4_protocol()
1291 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
1299 struct hns3_nic_priv *priv = netdev_priv(skb->dev); in hns3_tunnel_csum_bug()
1300 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_tunnel_csum_bug()
1306 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hns3_tunnel_csum_bug()
1311 if (!(!skb->encapsulation && in hns3_tunnel_csum_bug()
1312 (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || in hns3_tunnel_csum_bug()
1313 l4.udp->dest == htons(GENEVE_UDP_PORT) || in hns3_tunnel_csum_bug()
1314 l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT)))) in hns3_tunnel_csum_bug()
1332 l2_len = l3.hdr - skb->data; in hns3_set_outer_l2l3l4()
1336 l3_len = l4.hdr - l3.hdr; in hns3_set_outer_l2l3l4()
1341 l4_len = il2_hdr - l4.hdr; in hns3_set_outer_l2l3l4()
1345 if (skb->protocol == htons(ETH_P_IP)) { in hns3_set_outer_l2l3l4()
1354 } else if (skb->protocol == htons(ETH_P_IPV6)) { in hns3_set_outer_l2l3l4()
1370 if (l3.v4->version == 4) { in hns3_set_l3_type()
1379 } else if (l3.v6->version == 6) { in hns3_set_l3_type()
1395 l4.tcp->doff); in hns3_set_l4_csum_length()
1422 return -EDOM; in hns3_set_l4_csum_length()
1437 unsigned char *l2_hdr = skb->data; in hns3_set_l2l3l4()
1447 if (skb->encapsulation) { in hns3_set_l2l3l4()
1454 return -EDOM; in hns3_set_l2l3l4()
1474 l2_len = l3.hdr - l2_hdr; in hns3_set_l2l3l4()
1478 l3_len = l4.hdr - l3.hdr; in hns3_set_l2l3l4()
1487 struct hnae3_handle *handle = tx_ring->tqp->handle; in hns3_handle_vtags()
1492 if (!(skb->protocol == htons(ETH_P_8021Q) || in hns3_handle_vtags()
1500 ae_dev = pci_get_drvdata(handle->pdev); in hns3_handle_vtags()
1502 ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && in hns3_handle_vtags()
1503 handle->port_base_vlan_state == in hns3_handle_vtags()
1505 return -EINVAL; in hns3_handle_vtags()
1507 if (skb->protocol == htons(ETH_P_8021Q) && in hns3_handle_vtags()
1508 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in hns3_handle_vtags()
1513 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1521 if (skb->protocol == htons(ETH_P_8021Q) && in hns3_handle_vtags()
1522 handle->port_base_vlan_state == in hns3_handle_vtags()
1528 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1537 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) in hns3_handle_vtags()
1540 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1547 struct hns3_nic_priv *priv = netdev_priv(skb->dev); in hns3_check_hw_tx_csum()
1550 * HW checksum of the non-IP packets and GSO packets is handled at in hns3_check_hw_tx_csum()
1554 !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) in hns3_check_hw_tx_csum()
1571 pa->paylen_ol4cs = skb->len; in hns3_init_desc_data()
1572 pa->ol_type_vlan_len_msec = 0; in hns3_init_desc_data()
1573 pa->type_cs_vlan_tso = 0; in hns3_init_desc_data()
1574 pa->mss_hw_csum = 0; in hns3_init_desc_data()
1575 pa->inner_vtag = 0; in hns3_init_desc_data()
1576 pa->out_vtag = 0; in hns3_init_desc_data()
1590 param->inner_vtag = skb_vlan_tag_get(skb); in hns3_handle_vlan_info()
1591 param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & in hns3_handle_vlan_info()
1593 hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); in hns3_handle_vlan_info()
1595 param->out_vtag = skb_vlan_tag_get(skb); in hns3_handle_vlan_info()
1596 param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & in hns3_handle_vlan_info()
1598 hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, in hns3_handle_vlan_info()
1614 hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, in hns3_handle_csum_partial()
1616 hns3_set_field(param->ol_type_vlan_len_msec, in hns3_handle_csum_partial()
1618 skb->csum_offset >> 1); in hns3_handle_csum_partial()
1619 param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); in hns3_handle_csum_partial()
1632 &param->type_cs_vlan_tso, in hns3_handle_csum_partial()
1633 &param->ol_type_vlan_len_msec); in hns3_handle_csum_partial()
1639 ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum, in hns3_handle_csum_partial()
1640 &param->type_cs_vlan_tso, &desc_cb->send_bytes); in hns3_handle_csum_partial()
1660 desc_cb->send_bytes = skb->len; in hns3_fill_skb_desc()
1662 if (skb->ip_summed == CHECKSUM_PARTIAL) { in hns3_fill_skb_desc()
1669 desc->tx.ol_type_vlan_len_msec = in hns3_fill_skb_desc()
1671 desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso); in hns3_fill_skb_desc()
1672 desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs); in hns3_fill_skb_desc()
1673 desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum); in hns3_fill_skb_desc()
1674 desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag); in hns3_fill_skb_desc()
1675 desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag); in hns3_fill_skb_desc()
1685 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; in hns3_fill_desc()
1690 desc->addr = cpu_to_le64(dma); in hns3_fill_desc()
1691 desc->tx.send_size = cpu_to_le16(size); in hns3_fill_desc()
1692 desc->tx.bdtp_fe_sc_vld_ra_ri = in hns3_fill_desc()
1695 trace_hns3_tx_desc(ring, ring->next_to_use); in hns3_fill_desc()
1707 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); in hns3_fill_desc()
1708 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? in hns3_fill_desc()
1710 desc->tx.bdtp_fe_sc_vld_ra_ri = in hns3_fill_desc()
1713 trace_hns3_tx_desc(ring, ring->next_to_use); in hns3_fill_desc()
1717 desc = &ring->desc[ring->next_to_use]; in hns3_fill_desc()
1726 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_map_and_fill_desc()
1738 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); in hns3_map_and_fill_desc()
1756 return -ENOMEM; in hns3_map_and_fill_desc()
1759 desc_cb->priv = priv; in hns3_map_and_fill_desc()
1760 desc_cb->length = size; in hns3_map_and_fill_desc()
1761 desc_cb->dma = dma; in hns3_map_and_fill_desc()
1762 desc_cb->type = type; in hns3_map_and_fill_desc()
1776 size -= HNS3_MAX_BD_SIZE; in hns3_skb_bd_num()
1788 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in hns3_skb_bd_num()
1789 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in hns3_skb_bd_num()
1796 size -= HNS3_MAX_BD_SIZE; in hns3_skb_bd_num()
1819 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && in hns3_tx_bd_num()
1821 skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) in hns3_tx_bd_num()
1822 return skb_shinfo(skb)->nr_frags + 1U; in hns3_tx_bd_num()
1843 if (!skb->encapsulation) in hns3_gso_hdr_len()
1851 * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss,
1852 * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger
1853 * than MSS except the last max_non_tso_bd_num - 1 frags.
1861 for (i = 0; i < max_non_tso_bd_num - 1U; i++) in hns3_skb_need_linearized()
1867 if (tot_len + bd_size[max_non_tso_bd_num - 1U] < in hns3_skb_need_linearized()
1868 skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) in hns3_skb_need_linearized()
1871 /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater in hns3_skb_need_linearized()
1874 for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { in hns3_skb_need_linearized()
1875 tot_len -= bd_size[i]; in hns3_skb_need_linearized()
1876 tot_len += bd_size[i + max_non_tso_bd_num - 1U]; in hns3_skb_need_linearized()
1878 if (tot_len < skb_shinfo(skb)->gso_size) in hns3_skb_need_linearized()
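The two loops above form an incremental sliding-window sum: the running total is updated by subtracting the size leaving the window and adding the one entering it, rather than re-summing every window. A generic restatement of that technique (the driver's first window additionally accounts for the gso header length):

#include <stdbool.h>

static bool any_window_below(const unsigned int *size, unsigned int n,
                             unsigned int win, unsigned int min_sum)
{
        unsigned int tot = 0;
        unsigned int i;

        if (n < win)
                return false;

        for (i = 0; i < win; i++)
                tot += size[i];
        if (tot < min_sum)
                return true;

        /* Slide the window one element at a time. */
        for (i = 0; i + win < n; i++) {
                tot -= size[i];
                tot += size[i + win];
                if (tot < min_sum)
                        return true;
        }

        return false;
}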
1890 size[i] = skb_frag_size(&shinfo->frags[i]); in hns3_shinfo_pack()
1902 return -ENOMEM; in hns3_skb_linearize()
1905 /* The skb->len has exceeded the hw limitation, linearization in hns3_skb_linearize()
1908 if (skb->len > HNS3_MAX_TSO_SIZE || in hns3_skb_linearize()
1909 (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { in hns3_skb_linearize()
1911 return -ENOMEM; in hns3_skb_linearize()
1916 return -ENOMEM; in hns3_skb_linearize()
1927 u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; in hns3_nic_maybe_stop_tx()
1941 return -ENOMEM; in hns3_nic_maybe_stop_tx()
1943 bd_num = hns3_tx_bd_count(skb->len); in hns3_nic_maybe_stop_tx()
1952 netif_stop_subqueue(netdev, ring->queue_index); in hns3_nic_maybe_stop_tx()
1960 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_nic_maybe_stop_tx()
1961 netif_start_subqueue(netdev, ring->queue_index); in hns3_nic_maybe_stop_tx()
1967 return -EBUSY; in hns3_nic_maybe_stop_tx()
1975 for (i = 0; i < ring->desc_num; i++) { in hns3_clear_desc()
1976 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; in hns3_clear_desc()
1982 if (ring->next_to_use == next_to_use_orig) in hns3_clear_desc()
1988 desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_clear_desc()
1990 if (!desc_cb->dma) in hns3_clear_desc()
1994 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) in hns3_clear_desc()
1995 dma_unmap_single(dev, desc_cb->dma, desc_cb->length, in hns3_clear_desc()
1997 else if (desc_cb->type & in hns3_clear_desc()
1999 hns3_tx_spare_rollback(ring, desc_cb->length); in hns3_clear_desc()
2000 else if (desc_cb->length) in hns3_clear_desc()
2001 dma_unmap_page(dev, desc_cb->dma, desc_cb->length, in hns3_clear_desc()
2004 desc_cb->length = 0; in hns3_clear_desc()
2005 desc_cb->dma = 0; in hns3_clear_desc()
2006 desc_cb->type = DESC_TYPE_UNKNOWN; in hns3_clear_desc()
2022 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in hns3_fill_skb_to_desc()
2023 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in hns3_fill_skb_to_desc()
2057 int idx = (ring->next_to_use - num + ring->desc_num) % in hns3_tx_push_bd()
2058 ring->desc_num; in hns3_tx_push_bd()
2060 u64_stats_update_begin(&ring->syncp); in hns3_tx_push_bd()
2061 ring->stats.tx_push++; in hns3_tx_push_bd()
2062 u64_stats_update_end(&ring->syncp); in hns3_tx_push_bd()
2063 memcpy(&desc[offset], &ring->desc[idx], in hns3_tx_push_bd()
2066 } while (--num); in hns3_tx_push_bd()
2068 __iowrite64_copy(ring->tqp->mem_base, desc, in hns3_tx_push_bd()
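The idx computation above steps num descriptors back from next_to_use with wrap-around before copying them to the push area; restated on its own (illustrative helper name):

static unsigned int ring_index_back(unsigned int next_to_use,
                                    unsigned int num, unsigned int desc_num)
{
        /* Adding desc_num first keeps the value non-negative before the
         * modulo, matching (next_to_use - num + desc_num) % desc_num.
         */
        return (next_to_use + desc_num - num) % desc_num;
}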
2077 __le64 bd_num = cpu_to_le64((u64)ring->pending_buf); in hns3_tx_mem_doorbell()
2084 __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET, in hns3_tx_mem_doorbell()
2086 u64_stats_update_begin(&ring->syncp); in hns3_tx_mem_doorbell()
2087 ring->stats.tx_mem_doorbell += ring->pending_buf; in hns3_tx_mem_doorbell()
2088 u64_stats_update_end(&ring->syncp); in hns3_tx_mem_doorbell()
2100 if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num && in hns3_tx_doorbell()
2101 !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) { in hns3_tx_doorbell()
2106 smp_store_release(&ring->last_to_use, ring->next_to_use); in hns3_tx_doorbell()
2111 ring->pending_buf += num; in hns3_tx_doorbell()
2121 smp_store_release(&ring->last_to_use, ring->next_to_use); in hns3_tx_doorbell()
2123 if (ring->tqp->mem_base) in hns3_tx_doorbell()
2126 writel(ring->pending_buf, in hns3_tx_doorbell()
2127 ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); in hns3_tx_doorbell()
2129 ring->pending_buf = 0; in hns3_tx_doorbell()
2137 if (!(h->ae_algo->ops->set_tx_hwts_info && in hns3_tsyn()
2138 h->ae_algo->ops->set_tx_hwts_info(h, skb))) in hns3_tsyn()
2141 desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B)); in hns3_tsyn()
2147 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_handle_tx_bounce()
2156 if (skb->len <= ring->tx_copybreak) { in hns3_handle_tx_bounce()
2157 size = skb->len; in hns3_handle_tx_bounce()
2173 desc_cb->priv = skb; in hns3_handle_tx_bounce()
2174 desc_cb->length = cb_len; in hns3_handle_tx_bounce()
2175 desc_cb->dma = dma; in hns3_handle_tx_bounce()
2176 desc_cb->type = type; in hns3_handle_tx_bounce()
2200 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_handle_tx_sgl()
2201 u32 nfrag = skb_shinfo(skb)->nr_frags + 1; in hns3_handle_tx_sgl()
2218 sgt->sgl = (struct scatterlist *)(sgt + 1); in hns3_handle_tx_sgl()
2219 sg_init_table(sgt->sgl, nfrag); in hns3_handle_tx_sgl()
2220 nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); in hns3_handle_tx_sgl()
2224 return -ENOMEM; in hns3_handle_tx_sgl()
2227 sgt->orig_nents = nents; in hns3_handle_tx_sgl()
2228 sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, in hns3_handle_tx_sgl()
2230 if (unlikely(!sgt->nents)) { in hns3_handle_tx_sgl()
2233 return -ENOMEM; in hns3_handle_tx_sgl()
2236 desc_cb->priv = skb; in hns3_handle_tx_sgl()
2237 desc_cb->length = cb_len; in hns3_handle_tx_sgl()
2238 desc_cb->dma = dma; in hns3_handle_tx_sgl()
2239 desc_cb->type = DESC_TYPE_SGL_SKB; in hns3_handle_tx_sgl()
2241 for (i = 0; i < sgt->nents; i++) in hns3_handle_tx_sgl()
2242 bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), in hns3_handle_tx_sgl()
2243 sg_dma_len(sgt->sgl + i)); in hns3_handle_tx_sgl()
2254 if (!ring->tx_spare) in hns3_handle_desc_filling()
2276 ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], in hns3_handle_skb_desc()
2281 /* 'ret < 0' means filling error, 'ret == 0' means skb->len is in hns3_handle_skb_desc()
2297 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; in hns3_nic_net_xmit()
2298 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_nic_net_xmit()
2313 prefetch(skb->data); in hns3_nic_net_xmit()
2317 if (ret == -EBUSY) { in hns3_nic_net_xmit()
2326 ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use); in hns3_nic_net_xmit()
2330 pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : in hns3_nic_net_xmit()
2331 (ring->desc_num - 1); in hns3_nic_net_xmit()
2333 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in hns3_nic_net_xmit()
2334 hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]); in hns3_nic_net_xmit()
2336 ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= in hns3_nic_net_xmit()
2343 dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); in hns3_nic_net_xmit()
2344 doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes, in hns3_nic_net_xmit()
2364 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) in hns3_nic_net_set_mac_address()
2365 return -EADDRNOTAVAIL; in hns3_nic_net_set_mac_address()
2367 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { in hns3_nic_net_set_mac_address()
2368 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2377 if (!hns3_is_phys_func(h->pdev) && in hns3_nic_net_set_mac_address()
2378 !is_zero_ether_addr(netdev->perm_addr)) { in hns3_nic_net_set_mac_address()
2379 hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); in hns3_nic_net_set_mac_address()
2380 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2383 return -EPERM; in hns3_nic_net_set_mac_address()
2386 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); in hns3_nic_net_set_mac_address()
2392 eth_hw_addr_set(netdev, mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2403 return -EINVAL; in hns3_nic_do_ioctl()
2405 if (!h->ae_algo->ops->do_ioctl) in hns3_nic_do_ioctl()
2406 return -EOPNOTSUPP; in hns3_nic_do_ioctl()
2408 return h->ae_algo->ops->do_ioctl(h, ifr, cmd); in hns3_nic_do_ioctl()
2414 netdev_features_t changed = netdev->features ^ features; in hns3_nic_set_features()
2416 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_set_features()
2420 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { in hns3_nic_set_features()
2422 ret = h->ae_algo->ops->set_gro_en(h, enable); in hns3_nic_set_features()
2428 h->ae_algo->ops->enable_hw_strip_rxvtag) { in hns3_nic_set_features()
2430 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); in hns3_nic_set_features()
2435 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { in hns3_nic_set_features()
2437 h->ae_algo->ops->enable_fd(h, enable); in hns3_nic_set_features()
2440 if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && in hns3_nic_set_features()
2441 h->ae_algo->ops->cls_flower_active(h)) { in hns3_nic_set_features()
2444 return -EINVAL; in hns3_nic_set_features()
2448 h->ae_algo->ops->enable_vlan_filter) { in hns3_nic_set_features()
2450 ret = h->ae_algo->ops->enable_vlan_filter(h, enable); in hns3_nic_set_features()
2467 if (skb->ip_summed != CHECKSUM_PARTIAL) in hns3_features_check()
2470 if (skb->encapsulation) in hns3_features_check()
2495 start = u64_stats_fetch_begin(&ring->syncp); in hns3_fetch_stats()
2497 stats->tx_bytes += ring->stats.tx_bytes; in hns3_fetch_stats()
2498 stats->tx_packets += ring->stats.tx_pkts; in hns3_fetch_stats()
2499 stats->tx_dropped += ring->stats.sw_err_cnt; in hns3_fetch_stats()
2500 stats->tx_dropped += ring->stats.tx_vlan_err; in hns3_fetch_stats()
2501 stats->tx_dropped += ring->stats.tx_l4_proto_err; in hns3_fetch_stats()
2502 stats->tx_dropped += ring->stats.tx_l2l3l4_err; in hns3_fetch_stats()
2503 stats->tx_dropped += ring->stats.tx_tso_err; in hns3_fetch_stats()
2504 stats->tx_dropped += ring->stats.over_max_recursion; in hns3_fetch_stats()
2505 stats->tx_dropped += ring->stats.hw_limitation; in hns3_fetch_stats()
2506 stats->tx_dropped += ring->stats.copy_bits_err; in hns3_fetch_stats()
2507 stats->tx_dropped += ring->stats.skb2sgl_err; in hns3_fetch_stats()
2508 stats->tx_dropped += ring->stats.map_sg_err; in hns3_fetch_stats()
2509 stats->tx_errors += ring->stats.sw_err_cnt; in hns3_fetch_stats()
2510 stats->tx_errors += ring->stats.tx_vlan_err; in hns3_fetch_stats()
2511 stats->tx_errors += ring->stats.tx_l4_proto_err; in hns3_fetch_stats()
2512 stats->tx_errors += ring->stats.tx_l2l3l4_err; in hns3_fetch_stats()
2513 stats->tx_errors += ring->stats.tx_tso_err; in hns3_fetch_stats()
2514 stats->tx_errors += ring->stats.over_max_recursion; in hns3_fetch_stats()
2515 stats->tx_errors += ring->stats.hw_limitation; in hns3_fetch_stats()
2516 stats->tx_errors += ring->stats.copy_bits_err; in hns3_fetch_stats()
2517 stats->tx_errors += ring->stats.skb2sgl_err; in hns3_fetch_stats()
2518 stats->tx_errors += ring->stats.map_sg_err; in hns3_fetch_stats()
2520 stats->rx_bytes += ring->stats.rx_bytes; in hns3_fetch_stats()
2521 stats->rx_packets += ring->stats.rx_pkts; in hns3_fetch_stats()
2522 stats->rx_dropped += ring->stats.l2_err; in hns3_fetch_stats()
2523 stats->rx_errors += ring->stats.l2_err; in hns3_fetch_stats()
2524 stats->rx_errors += ring->stats.l3l4_csum_err; in hns3_fetch_stats()
2525 stats->rx_crc_errors += ring->stats.l2_err; in hns3_fetch_stats()
2526 stats->multicast += ring->stats.rx_multicast; in hns3_fetch_stats()
2527 stats->rx_length_errors += ring->stats.err_pkt_len; in hns3_fetch_stats()
2529 } while (u64_stats_fetch_retry(&ring->syncp, start)); in hns3_fetch_stats()
2536 int queue_num = priv->ae_handle->kinfo.num_tqps; in hns3_nic_get_stats64()
2537 struct hnae3_handle *handle = priv->ae_handle; in hns3_nic_get_stats64()
2542 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_nic_get_stats64()
2545 handle->ae_algo->ops->update_stats(handle); in hns3_nic_get_stats64()
2550 ring = &priv->ring[idx]; in hns3_nic_get_stats64()
2554 ring = &priv->ring[idx + queue_num]; in hns3_nic_get_stats64()
2558 stats->tx_bytes = ring_total_stats.tx_bytes; in hns3_nic_get_stats64()
2559 stats->tx_packets = ring_total_stats.tx_packets; in hns3_nic_get_stats64()
2560 stats->rx_bytes = ring_total_stats.rx_bytes; in hns3_nic_get_stats64()
2561 stats->rx_packets = ring_total_stats.rx_packets; in hns3_nic_get_stats64()
2563 stats->rx_errors = ring_total_stats.rx_errors; in hns3_nic_get_stats64()
2564 stats->multicast = ring_total_stats.multicast; in hns3_nic_get_stats64()
2565 stats->rx_length_errors = ring_total_stats.rx_length_errors; in hns3_nic_get_stats64()
2566 stats->rx_crc_errors = ring_total_stats.rx_crc_errors; in hns3_nic_get_stats64()
2567 stats->rx_missed_errors = netdev->stats.rx_missed_errors; in hns3_nic_get_stats64()
2569 stats->tx_errors = ring_total_stats.tx_errors; in hns3_nic_get_stats64()
2570 stats->rx_dropped = ring_total_stats.rx_dropped; in hns3_nic_get_stats64()
2571 stats->tx_dropped = ring_total_stats.tx_dropped; in hns3_nic_get_stats64()
2572 stats->collisions = netdev->stats.collisions; in hns3_nic_get_stats64()
2573 stats->rx_over_errors = netdev->stats.rx_over_errors; in hns3_nic_get_stats64()
2574 stats->rx_frame_errors = netdev->stats.rx_frame_errors; in hns3_nic_get_stats64()
2575 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; in hns3_nic_get_stats64()
2576 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; in hns3_nic_get_stats64()
2577 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; in hns3_nic_get_stats64()
2578 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; in hns3_nic_get_stats64()
2579 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; in hns3_nic_get_stats64()
2580 stats->tx_window_errors = netdev->stats.tx_window_errors; in hns3_nic_get_stats64()
2581 stats->rx_compressed = netdev->stats.rx_compressed; in hns3_nic_get_stats64()
2582 stats->tx_compressed = netdev->stats.tx_compressed; in hns3_nic_get_stats64()
2589 u8 tc = mqprio_qopt->qopt.num_tc; in hns3_setup_tc()
2590 u16 mode = mqprio_qopt->mode; in hns3_setup_tc()
2591 u8 hw = mqprio_qopt->qopt.hw; in hns3_setup_tc()
2596 return -EOPNOTSUPP; in hns3_setup_tc()
2599 return -EINVAL; in hns3_setup_tc()
2602 return -EINVAL; in hns3_setup_tc()
2605 kinfo = &h->kinfo; in hns3_setup_tc()
2609 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? in hns3_setup_tc()
2610 kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; in hns3_setup_tc()
2616 int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); in hns3_setup_tc_cls_flower()
2617 struct hnae3_handle *h = hns3_get_handle(priv->netdev); in hns3_setup_tc_cls_flower()
2619 switch (flow->command) { in hns3_setup_tc_cls_flower()
2621 if (h->ae_algo->ops->add_cls_flower) in hns3_setup_tc_cls_flower()
2622 return h->ae_algo->ops->add_cls_flower(h, flow, tc); in hns3_setup_tc_cls_flower()
2625 if (h->ae_algo->ops->del_cls_flower) in hns3_setup_tc_cls_flower()
2626 return h->ae_algo->ops->del_cls_flower(h, flow); in hns3_setup_tc_cls_flower()
2632 return -EOPNOTSUPP; in hns3_setup_tc_cls_flower()
2640 if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) in hns3_setup_tc_block_cb()
2641 return -EOPNOTSUPP; in hns3_setup_tc_block_cb()
2647 return -EOPNOTSUPP; in hns3_setup_tc_block_cb()
2670 return -EOPNOTSUPP; in hns3_nic_setup_tc()
2680 int ret = -EIO; in hns3_vlan_rx_add_vid()
2682 if (h->ae_algo->ops->set_vlan_filter) in hns3_vlan_rx_add_vid()
2683 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); in hns3_vlan_rx_add_vid()
2692 int ret = -EIO; in hns3_vlan_rx_kill_vid()
2694 if (h->ae_algo->ops->set_vlan_filter) in hns3_vlan_rx_kill_vid()
2695 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); in hns3_vlan_rx_kill_vid()
2704 int ret = -EIO; in hns3_ndo_set_vf_vlan()
2710 if (h->ae_algo->ops->set_vf_vlan_filter) in hns3_ndo_set_vf_vlan()
2711 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, in hns3_ndo_set_vf_vlan()
2722 return -EBUSY; in hns3_set_vf_spoofchk()
2724 if (!handle->ae_algo->ops->set_vf_spoofchk) in hns3_set_vf_spoofchk()
2725 return -EOPNOTSUPP; in hns3_set_vf_spoofchk()
2727 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); in hns3_set_vf_spoofchk()
2734 if (!handle->ae_algo->ops->set_vf_trust) in hns3_set_vf_trust()
2735 return -EOPNOTSUPP; in hns3_set_vf_trust()
2737 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); in hns3_set_vf_trust()
2746 return -EBUSY; in hns3_nic_change_mtu()
2748 if (!h->ae_algo->ops->set_mtu) in hns3_nic_change_mtu()
2749 return -EOPNOTSUPP; in hns3_nic_change_mtu()
2752 "change mtu from %u to %d\n", netdev->mtu, new_mtu); in hns3_nic_change_mtu()
2754 ret = h->ae_algo->ops->set_mtu(h, new_mtu); in hns3_nic_change_mtu()
2759 WRITE_ONCE(netdev->mtu, new_mtu); in hns3_nic_change_mtu()
2769 for (i = 0; i < ndev->num_tx_queues; i++) { in hns3_get_timeout_queue()
2774 trans_start = READ_ONCE(q->trans_start); in hns3_get_timeout_queue()
2777 (trans_start + ndev->watchdog_timeo))) { in hns3_get_timeout_queue()
2779 struct dql *dql = &q->dql; in hns3_get_timeout_queue()
2782 dql->last_obj_cnt, dql->num_queued, in hns3_get_timeout_queue()
2783 dql->adj_limit, dql->num_completed); in hns3_get_timeout_queue()
2786 q->state, in hns3_get_timeout_queue()
2787 jiffies_to_msecs(jiffies - trans_start)); in hns3_get_timeout_queue()
2799 struct napi_struct *napi = &tx_ring->tqp_vector->napi; in hns3_dump_queue_stats()
2804 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, in hns3_dump_queue_stats()
2805 tx_ring->next_to_clean, napi->state); in hns3_dump_queue_stats()
2809 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, in hns3_dump_queue_stats()
2810 tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); in hns3_dump_queue_stats()
2814 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, in hns3_dump_queue_stats()
2815 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); in hns3_dump_queue_stats()
2818 tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell); in hns3_dump_queue_stats()
2830 readl(tx_ring->tqp_vector->mask_addr)); in hns3_dump_queue_reg()
2850 if (timeout_queue >= ndev->num_tx_queues) { in hns3_get_tx_timeo_queue_info()
2853 priv->tx_timeout_count); in hns3_get_tx_timeo_queue_info()
2857 priv->tx_timeout_count++; in hns3_get_tx_timeo_queue_info()
2859 tx_ring = &priv->ring[timeout_queue]; in hns3_get_tx_timeo_queue_info()
2865 if (h->ae_algo->ops->get_mac_stats) { in hns3_get_tx_timeo_queue_info()
2868 h->ae_algo->ops->get_mac_stats(h, &mac_stats); in hns3_get_tx_timeo_queue_info()
2881 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_net_timeout()
2889 if (h->ae_algo->ops->reset_event) in hns3_nic_net_timeout()
2890 h->ae_algo->ops->reset_event(h->pdev, h); in hns3_nic_net_timeout()
2900 if (!h->ae_algo->ops->add_arfs_entry) in hns3_rx_flow_steer()
2901 return -EOPNOTSUPP; in hns3_rx_flow_steer()
2903 if (skb->encapsulation) in hns3_rx_flow_steer()
2904 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2907 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2913 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2915 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); in hns3_rx_flow_steer()
2924 if (!h->ae_algo->ops->get_vf_config) in hns3_nic_get_vf_config()
2925 return -EOPNOTSUPP; in hns3_nic_get_vf_config()
2927 return h->ae_algo->ops->get_vf_config(h, vf, ivf); in hns3_nic_get_vf_config()
2935 if (!h->ae_algo->ops->set_vf_link_state) in hns3_nic_set_vf_link_state()
2936 return -EOPNOTSUPP; in hns3_nic_set_vf_link_state()
2938 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); in hns3_nic_set_vf_link_state()
2946 if (!h->ae_algo->ops->set_vf_rate) in hns3_nic_set_vf_rate()
2947 return -EOPNOTSUPP; in hns3_nic_set_vf_rate()
2949 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, in hns3_nic_set_vf_rate()
2958 if (!h->ae_algo->ops->set_vf_mac) in hns3_nic_set_vf_mac()
2959 return -EOPNOTSUPP; in hns3_nic_set_vf_mac()
2966 return -EINVAL; in hns3_nic_set_vf_mac()
2969 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); in hns3_nic_set_vf_mac()
2977 __be16 protocol = skb->protocol; in hns3_get_skb_dscp()
2998 if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP || in hns3_nic_select_queue()
2999 !h->ae_algo->ops->get_dscp_prio) in hns3_nic_select_queue()
3006 skb->priority = h->kinfo.dscp_prio[dscp]; in hns3_nic_select_queue()
3007 if (skb->priority == HNAE3_PRIO_ID_INVALID) in hns3_nic_select_queue()
3008 skb->priority = 0; in hns3_nic_select_queue()
3044 u32 dev_id = pdev->device; in hns3_is_phys_func()
3060 dev_warn(&pdev->dev, "un-recognized pci device-id %u", in hns3_is_phys_func()
3069 /* If our VFs are assigned we cannot shut down SR-IOV in hns3_disable_sriov()
3074 dev_warn(&pdev->dev, in hns3_disable_sriov()
3082 /* hns3_probe - Device initialization routine
3097 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); in hns3_probe()
3099 return -ENOMEM; in hns3_probe()
3101 ae_dev->pdev = pdev; in hns3_probe()
3102 ae_dev->flag = ent->driver_data; in hns3_probe()
3123 if (ae_dev->ops->clean_vf_config) in hns3_clean_vf_config()
3124 ae_dev->ops->clean_vf_config(ae_dev, num_vfs); in hns3_clean_vf_config()
3127 /* hns3_remove - Device removal routine
3154 dev_warn(&pdev->dev, "Can not config SRIOV\n"); in hns3_pci_sriov_configure()
3155 return -EINVAL; in hns3_pci_sriov_configure()
3161 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); in hns3_pci_sriov_configure()
3170 dev_warn(&pdev->dev, in hns3_pci_sriov_configure()
3192 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { in hns3_suspend()
3194 if (ae_dev->ops && ae_dev->ops->reset_prepare) in hns3_suspend()
3195 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); in hns3_suspend()
3205 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { in hns3_resume()
3207 if (ae_dev->ops && ae_dev->ops->reset_done) in hns3_resume()
3208 ae_dev->ops->reset_done(ae_dev); in hns3_resume()
3220 dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state); in hns3_error_detected()
3225 if (!ae_dev || !ae_dev->ops) { in hns3_error_detected()
3226 dev_err(&pdev->dev, in hns3_error_detected()
3227 "Can't recover - error happened before device initialized\n"); in hns3_error_detected()
3231 if (ae_dev->ops->handle_hw_ras_error) in hns3_error_detected()
3232 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); in hns3_error_detected()
3244 struct device *dev = &pdev->dev; in hns3_slot_reset()
3246 if (!ae_dev || !ae_dev->ops) in hns3_slot_reset()
3249 ops = ae_dev->ops; in hns3_slot_reset()
3251 if (ops->reset_event && ops->get_reset_level && in hns3_slot_reset()
3252 ops->set_default_reset_request) { in hns3_slot_reset()
3253 if (ae_dev->hw_err_reset_req) { in hns3_slot_reset()
3254 reset_type = ops->get_reset_level(ae_dev, in hns3_slot_reset()
3255 &ae_dev->hw_err_reset_req); in hns3_slot_reset()
3256 ops->set_default_reset_request(ae_dev, reset_type); in hns3_slot_reset()
3258 ops->reset_event(pdev, NULL); in hns3_slot_reset()
3271 dev_info(&pdev->dev, "FLR prepare\n"); in hns3_reset_prepare()
3272 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) in hns3_reset_prepare()
3273 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); in hns3_reset_prepare()
3280 dev_info(&pdev->dev, "FLR done\n"); in hns3_reset_done()
3281 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) in hns3_reset_done()
3282 ae_dev->ops->reset_done(ae_dev); in hns3_reset_done()
3309 struct pci_dev *pdev = h->pdev; in hns3_set_default_feature()
3312 netdev->priv_flags |= IFF_UNICAST_FLT; in hns3_set_default_feature()
3314 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in hns3_set_default_feature()
3322 netdev->features |= NETIF_F_GRO_HW; in hns3_set_default_feature()
3325 netdev->features |= NETIF_F_NTUPLE; in hns3_set_default_feature()
3327 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) in hns3_set_default_feature()
3328 netdev->features |= NETIF_F_GSO_UDP_L4; in hns3_set_default_feature()
3330 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) in hns3_set_default_feature()
3331 netdev->features |= NETIF_F_HW_CSUM; in hns3_set_default_feature()
3333 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; in hns3_set_default_feature()
3335 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) in hns3_set_default_feature()
3336 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; in hns3_set_default_feature()
3338 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) in hns3_set_default_feature()
3339 netdev->features |= NETIF_F_HW_TC; in hns3_set_default_feature()
3341 netdev->hw_features |= netdev->features; in hns3_set_default_feature()
3342 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) in hns3_set_default_feature()
3343 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; in hns3_set_default_feature()
3345 netdev->vlan_features |= netdev->features & in hns3_set_default_feature()
3350 netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; in hns3_set_default_feature()
3356 if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) { in hns3_set_default_feature()
3357 netdev->features &= ~NETIF_F_GSO_GRE; in hns3_set_default_feature()
3358 netdev->features &= ~NETIF_F_GSO_GRE_CSUM; in hns3_set_default_feature()
3368 if (ring->page_pool) { in hns3_alloc_buffer()
3369 p = page_pool_dev_alloc_frag(ring->page_pool, in hns3_alloc_buffer()
3370 &cb->page_offset, in hns3_alloc_buffer()
3373 return -ENOMEM; in hns3_alloc_buffer()
3375 cb->priv = p; in hns3_alloc_buffer()
3376 cb->buf = page_address(p); in hns3_alloc_buffer()
3377 cb->dma = page_pool_get_dma_addr(p); in hns3_alloc_buffer()
3378 cb->type = DESC_TYPE_PP_FRAG; in hns3_alloc_buffer()
3379 cb->reuse_flag = 0; in hns3_alloc_buffer()
3385 return -ENOMEM; in hns3_alloc_buffer()
3387 cb->priv = p; in hns3_alloc_buffer()
3388 cb->page_offset = 0; in hns3_alloc_buffer()
3389 cb->reuse_flag = 0; in hns3_alloc_buffer()
3390 cb->buf = page_address(p); in hns3_alloc_buffer()
3391 cb->length = hns3_page_size(ring); in hns3_alloc_buffer()
3392 cb->type = DESC_TYPE_PAGE; in hns3_alloc_buffer()
3393 page_ref_add(p, USHRT_MAX - 1); in hns3_alloc_buffer()
3394 cb->pagecnt_bias = USHRT_MAX; in hns3_alloc_buffer()
3402 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD | in hns3_free_buffer()
3404 napi_consume_skb(cb->priv, budget); in hns3_free_buffer()
3406 if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias) in hns3_free_buffer()
3407 __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); in hns3_free_buffer()
3408 else if (cb->type & DESC_TYPE_PP_FRAG) in hns3_free_buffer()
3409 page_pool_put_full_page(ring->page_pool, cb->priv, in hns3_free_buffer()
3417 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, in hns3_map_buffer()
3418 cb->length, ring_to_dma_dir(ring)); in hns3_map_buffer()
3420 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) in hns3_map_buffer()
3421 return -EIO; in hns3_map_buffer()
3429 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) in hns3_unmap_buffer()
3430 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, in hns3_unmap_buffer()
3432 else if ((cb->type & DESC_TYPE_PAGE) && cb->length) in hns3_unmap_buffer()
3433 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, in hns3_unmap_buffer()
3435 else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD | in hns3_unmap_buffer()
3442 hns3_unmap_buffer(ring, &ring->desc_cb[i]); in hns3_buffer_detach()
3443 ring->desc[i].addr = 0; in hns3_buffer_detach()
3444 ring->desc_cb[i].refill = 0; in hns3_buffer_detach()
3450 struct hns3_desc_cb *cb = &ring->desc_cb[i]; in hns3_free_buffer_detach()
3452 if (!ring->desc_cb[i].dma) in hns3_free_buffer_detach()
3463 for (i = 0; i < ring->desc_num; i++) in hns3_free_buffers()
3470 int size = ring->desc_num * sizeof(ring->desc[0]); in hns3_free_desc()
3474 if (ring->desc) { in hns3_free_desc()
3476 ring->desc, ring->desc_dma_addr); in hns3_free_desc()
3477 ring->desc = NULL; in hns3_free_desc()
3483 int size = ring->desc_num * sizeof(ring->desc[0]); in hns3_alloc_desc()
3485 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, in hns3_alloc_desc()
3486 &ring->desc_dma_addr, GFP_KERNEL); in hns3_alloc_desc()
3487 if (!ring->desc) in hns3_alloc_desc()
3488 return -ENOMEM; in hns3_alloc_desc()
3499 if (ret || ring->page_pool) in hns3_alloc_and_map_buffer()
3516 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); in hns3_alloc_and_attach_buffer()
3521 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_alloc_and_attach_buffer()
3522 ring->desc_cb[i].page_offset); in hns3_alloc_and_attach_buffer()
3523 ring->desc_cb[i].refill = 1; in hns3_alloc_and_attach_buffer()
3533 for (i = 0; i < ring->desc_num; i++) { in hns3_alloc_ring_buffers()
3545 for (j = i - 1; j >= 0; j--) in hns3_alloc_ring_buffers()
3550 /* detach an in-use buffer and replace it with a reserved one */
3554 hns3_unmap_buffer(ring, &ring->desc_cb[i]); in hns3_replace_buffer()
3555 ring->desc_cb[i] = *res_cb; in hns3_replace_buffer()
3556 ring->desc_cb[i].refill = 1; in hns3_replace_buffer()
3557 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_replace_buffer()
3558 ring->desc_cb[i].page_offset); in hns3_replace_buffer()
3559 ring->desc[i].rx.bd_base_info = 0; in hns3_replace_buffer()
3564 ring->desc_cb[i].reuse_flag = 0; in hns3_reuse_buffer()
3565 ring->desc_cb[i].refill = 1; in hns3_reuse_buffer()
3566 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_reuse_buffer()
3567 ring->desc_cb[i].page_offset); in hns3_reuse_buffer()
3568 ring->desc[i].rx.bd_base_info = 0; in hns3_reuse_buffer()
3571 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, in hns3_reuse_buffer()
3582 int ltu = smp_load_acquire(&ring->last_to_use); in hns3_nic_reclaim_desc()
3583 int ntc = ring->next_to_clean; in hns3_nic_reclaim_desc()
3589 desc = &ring->desc[ntc]; in hns3_nic_reclaim_desc()
3591 if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & in hns3_nic_reclaim_desc()
3595 desc_cb = &ring->desc_cb[ntc]; in hns3_nic_reclaim_desc()
3597 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL | in hns3_nic_reclaim_desc()
3601 (*bytes) += desc_cb->send_bytes; in hns3_nic_reclaim_desc()
3607 if (++ntc == ring->desc_num) in hns3_nic_reclaim_desc()
3611 prefetch(&ring->desc_cb[ntc]); in hns3_nic_reclaim_desc()
3621 smp_store_release(&ring->next_to_clean, ntc); in hns3_nic_reclaim_desc()
3641 ring->tqp_vector->tx_group.total_bytes += bytes; in hns3_clean_tx_ring()
3642 ring->tqp_vector->tx_group.total_packets += pkts; in hns3_clean_tx_ring()
3644 u64_stats_update_begin(&ring->syncp); in hns3_clean_tx_ring()
3645 ring->stats.tx_bytes += bytes; in hns3_clean_tx_ring()
3646 ring->stats.tx_pkts += pkts; in hns3_clean_tx_ring()
3647 u64_stats_update_end(&ring->syncp); in hns3_clean_tx_ring()
3649 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); in hns3_clean_tx_ring()
3659 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_clean_tx_ring()
3661 ring->stats.restart_queue++; in hns3_clean_tx_ring()
3668 int ntc = ring->next_to_clean; in hns3_desc_unused()
3669 int ntu = ring->next_to_use; in hns3_desc_unused()
3671 if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill)) in hns3_desc_unused()
3672 return ring->desc_num; in hns3_desc_unused()
3674 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; in hns3_desc_unused()
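hns3_desc_unused() above resolves the ambiguity of equal ring indices (completely unused versus completely refilled) with the per-descriptor refill flag; a plain restatement:

static int desc_unused(int ntc, int ntu, int desc_num, int refill)
{
        /* Equal indices mean either an entirely unused ring or an
         * entirely refilled one; the refill flag tells them apart.
         */
        if (ntc == ntu && !refill)
                return desc_num;

        return ((ntc >= ntu) ? 0 : desc_num) + ntc - ntu;
}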
3686 desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_nic_alloc_rx_buffers()
3687 if (desc_cb->reuse_flag) { in hns3_nic_alloc_rx_buffers()
3690 hns3_reuse_buffer(ring, ring->next_to_use); in hns3_nic_alloc_rx_buffers()
3700 writel(i, ring->tqp->io_base + in hns3_nic_alloc_rx_buffers()
3704 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); in hns3_nic_alloc_rx_buffers()
3712 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); in hns3_nic_alloc_rx_buffers()
3718 return page_count(cb->priv) == cb->pagecnt_bias; in hns3_can_reuse_page()
3726 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; in hns3_handle_rx_copybreak()
3727 u32 frag_offset = desc_cb->page_offset + pull_len; in hns3_handle_rx_copybreak()
3728 int size = le16_to_cpu(desc->rx.size); in hns3_handle_rx_copybreak()
3729 u32 frag_size = size - pull_len; in hns3_handle_rx_copybreak()
3737 return -ENOMEM; in hns3_handle_rx_copybreak()
3740 desc_cb->reuse_flag = 1; in hns3_handle_rx_copybreak()
3741 memcpy(frag, desc_cb->buf + frag_offset, frag_size); in hns3_handle_rx_copybreak()
3753 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; in hns3_nic_reuse_page()
3754 u32 frag_offset = desc_cb->page_offset + pull_len; in hns3_nic_reuse_page()
3755 int size = le16_to_cpu(desc->rx.size); in hns3_nic_reuse_page()
3757 u32 frag_size = size - pull_len; in hns3_nic_reuse_page()
3761 if (ring->page_pool) { in hns3_nic_reuse_page()
3762 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, in hns3_nic_reuse_page()
3767 /* Avoid re-using remote or pfmem page */ in hns3_nic_reuse_page()
3768 if (unlikely(!dev_page_is_reusable(desc_cb->priv))) in hns3_nic_reuse_page()
3780 * is non-zero, which means page_offset @ truesize will in hns3_nic_reuse_page()
3784 if ((!desc_cb->page_offset && reused) || in hns3_nic_reuse_page()
3785 ((desc_cb->page_offset + truesize + truesize) <= in hns3_nic_reuse_page()
3786 hns3_page_size(ring) && desc_cb->page_offset)) { in hns3_nic_reuse_page()
3787 desc_cb->page_offset += truesize; in hns3_nic_reuse_page()
3788 desc_cb->reuse_flag = 1; in hns3_nic_reuse_page()
3789 } else if (desc_cb->page_offset && reused) { in hns3_nic_reuse_page()
3790 desc_cb->page_offset = 0; in hns3_nic_reuse_page()
3791 desc_cb->reuse_flag = 1; in hns3_nic_reuse_page()
3792 } else if (frag_size <= ring->rx_copybreak) { in hns3_nic_reuse_page()
3799 desc_cb->pagecnt_bias--; in hns3_nic_reuse_page()
3801 if (unlikely(!desc_cb->pagecnt_bias)) { in hns3_nic_reuse_page()
3802 page_ref_add(desc_cb->priv, USHRT_MAX); in hns3_nic_reuse_page()
3803 desc_cb->pagecnt_bias = USHRT_MAX; in hns3_nic_reuse_page()
3806 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, in hns3_nic_reuse_page()
3809 if (unlikely(!desc_cb->reuse_flag)) in hns3_nic_reuse_page()
3810 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); in hns3_nic_reuse_page()
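hns3_nic_reuse_page() carves one RX page into truesize-sized slices, advances page_offset slice by slice, and only wraps back to offset 0 once nothing else references the page; pagecnt_bias is the driver's cached reference count. The sketch below is a simplified, assumed model of just the offset-advance decision; the toy_* names are hypothetical:

struct toy_rx_page {
	unsigned int page_size;		/* total receive page size */
	unsigned int truesize;		/* size of one buffer slice */
	unsigned int page_offset;	/* slice currently given to the HW */
};

/* Returns 1 if the page can be reused for the next buffer, 0 if the
 * caller should release it and allocate a fresh page instead.
 * 'page_idle' means no one else (stack or hardware) still holds a slice.
 */
static int toy_advance_slice(struct toy_rx_page *p, int page_idle)
{
	if (!p->page_offset && page_idle) {
		p->page_offset += p->truesize;	/* move on to slice 1 */
		return 1;
	}

	if (p->page_offset &&
	    p->page_offset + 2 * p->truesize <= p->page_size) {
		p->page_offset += p->truesize;	/* room for another slice */
		return 1;
	}

	if (p->page_offset && page_idle) {
		p->page_offset = 0;		/* wrap back to slice 0 */
		return 1;
	}

	return 0;
}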
3815 __be16 type = skb->protocol; in hns3_gro_complete()
3823 return -EFAULT; in hns3_gro_complete()
3825 vh = (struct vlan_hdr *)(skb->data + depth); in hns3_gro_complete()
3826 type = vh->h_vlan_encapsulated_proto; in hns3_gro_complete()
3838 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, in hns3_gro_complete()
3839 iph->daddr, 0); in hns3_gro_complete()
3846 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, in hns3_gro_complete()
3847 &iph->daddr, 0); in hns3_gro_complete()
3849 hns3_rl_err(skb->dev, in hns3_gro_complete()
3852 return -EFAULT; in hns3_gro_complete()
3855 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; in hns3_gro_complete()
3856 if (th->cwr) in hns3_gro_complete()
3857 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in hns3_gro_complete()
3860 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; in hns3_gro_complete()
3862 skb->csum_start = (unsigned char *)th - skb->head; in hns3_gro_complete()
3863 skb->csum_offset = offsetof(struct tcphdr, check); in hns3_gro_complete()
3864 skb->ip_summed = CHECKSUM_PARTIAL; in hns3_gro_complete()
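hns3_gro_complete() first skips any stacked VLAN tags to locate the real network header before refreshing the TCP pseudo-header checksum. A self-contained sketch of that walk; the constants and names are illustrative, and depth is assumed to start at the end of the Ethernet header:

#include <stdint.h>

#define TOY_ETH_P_8021Q		0x8100
#define TOY_ETH_P_8021AD	0x88a8
#define TOY_VLAN_HLEN		4

/* 'type' is the outer EtherType already read from the frame; '*depth'
 * ends up at the network header once every stacked tag is skipped.
 */
static uint16_t toy_skip_vlan_tags(const uint8_t *pkt, uint16_t type,
				   int *depth)
{
	while (type == TOY_ETH_P_8021Q || type == TOY_ETH_P_8021AD) {
		/* bytes 2..3 of a VLAN header carry the inner EtherType */
		type = (uint16_t)((pkt[*depth + 2] << 8) | pkt[*depth + 3]);
		*depth += TOY_VLAN_HLEN;
	}

	return type;
}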
3879 skb->ip_summed = CHECKSUM_COMPLETE; in hns3_checksum_complete()
3880 skb->csum = csum_unfold((__force __sum16)csum); in hns3_checksum_complete()
3890 skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level; in hns3_rx_handle_csum()
3891 skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed; in hns3_rx_handle_csum()
3901 skb->csum_level = 1; in hns3_rx_handle_csum()
3914 skb->ip_summed = CHECKSUM_UNNECESSARY; in hns3_rx_handle_csum()
3929 skb->ip_summed = CHECKSUM_NONE; in hns3_rx_checksum()
3933 if (!(netdev->features & NETIF_F_RXCSUM)) in hns3_rx_checksum()
3936 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) in hns3_rx_checksum()
3949 skb->ip_summed = CHECKSUM_NONE; in hns3_rx_checksum()
3961 napi_gro_flush(&ring->tqp_vector->napi, false); in hns3_rx_skb()
3963 napi_gro_receive(&ring->tqp_vector->napi, skb); in hns3_rx_skb()
3970 struct hnae3_handle *handle = ring->tqp->handle; in hns3_parse_vlan_tag()
3971 struct pci_dev *pdev = ring->tqp->handle->pdev; in hns3_parse_vlan_tag()
3974 if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) { in hns3_parse_vlan_tag()
3975 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
3977 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
3993 if (handle->port_base_vlan_state != in hns3_parse_vlan_tag()
3997 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
4000 if (handle->port_base_vlan_state != in hns3_parse_vlan_tag()
4004 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
4007 if (handle->port_base_vlan_state == in hns3_parse_vlan_tag()
4009 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
4011 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
4021 ring->desc[ring->next_to_clean].rx.bd_base_info &= in hns3_rx_ring_move_fw()
4023 ring->desc_cb[ring->next_to_clean].refill = 0; in hns3_rx_ring_move_fw()
4024 ring->next_to_clean += 1; in hns3_rx_ring_move_fw()
4026 if (unlikely(ring->next_to_clean == ring->desc_num)) in hns3_rx_ring_move_fw()
4027 ring->next_to_clean = 0; in hns3_rx_ring_move_fw()
4033 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_alloc_skb()
4037 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); in hns3_alloc_skb()
4038 skb = ring->skb; in hns3_alloc_skb()
4043 return -ENOMEM; in hns3_alloc_skb()
4047 prefetchw(skb->data); in hns3_alloc_skb()
4049 ring->pending_buf = 1; in hns3_alloc_skb()
4050 ring->frag_num = 0; in hns3_alloc_skb()
4051 ring->tail_skb = NULL; in hns3_alloc_skb()
4055 /* We can reuse buffer as-is, just make sure it is reusable */ in hns3_alloc_skb()
4056 if (dev_page_is_reusable(desc_cb->priv)) in hns3_alloc_skb()
4057 desc_cb->reuse_flag = 1; in hns3_alloc_skb()
4058 else if (desc_cb->type & DESC_TYPE_PP_FRAG) in hns3_alloc_skb()
4059 page_pool_put_full_page(ring->page_pool, desc_cb->priv, in hns3_alloc_skb()
4062 __page_frag_cache_drain(desc_cb->priv, in hns3_alloc_skb()
4063 desc_cb->pagecnt_bias); in hns3_alloc_skb()
4069 if (ring->page_pool) in hns3_alloc_skb()
4074 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); in hns3_alloc_skb()
4075 __skb_put(skb, ring->pull_len); in hns3_alloc_skb()
4076 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, in hns3_alloc_skb()
4085 struct sk_buff *skb = ring->skb; in hns3_add_frag()
4093 desc = &ring->desc[ring->next_to_clean]; in hns3_add_frag()
4094 desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_add_frag()
4095 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_add_frag()
4099 return -ENXIO; in hns3_add_frag()
4101 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { in hns3_add_frag()
4102 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); in hns3_add_frag()
4106 return -ENXIO; in hns3_add_frag()
4109 if (ring->page_pool) in hns3_add_frag()
4112 ring->frag_num = 0; in hns3_add_frag()
4114 if (ring->tail_skb) { in hns3_add_frag()
4115 ring->tail_skb->next = new_skb; in hns3_add_frag()
4116 ring->tail_skb = new_skb; in hns3_add_frag()
4118 skb_shinfo(skb)->frag_list = new_skb; in hns3_add_frag()
4119 ring->tail_skb = new_skb; in hns3_add_frag()
4123 if (ring->tail_skb) { in hns3_add_frag()
4124 head_skb->truesize += hns3_buf_size(ring); in hns3_add_frag()
4125 head_skb->data_len += le16_to_cpu(desc->rx.size); in hns3_add_frag()
4126 head_skb->len += le16_to_cpu(desc->rx.size); in hns3_add_frag()
4127 skb = ring->tail_skb; in hns3_add_frag()
4131 desc_cb->dma + desc_cb->page_offset, in hns3_add_frag()
4135 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); in hns3_add_frag()
4138 ring->pending_buf++; in hns3_add_frag()
4152 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, in hns3_set_gro_and_checksum()
4156 if (!skb_shinfo(skb)->gso_size) { in hns3_set_gro_and_checksum()
4162 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, in hns3_set_gro_and_checksum()
4166 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { in hns3_set_gro_and_checksum()
4177 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in hns3_set_gro_and_checksum()
4179 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in hns3_set_gro_and_checksum()
4181 return -EFAULT; in hns3_set_gro_and_checksum()
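The GRO and checksum parsing above relies on hnae3_get_field()-style mask-and-shift helpers to pull gso_size and the segment count out of packed descriptor words. A hypothetical equivalent, with a field layout that is illustrative only (the real HNS3 bit positions are not assumed here):

#define TOY_GET_FIELD(origin, mask, shift)	(((origin) & (mask)) >> (shift))

/* illustrative: a 14-bit segment-count field at bit 16 of a 32-bit word */
#define TOY_GRO_COUNT_M		0x3fff0000U
#define TOY_GRO_COUNT_S		16

static unsigned int toy_gro_count(unsigned int l234info)
{
	return TOY_GET_FIELD(l234info, TOY_GRO_COUNT_M, TOY_GRO_COUNT_S);
}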
4194 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { in hns3_set_rx_skb_rss_type()
4226 u32 nsec = le32_to_cpu(desc->ts_nsec); in hns3_handle_rx_ts_info()
4227 u32 sec = le32_to_cpu(desc->ts_sec); in hns3_handle_rx_ts_info()
4229 if (h->ae_algo->ops->get_rx_hwts) in hns3_handle_rx_ts_info()
4230 h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); in hns3_handle_rx_ts_info()
4244 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { in hns3_handle_rx_vlan_tag()
4264 * current packet, and ring->next_to_clean indicates the first in hns3_handle_bdinfo()
4265 * descriptor of next packet, so we need to subtract 1 below. in hns3_handle_bdinfo()
4267 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : in hns3_handle_bdinfo()
4268 (ring->desc_num - 1); in hns3_handle_bdinfo()
4269 desc = &ring->desc[pre_ntc]; in hns3_handle_bdinfo()
4270 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_handle_bdinfo()
4271 l234info = le32_to_cpu(desc->rx.l234_info); in hns3_handle_bdinfo()
4272 ol_info = le32_to_cpu(desc->rx.ol_info); in hns3_handle_bdinfo()
4273 csum = le16_to_cpu(desc->csum); in hns3_handle_bdinfo()
4279 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | in hns3_handle_bdinfo()
4281 u64_stats_update_begin(&ring->syncp); in hns3_handle_bdinfo()
4283 ring->stats.l2_err++; in hns3_handle_bdinfo()
4285 ring->stats.err_pkt_len++; in hns3_handle_bdinfo()
4286 u64_stats_update_end(&ring->syncp); in hns3_handle_bdinfo()
4288 return -EFAULT; in hns3_handle_bdinfo()
4291 len = skb->len; in hns3_handle_bdinfo()
4294 skb->protocol = eth_type_trans(skb, netdev); in hns3_handle_bdinfo()
4307 u64_stats_update_begin(&ring->syncp); in hns3_handle_bdinfo()
4308 ring->stats.rx_pkts++; in hns3_handle_bdinfo()
4309 ring->stats.rx_bytes += len; in hns3_handle_bdinfo()
4312 ring->stats.rx_multicast++; in hns3_handle_bdinfo()
4314 u64_stats_update_end(&ring->syncp); in hns3_handle_bdinfo()
4316 ring->tqp_vector->rx_group.total_bytes += len; in hns3_handle_bdinfo()
4318 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash), in hns3_handle_bdinfo()
4325 struct sk_buff *skb = ring->skb; in hns3_handle_rx_bd()
4332 desc = &ring->desc[ring->next_to_clean]; in hns3_handle_rx_bd()
4333 desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_handle_rx_bd()
4338 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_handle_rx_bd()
4341 return -ENXIO; in hns3_handle_rx_bd()
4344 length = le16_to_cpu(desc->rx.size); in hns3_handle_rx_bd()
4346 ring->va = desc_cb->buf + desc_cb->page_offset; in hns3_handle_rx_bd()
4349 desc_cb->dma + desc_cb->page_offset, in hns3_handle_rx_bd()
4360 net_prefetch(ring->va); in hns3_handle_rx_bd()
4362 ret = hns3_alloc_skb(ring, length, ring->va); in hns3_handle_rx_bd()
4363 skb = ring->skb; in hns3_handle_rx_bd()
4381 if (skb->len > HNS3_RX_HEAD_SIZE) in hns3_handle_rx_bd()
4382 memcpy(skb->data, ring->va, in hns3_handle_rx_bd()
4383 ALIGN(ring->pull_len, sizeof(long))); in hns3_handle_rx_bd()
4391 skb_record_rx_queue(skb, ring->tqp->tqp_index); in hns3_handle_rx_bd()
4404 unused_count -= ring->pending_buf; in hns3_clean_rx_ring()
4417 if (unlikely(!ring->skb || err == -ENXIO)) { in hns3_clean_rx_ring()
4420 rx_fn(ring, ring->skb); in hns3_clean_rx_ring()
4424 unused_count += ring->pending_buf; in hns3_clean_rx_ring()
4425 ring->skb = NULL; in hns3_clean_rx_ring()
4426 ring->pending_buf = 0; in hns3_clean_rx_ring()
4430 /* sync head pointer before exiting, since hardware will calculate in hns3_clean_rx_ring()
4442 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; in hns3_update_rx_int_coalesce()
4445 if (!rx_group->coal.adapt_enable) in hns3_update_rx_int_coalesce()
4448 dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets, in hns3_update_rx_int_coalesce()
4449 rx_group->total_bytes, &sample); in hns3_update_rx_int_coalesce()
4450 net_dim(&rx_group->dim, &sample); in hns3_update_rx_int_coalesce()
4455 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; in hns3_update_tx_int_coalesce()
4458 if (!tx_group->coal.adapt_enable) in hns3_update_tx_int_coalesce()
4461 dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets, in hns3_update_tx_int_coalesce()
4462 tx_group->total_bytes, &sample); in hns3_update_tx_int_coalesce()
4463 net_dim(&tx_group->dim, &sample); in hns3_update_tx_int_coalesce()
4468 struct hns3_nic_priv *priv = netdev_priv(napi->dev); in hns3_nic_common_poll()
4477 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { in hns3_nic_common_poll()
4485 hns3_for_each_ring(ring, tqp_vector->tx_group) in hns3_nic_common_poll()
4489 if (tqp_vector->num_tqps > 1) in hns3_nic_common_poll()
4490 rx_budget = max(budget / tqp_vector->num_tqps, 1); in hns3_nic_common_poll()
4492 hns3_for_each_ring(ring, tqp_vector->rx_group) { in hns3_nic_common_poll()
4501 tqp_vector->rx_group.total_packets += rx_pkt_total; in hns3_nic_common_poll()
4507 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { in hns3_nic_common_poll()
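When one interrupt vector serves several RX rings, the poll routine above splits the NAPI budget evenly across them but never hands a ring less than one descriptor of budget. A hypothetical helper expressing that rule:

static int toy_rx_budget(int budget, int num_tqps)
{
	if (num_tqps <= 1)
		return budget;

	/* max(budget / num_tqps, 1) */
	return (budget / num_tqps > 0) ? budget / num_tqps : 1;
}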
4524 struct pci_dev *pdev = tqp_vector->handle->pdev; in hns3_create_ring_chain()
4528 ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring; in hns3_create_ring_chain()
4531 while (cur_chain->next) in hns3_create_ring_chain()
4532 cur_chain = cur_chain->next; in hns3_create_ring_chain()
4536 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); in hns3_create_ring_chain()
4538 return -ENOMEM; in hns3_create_ring_chain()
4540 cur_chain->next = chain; in hns3_create_ring_chain()
4543 chain->tqp_index = ring->tqp->tqp_index; in hns3_create_ring_chain()
4544 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, in hns3_create_ring_chain()
4546 hnae3_set_field(chain->int_gl_idx, in hns3_create_ring_chain()
4552 ring = ring->next; in hns3_create_ring_chain()
4561 struct pci_dev *pdev = tqp_vector->handle->pdev; in hns3_get_vector_ring_chain()
4575 chain = cur_chain->next; in hns3_get_vector_ring_chain()
4576 devm_kfree(&pdev->dev, cur_chain); in hns3_get_vector_ring_chain()
4586 struct pci_dev *pdev = tqp_vector->handle->pdev; in hns3_free_vector_ring_chain()
4592 chain_tmp = chain->next; in hns3_free_vector_ring_chain()
4593 devm_kfree(&pdev->dev, chain); in hns3_free_vector_ring_chain()
4601 ring->next = group->ring; in hns3_add_ring_to_group()
4602 group->ring = ring; in hns3_add_ring_to_group()
4604 group->count++; in hns3_add_ring_to_group()
4609 struct pci_dev *pdev = priv->ae_handle->pdev; in hns3_nic_set_cpumask()
4611 int num_vectors = priv->vector_num; in hns3_nic_set_cpumask()
4615 numa_node = dev_to_node(&pdev->dev); in hns3_nic_set_cpumask()
4618 tqp_vector = &priv->tqp_vector[vector_i]; in hns3_nic_set_cpumask()
4620 &tqp_vector->affinity_mask); in hns3_nic_set_cpumask()
4629 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; in hns3_rx_dim_work()
4631 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); in hns3_rx_dim_work()
4633 hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec); in hns3_rx_dim_work()
4634 tqp_vector->rx_group.coal.int_gl = cur_moder.usec; in hns3_rx_dim_work()
4636 if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) { in hns3_rx_dim_work()
4638 tqp_vector->rx_group.coal.int_ql = cur_moder.pkts; in hns3_rx_dim_work()
4641 dim->state = DIM_START_MEASURE; in hns3_rx_dim_work()
4649 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; in hns3_tx_dim_work()
4651 net_dim_get_tx_moderation(dim->mode, dim->profile_ix); in hns3_tx_dim_work()
4654 tqp_vector->tx_group.coal.int_gl = cur_moder.usec; in hns3_tx_dim_work()
4656 if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) { in hns3_tx_dim_work()
4658 tqp_vector->tx_group.coal.int_ql = cur_moder.pkts; in hns3_tx_dim_work()
4661 dim->state = DIM_START_MEASURE; in hns3_tx_dim_work()
4666 INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work); in hns3_nic_init_dim()
4667 INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work); in hns3_nic_init_dim()
4672 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_init_vector_data()
4679 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_vector_data()
4680 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_init_vector_data()
4682 tqp_vector->num_tqps = 0; in hns3_nic_init_vector_data()
4686 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_nic_init_vector_data()
4687 u16 vector_i = i % priv->vector_num; in hns3_nic_init_vector_data()
4688 u16 tqp_num = h->kinfo.num_tqps; in hns3_nic_init_vector_data()
4690 tqp_vector = &priv->tqp_vector[vector_i]; in hns3_nic_init_vector_data()
4692 hns3_add_ring_to_group(&tqp_vector->tx_group, in hns3_nic_init_vector_data()
4693 &priv->ring[i]); in hns3_nic_init_vector_data()
4695 hns3_add_ring_to_group(&tqp_vector->rx_group, in hns3_nic_init_vector_data()
4696 &priv->ring[i + tqp_num]); in hns3_nic_init_vector_data()
4698 priv->ring[i].tqp_vector = tqp_vector; in hns3_nic_init_vector_data()
4699 priv->ring[i + tqp_num].tqp_vector = tqp_vector; in hns3_nic_init_vector_data()
4700 tqp_vector->num_tqps++; in hns3_nic_init_vector_data()
4703 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_vector_data()
4706 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_init_vector_data()
4708 tqp_vector->rx_group.total_bytes = 0; in hns3_nic_init_vector_data()
4709 tqp_vector->rx_group.total_packets = 0; in hns3_nic_init_vector_data()
4710 tqp_vector->tx_group.total_bytes = 0; in hns3_nic_init_vector_data()
4711 tqp_vector->tx_group.total_packets = 0; in hns3_nic_init_vector_data()
4712 tqp_vector->handle = h; in hns3_nic_init_vector_data()
4716 ret = -ENOMEM; in hns3_nic_init_vector_data()
4720 ret = h->ae_algo->ops->map_ring_to_vector(h, in hns3_nic_init_vector_data()
4721 tqp_vector->vector_irq, vector_ring_chain); in hns3_nic_init_vector_data()
4728 netif_napi_add(priv->netdev, &tqp_vector->napi, in hns3_nic_init_vector_data()
4735 while (i--) in hns3_nic_init_vector_data()
4736 netif_napi_del(&priv->tqp_vector[i].napi); in hns3_nic_init_vector_data()
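The mapping loop above distributes queues over vectors round-robin: queue i contributes its TX ring and its RX ring (stored at i + num_tqps) to vector i % vector_num, so both directions of a queue are polled from the same NAPI context. A hypothetical helper and example:

static int toy_queue_to_vector(int queue_idx, int vector_num)
{
	return queue_idx % vector_num;
}
/* e.g. 16 queues over 4 vectors: queues 0, 4, 8 and 12 land on vector 0,
 * each vector handling the TX and RX rings of its queues together.
 */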
4743 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_nic_init_coal_cfg()
4744 struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; in hns3_nic_init_coal_cfg()
4745 struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; in hns3_nic_init_coal_cfg()
4752 * Default: enable interrupt coalescing self-adaptive and GL in hns3_nic_init_coal_cfg()
4754 tx_coal->adapt_enable = 1; in hns3_nic_init_coal_cfg()
4755 rx_coal->adapt_enable = 1; in hns3_nic_init_coal_cfg()
4757 tx_coal->int_gl = HNS3_INT_GL_50K; in hns3_nic_init_coal_cfg()
4758 rx_coal->int_gl = HNS3_INT_GL_50K; in hns3_nic_init_coal_cfg()
4760 rx_coal->flow_level = HNS3_FLOW_LOW; in hns3_nic_init_coal_cfg()
4761 tx_coal->flow_level = HNS3_FLOW_LOW; in hns3_nic_init_coal_cfg()
4763 if (ae_dev->dev_specs.int_ql_max) { in hns3_nic_init_coal_cfg()
4764 tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; in hns3_nic_init_coal_cfg()
4765 rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; in hns3_nic_init_coal_cfg()
4771 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_alloc_vector_data()
4774 struct pci_dev *pdev = h->pdev; in hns3_nic_alloc_vector_data()
4775 u16 tqp_num = h->kinfo.num_tqps; in hns3_nic_alloc_vector_data()
4784 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), in hns3_nic_alloc_vector_data()
4787 return -ENOMEM; in hns3_nic_alloc_vector_data()
4790 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); in hns3_nic_alloc_vector_data()
4792 priv->vector_num = vector_num; in hns3_nic_alloc_vector_data()
4793 priv->tqp_vector = (struct hns3_enet_tqp_vector *) in hns3_nic_alloc_vector_data()
4794 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), in hns3_nic_alloc_vector_data()
4796 if (!priv->tqp_vector) { in hns3_nic_alloc_vector_data()
4797 ret = -ENOMEM; in hns3_nic_alloc_vector_data()
4801 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_alloc_vector_data()
4802 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_alloc_vector_data()
4803 tqp_vector->idx = i; in hns3_nic_alloc_vector_data()
4804 tqp_vector->mask_addr = vector[i].io_addr; in hns3_nic_alloc_vector_data()
4805 tqp_vector->vector_irq = vector[i].vector; in hns3_nic_alloc_vector_data()
4810 devm_kfree(&pdev->dev, vector); in hns3_nic_alloc_vector_data()
4816 group->ring = NULL; in hns3_clear_ring_group()
4817 group->count = 0; in hns3_clear_ring_group()
4823 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_uninit_vector_data()
4827 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_uninit_vector_data()
4828 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_uninit_vector_data()
4830 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) in hns3_nic_uninit_vector_data()
4839 dev_warn(priv->dev, "failed to get ring chain\n"); in hns3_nic_uninit_vector_data()
4841 h->ae_algo->ops->unmap_ring_from_vector(h, in hns3_nic_uninit_vector_data()
4842 tqp_vector->vector_irq, vector_ring_chain); in hns3_nic_uninit_vector_data()
4846 hns3_clear_ring_group(&tqp_vector->rx_group); in hns3_nic_uninit_vector_data()
4847 hns3_clear_ring_group(&tqp_vector->tx_group); in hns3_nic_uninit_vector_data()
4848 netif_napi_del(&priv->tqp_vector[i].napi); in hns3_nic_uninit_vector_data()
4854 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_dealloc_vector_data()
4855 struct pci_dev *pdev = h->pdev; in hns3_nic_dealloc_vector_data()
4858 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_dealloc_vector_data()
4861 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_dealloc_vector_data()
4862 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); in hns3_nic_dealloc_vector_data()
4867 devm_kfree(&pdev->dev, priv->tqp_vector); in hns3_nic_dealloc_vector_data()
4873 int queue_num = priv->ae_handle->kinfo.num_tqps; in hns3_ring_get_cfg()
4878 ring = &priv->ring[q->tqp_index]; in hns3_ring_get_cfg()
4879 desc_num = priv->ae_handle->kinfo.num_tx_desc; in hns3_ring_get_cfg()
4880 ring->queue_index = q->tqp_index; in hns3_ring_get_cfg()
4881 ring->tx_copybreak = priv->tx_copybreak; in hns3_ring_get_cfg()
4882 ring->last_to_use = 0; in hns3_ring_get_cfg()
4884 ring = &priv->ring[q->tqp_index + queue_num]; in hns3_ring_get_cfg()
4885 desc_num = priv->ae_handle->kinfo.num_rx_desc; in hns3_ring_get_cfg()
4886 ring->queue_index = q->tqp_index; in hns3_ring_get_cfg()
4887 ring->rx_copybreak = priv->rx_copybreak; in hns3_ring_get_cfg()
4890 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); in hns3_ring_get_cfg()
4892 ring->tqp = q; in hns3_ring_get_cfg()
4893 ring->desc = NULL; in hns3_ring_get_cfg()
4894 ring->desc_cb = NULL; in hns3_ring_get_cfg()
4895 ring->dev = priv->dev; in hns3_ring_get_cfg()
4896 ring->desc_dma_addr = 0; in hns3_ring_get_cfg()
4897 ring->buf_size = q->buf_size; in hns3_ring_get_cfg()
4898 ring->desc_num = desc_num; in hns3_ring_get_cfg()
4899 ring->next_to_use = 0; in hns3_ring_get_cfg()
4900 ring->next_to_clean = 0; in hns3_ring_get_cfg()
4912 struct hnae3_handle *h = priv->ae_handle; in hns3_get_ring_config()
4913 struct pci_dev *pdev = h->pdev; in hns3_get_ring_config()
4916 priv->ring = devm_kzalloc(&pdev->dev, in hns3_get_ring_config()
4917 array3_size(h->kinfo.num_tqps, in hns3_get_ring_config()
4918 sizeof(*priv->ring), 2), in hns3_get_ring_config()
4920 if (!priv->ring) in hns3_get_ring_config()
4921 return -ENOMEM; in hns3_get_ring_config()
4923 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_get_ring_config()
4924 hns3_queue_to_ring(h->kinfo.tqp[i], priv); in hns3_get_ring_config()
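hns3_get_ring_config() allocates one flat array of 2 * num_tqps rings, TX rings first and the matching RX rings after them, which is why RX indices elsewhere in this file are offset by num_tqps. Hypothetical index helpers for that layout:

static inline int toy_tx_ring_idx(int queue)
{
	return queue;			/* TX ring for queue q lives at [q] */
}

static inline int toy_rx_ring_idx(int queue, int num_tqps)
{
	return queue + num_tqps;	/* RX rings follow all TX rings */
}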
4931 if (!priv->ring) in hns3_put_ring_config()
4934 devm_kfree(priv->dev, priv->ring); in hns3_put_ring_config()
4935 priv->ring = NULL; in hns3_put_ring_config()
4943 .pool_size = ring->desc_num * hns3_buf_size(ring) / in hns3_alloc_page_pool()
4952 ring->page_pool = page_pool_create(&pp_params); in hns3_alloc_page_pool()
4953 if (IS_ERR(ring->page_pool)) { in hns3_alloc_page_pool()
4955 PTR_ERR(ring->page_pool)); in hns3_alloc_page_pool()
4956 ring->page_pool = NULL; in hns3_alloc_page_pool()
4964 if (ring->desc_num <= 0 || ring->buf_size <= 0) in hns3_alloc_ring_memory()
4965 return -EINVAL; in hns3_alloc_ring_memory()
4967 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, in hns3_alloc_ring_memory()
4968 sizeof(ring->desc_cb[0]), GFP_KERNEL); in hns3_alloc_ring_memory()
4969 if (!ring->desc_cb) { in hns3_alloc_ring_memory()
4970 ret = -ENOMEM; in hns3_alloc_ring_memory()
4994 devm_kfree(ring_to_dev(ring), ring->desc_cb); in hns3_alloc_ring_memory()
4995 ring->desc_cb = NULL; in hns3_alloc_ring_memory()
5003 devm_kfree(ring_to_dev(ring), ring->desc_cb); in hns3_fini_ring()
5004 ring->desc_cb = NULL; in hns3_fini_ring()
5005 ring->next_to_clean = 0; in hns3_fini_ring()
5006 ring->next_to_use = 0; in hns3_fini_ring()
5007 ring->last_to_use = 0; in hns3_fini_ring()
5008 ring->pending_buf = 0; in hns3_fini_ring()
5009 if (!HNAE3_IS_TX_RING(ring) && ring->skb) { in hns3_fini_ring()
5010 dev_kfree_skb_any(ring->skb); in hns3_fini_ring()
5011 ring->skb = NULL; in hns3_fini_ring()
5012 } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) { in hns3_fini_ring()
5013 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_fini_ring()
5015 dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len, in hns3_fini_ring()
5017 free_pages((unsigned long)tx_spare->buf, in hns3_fini_ring()
5018 get_order(tx_spare->len)); in hns3_fini_ring()
5020 ring->tx_spare = NULL; in hns3_fini_ring()
5023 if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) { in hns3_fini_ring()
5024 page_pool_destroy(ring->page_pool); in hns3_fini_ring()
5025 ring->page_pool = NULL; in hns3_fini_ring()
5055 dma_addr_t dma = ring->desc_dma_addr; in hns3_init_ring_hw()
5056 struct hnae3_queue *q = ring->tqp; in hns3_init_ring_hw()
5064 hns3_buf_size2type(ring->buf_size)); in hns3_init_ring_hw()
5066 ring->desc_num / 8 - 1); in hns3_init_ring_hw()
5074 ring->desc_num / 8 - 1); in hns3_init_ring_hw()
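The BD-number register writes above appear to encode the descriptor count in units of 8 BDs, minus one; a hypothetical helper for that encoding, assumed only from the expression shown here:

static inline unsigned int toy_bd_num_reg_val(unsigned int desc_num)
{
	return desc_num / 8 - 1;	/* units of 8 BDs, minus one */
}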
5080 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; in hns3_init_tx_ring_tc()
5081 struct hnae3_tc_info *tc_info = &kinfo->tc_info; in hns3_init_tx_ring_tc()
5084 for (i = 0; i < tc_info->num_tc; i++) { in hns3_init_tx_ring_tc()
5087 for (j = 0; j < tc_info->tqp_count[i]; j++) { in hns3_init_tx_ring_tc()
5090 q = priv->ring[tc_info->tqp_offset[i] + j].tqp; in hns3_init_tx_ring_tc()
5098 struct hnae3_handle *h = priv->ae_handle; in hns3_init_all_ring()
5099 int ring_num = h->kinfo.num_tqps * 2; in hns3_init_all_ring()
5104 ret = hns3_alloc_ring_memory(&priv->ring[i]); in hns3_init_all_ring()
5106 dev_err(priv->dev, in hns3_init_all_ring()
5111 u64_stats_init(&priv->ring[i].syncp); in hns3_init_all_ring()
5118 for (j = i - 1; j >= 0; j--) in hns3_init_all_ring()
5119 hns3_fini_ring(&priv->ring[j]); in hns3_init_all_ring()
5121 return -ENOMEM; in hns3_init_all_ring()
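hns3_init_all_ring() uses the classic unwind-on-failure pattern: if ring i fails to allocate, rings i-1 down to 0 are torn down in reverse order before the error is returned. A generic, self-contained sketch of the pattern; the function pointers are purely illustrative:

static int toy_init_all(int n, int (*init_one)(int idx),
			void (*fini_one)(int idx))
{
	int i, j, ret;

	for (i = 0; i < n; i++) {
		ret = init_one(i);
		if (!ret)
			continue;

		/* undo everything that succeeded, newest first */
		for (j = i - 1; j >= 0; j--)
			fini_one(j);
		return ret;
	}

	return 0;
}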
5126 struct hnae3_handle *h = priv->ae_handle; in hns3_uninit_all_ring()
5129 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_uninit_all_ring()
5130 hns3_fini_ring(&priv->ring[i]); in hns3_uninit_all_ring()
5131 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); in hns3_uninit_all_ring()
5140 struct hnae3_handle *h = priv->ae_handle; in hns3_init_mac_addr()
5144 if (h->ae_algo->ops->get_mac_addr) in hns3_init_mac_addr()
5145 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); in hns3_init_mac_addr()
5150 hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); in hns3_init_mac_addr()
5151 dev_warn(priv->dev, "using random MAC address %s\n", in hns3_init_mac_addr()
5153 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { in hns3_init_mac_addr()
5155 ether_addr_copy(netdev->perm_addr, mac_addr_temp); in hns3_init_mac_addr()
5160 if (h->ae_algo->ops->set_mac_addr) in hns3_init_mac_addr()
5161 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); in hns3_init_mac_addr()
5171 if (h->ae_algo->ops->mac_connect_phy) in hns3_init_phy()
5172 ret = h->ae_algo->ops->mac_connect_phy(h); in hns3_init_phy()
5181 if (h->ae_algo->ops->mac_disconnect_phy) in hns3_uninit_phy()
5182 h->ae_algo->ops->mac_disconnect_phy(h); in hns3_uninit_phy()
5187 if (!handle->ae_algo->ops->client_start) in hns3_client_start()
5190 return handle->ae_algo->ops->client_start(handle); in hns3_client_start()
5195 if (!handle->ae_algo->ops->client_stop) in hns3_client_stop()
5198 handle->ae_algo->ops->client_stop(handle); in hns3_client_stop()
5203 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; in hns3_info_show()
5206 hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); in hns3_info_show()
5207 dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); in hns3_info_show()
5208 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); in hns3_info_show()
5209 dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); in hns3_info_show()
5210 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); in hns3_info_show()
5211 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); in hns3_info_show()
5212 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); in hns3_info_show()
5213 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); in hns3_info_show()
5214 dev_info(priv->dev, "Total number of enabled TCs: %u\n", in hns3_info_show()
5215 kinfo->tc_info.num_tc); in hns3_info_show()
5216 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); in hns3_info_show()
5222 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_set_cq_period_mode()
5223 struct hnae3_handle *handle = priv->ae_handle; in hns3_set_cq_period_mode()
5227 priv->tx_cqe_mode = mode; in hns3_set_cq_period_mode()
5229 for (i = 0; i < priv->vector_num; i++) in hns3_set_cq_period_mode()
5230 priv->tqp_vector[i].tx_group.dim.mode = mode; in hns3_set_cq_period_mode()
5232 priv->rx_cqe_mode = mode; in hns3_set_cq_period_mode()
5234 for (i = 0; i < priv->vector_num; i++) in hns3_set_cq_period_mode()
5235 priv->tqp_vector[i].rx_group.dim.mode = mode; in hns3_set_cq_period_mode()
5246 writel(new_mode, handle->kinfo.io_base + reg); in hns3_set_cq_period_mode()
5260 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hns3_state_init()
5261 struct net_device *netdev = handle->kinfo.netdev; in hns3_state_init()
5264 set_bit(HNS3_NIC_STATE_INITED, &priv->state); in hns3_state_init()
5266 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) in hns3_state_init()
5267 set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); in hns3_state_init()
5269 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hns3_state_init()
5270 set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); in hns3_state_init()
5272 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) in hns3_state_init()
5273 set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); in hns3_state_init()
5276 set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); in hns3_state_init()
5281 struct hns3_nic_priv *priv = handle->priv; in hns3_state_uninit()
5283 clear_bit(HNS3_NIC_STATE_INITED, &priv->state); in hns3_state_uninit()
5288 struct pci_dev *pdev = handle->pdev; in hns3_client_init()
5295 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, in hns3_client_init()
5299 return -ENOMEM; in hns3_client_init()
5302 priv->dev = &pdev->dev; in hns3_client_init()
5303 priv->netdev = netdev; in hns3_client_init()
5304 priv->ae_handle = handle; in hns3_client_init()
5305 priv->tx_timeout_count = 0; in hns3_client_init()
5306 priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; in hns3_client_init()
5307 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_client_init()
5309 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); in hns3_client_init()
5311 handle->kinfo.netdev = netdev; in hns3_client_init()
5312 handle->priv = (void *)priv; in hns3_client_init()
5318 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; in hns3_client_init()
5319 netdev->priv_flags |= IFF_UNICAST_FLT; in hns3_client_init()
5320 netdev->netdev_ops = &hns3_nic_netdev_ops; in hns3_client_init()
5321 SET_NETDEV_DEV(netdev, &pdev->dev); in hns3_client_init()
5329 ret = -ENOMEM; in hns3_client_init()
5337 ret = -ENOMEM; in hns3_client_init()
5343 ret = -ENOMEM; in hns3_client_init()
5349 ret = -ENOMEM; in hns3_client_init()
5363 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); in hns3_client_init()
5367 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); in hns3_client_init()
5374 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); in hns3_client_init()
5382 dev_err(priv->dev, "failed to init debugfs, ret = %d\n", in hns3_client_init()
5387 netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); in hns3_client_init()
5393 dev_err(priv->dev, "probe register netdev fail!\n"); in hns3_client_init()
5418 priv->ring = NULL; in hns3_client_init()
5420 priv->ae_handle = NULL; in hns3_client_init()
5427 struct net_device *netdev = handle->kinfo.netdev; in hns3_client_uninit()
5430 if (netdev->reg_state != NETREG_UNINITIALIZED) in hns3_client_uninit()
5437 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_client_uninit()
5463 struct net_device *netdev = handle->kinfo.netdev; in hns3_link_status_change()
5483 while (ring->next_to_clean != ring->next_to_use) { in hns3_clear_tx_ring()
5484 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; in hns3_clear_tx_ring()
5485 hns3_free_buffer_detach(ring, ring->next_to_clean, 0); in hns3_clear_tx_ring()
5489 ring->pending_buf = 0; in hns3_clear_tx_ring()
5497 while (ring->next_to_use != ring->next_to_clean) { in hns3_clear_rx_ring()
5502 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { in hns3_clear_rx_ring()
5514 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); in hns3_clear_rx_ring()
5520 if (ring->skb) { in hns3_clear_rx_ring()
5521 dev_kfree_skb_any(ring->skb); in hns3_clear_rx_ring()
5522 ring->skb = NULL; in hns3_clear_rx_ring()
5523 ring->pending_buf = 0; in hns3_clear_rx_ring()
5531 while (ring->next_to_use != ring->next_to_clean) { in hns3_force_clear_rx_ring()
5536 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { in hns3_force_clear_rx_ring()
5538 &ring->desc_cb[ring->next_to_use]); in hns3_force_clear_rx_ring()
5539 ring->desc_cb[ring->next_to_use].dma = 0; in hns3_force_clear_rx_ring()
5548 struct net_device *ndev = h->kinfo.netdev; in hns3_clear_all_ring()
5552 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_clear_all_ring()
5555 ring = &priv->ring[i]; in hns3_clear_all_ring()
5558 ring = &priv->ring[i + h->kinfo.num_tqps]; in hns3_clear_all_ring()
5571 struct net_device *ndev = h->kinfo.netdev; in hns3_nic_reset_all_ring()
5577 ret = h->ae_algo->ops->reset_queue(h); in hns3_nic_reset_all_ring()
5581 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_nic_reset_all_ring()
5582 hns3_init_ring_hw(&priv->ring[i]); in hns3_nic_reset_all_ring()
5587 hns3_clear_tx_ring(&priv->ring[i]); in hns3_nic_reset_all_ring()
5588 priv->ring[i].next_to_clean = 0; in hns3_nic_reset_all_ring()
5589 priv->ring[i].next_to_use = 0; in hns3_nic_reset_all_ring()
5590 priv->ring[i].last_to_use = 0; in hns3_nic_reset_all_ring()
5592 rx_ring = &priv->ring[i + h->kinfo.num_tqps]; in hns3_nic_reset_all_ring()
5601 for (j = 0; j < rx_ring->desc_num; j++) in hns3_nic_reset_all_ring()
5604 rx_ring->next_to_clean = 0; in hns3_nic_reset_all_ring()
5605 rx_ring->next_to_use = 0; in hns3_nic_reset_all_ring()
5615 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hns3_reset_notify_down_enet()
5616 struct net_device *ndev = kinfo->netdev; in hns3_reset_notify_down_enet()
5619 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) in hns3_reset_notify_down_enet()
5630 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hns3_reset_notify_up_enet()
5631 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); in hns3_reset_notify_up_enet()
5634 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_reset_notify_up_enet()
5635 netdev_err(kinfo->netdev, "device is not initialized yet\n"); in hns3_reset_notify_up_enet()
5636 return -EFAULT; in hns3_reset_notify_up_enet()
5639 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); in hns3_reset_notify_up_enet()
5641 if (netif_running(kinfo->netdev)) { in hns3_reset_notify_up_enet()
5642 ret = hns3_nic_net_open(kinfo->netdev); in hns3_reset_notify_up_enet()
5644 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); in hns3_reset_notify_up_enet()
5645 netdev_err(kinfo->netdev, in hns3_reset_notify_up_enet()
5656 struct net_device *netdev = handle->kinfo.netdev; in hns3_reset_notify_init_enet()
5679 hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode); in hns3_reset_notify_init_enet()
5684 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); in hns3_reset_notify_init_enet()
5688 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); in hns3_reset_notify_init_enet()
5693 if (!hns3_is_phys_func(handle->pdev)) in hns3_reset_notify_init_enet()
5698 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); in hns3_reset_notify_init_enet()
5702 set_bit(HNS3_NIC_STATE_INITED, &priv->state); in hns3_reset_notify_init_enet()
5723 struct net_device *netdev = handle->kinfo.netdev; in hns3_reset_notify_uninit_enet()
5726 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_reset_notify_uninit_enet()
5729 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_reset_notify_uninit_enet()
5737 hns3_reset_tx_queue(priv->ae_handle); in hns3_reset_notify_uninit_enet()
5780 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, in hns3_change_channels()
5783 dev_err(&handle->pdev->dev, in hns3_change_channels()
5803 struct hnae3_knic_private_info *kinfo = &h->kinfo; in hns3_set_channels()
5805 u32 new_tqp_num = ch->combined_count; in hns3_set_channels()
5810 return -EBUSY; in hns3_set_channels()
5812 if (ch->rx_count || ch->tx_count) in hns3_set_channels()
5813 return -EINVAL; in hns3_set_channels()
5815 if (kinfo->tc_info.mqprio_active) { in hns3_set_channels()
5816 dev_err(&netdev->dev, in hns3_set_channels()
5818 return -EINVAL; in hns3_set_channels()
5823 dev_err(&netdev->dev, in hns3_set_channels()
5826 return -EINVAL; in hns3_set_channels()
5829 if (kinfo->rss_size == new_tqp_num) in hns3_set_channels()
5844 org_tqp_num = h->kinfo.num_tqps; in hns3_set_channels()
5867 struct hnae3_handle *h = priv->ae_handle; in hns3_external_lb_prepare()
5873 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_external_lb_prepare()
5879 for (i = 0; i < priv->vector_num; i++) in hns3_external_lb_prepare()
5880 hns3_vector_disable(&priv->tqp_vector[i]); in hns3_external_lb_prepare()
5882 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_external_lb_prepare()
5883 hns3_tqp_disable(h->kinfo.tqp[i]); in hns3_external_lb_prepare()
5890 hns3_nic_reset_all_ring(priv->ae_handle); in hns3_external_lb_prepare()
5892 hns3_reset_tx_queue(priv->ae_handle); in hns3_external_lb_prepare()
5898 struct hnae3_handle *h = priv->ae_handle; in hns3_external_lb_restore()
5907 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_external_lb_restore()
5910 if (hns3_nic_reset_all_ring(priv->ae_handle)) in hns3_external_lb_restore()
5913 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_external_lb_restore()
5915 for (i = 0; i < priv->vector_num; i++) in hns3_external_lb_restore()
5916 hns3_vector_enable(&priv->tqp_vector[i]); in hns3_external_lb_restore()
5918 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_external_lb_restore()
5919 hns3_tqp_enable(h->kinfo.tqp[i]); in hns3_external_lb_restore()
5923 if (h->ae_algo->ops->get_status(h)) in hns3_external_lb_restore()
5931 .msg = "IMP CMDQ error" },
5945 dev_err(&handle->pdev->dev, "Detected %s!\n", in hns3_process_hw_error()
5960 /* hns3_init_module - Driver registration routine
5968 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); in hns3_init_module()
5999 /* hns3_exit_module - Driver exit cleanup routine
6014 MODULE_ALIAS("pci:hns-nic");