Lines Matching +full:compute +full:- +full:cb
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/dma-mapping.h>
55 static int debug = -1;
80 /* hns3_pci_tbl - PCI Device ID Table
388 napi_schedule_irqoff(&tqp_vector->napi); in hns3_irq_handle()
389 tqp_vector->event_cnt++; in hns3_irq_handle()
399 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_uninit_irq()
400 tqp_vectors = &priv->tqp_vector[i]; in hns3_nic_uninit_irq()
402 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) in hns3_nic_uninit_irq()
406 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); in hns3_nic_uninit_irq()
409 free_irq(tqp_vectors->vector_irq, tqp_vectors); in hns3_nic_uninit_irq()
410 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; in hns3_nic_uninit_irq()
423 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_irq()
424 tqp_vectors = &priv->tqp_vector[i]; in hns3_nic_init_irq()
426 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) in hns3_nic_init_irq()
429 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { in hns3_nic_init_irq()
430 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
431 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
432 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
435 } else if (tqp_vectors->rx_group.ring) { in hns3_nic_init_irq()
436 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
437 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
438 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
440 } else if (tqp_vectors->tx_group.ring) { in hns3_nic_init_irq()
441 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
442 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
443 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
450 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; in hns3_nic_init_irq()
452 irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); in hns3_nic_init_irq()
453 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, in hns3_nic_init_irq()
454 tqp_vectors->name, tqp_vectors); in hns3_nic_init_irq()
456 netdev_err(priv->netdev, "request irq(%d) fail\n", in hns3_nic_init_irq()
457 tqp_vectors->vector_irq); in hns3_nic_init_irq()
462 irq_set_affinity_hint(tqp_vectors->vector_irq, in hns3_nic_init_irq()
463 &tqp_vectors->affinity_mask); in hns3_nic_init_irq()
465 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; in hns3_nic_init_irq()
474 writel(mask_en, tqp_vector->mask_addr); in hns3_mask_vector_irq()
479 napi_enable(&tqp_vector->napi); in hns3_irq_enable()
480 enable_irq(tqp_vector->vector_irq); in hns3_irq_enable()
485 disable_irq(tqp_vector->vector_irq); in hns3_irq_disable()
486 napi_disable(&tqp_vector->napi); in hns3_irq_disable()
487 cancel_work_sync(&tqp_vector->rx_group.dim.work); in hns3_irq_disable()
488 cancel_work_sync(&tqp_vector->tx_group.dim.work); in hns3_irq_disable()
497 * Rl defines rate of interrupts i.e. number of interrupts-per-second in hns3_set_vector_coalesce_rl()
500 if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable && in hns3_set_vector_coalesce_rl()
501 !tqp_vector->rx_group.coal.adapt_enable) in hns3_set_vector_coalesce_rl()
503 * 0-59 and the unit is 4. in hns3_set_vector_coalesce_rl()
507 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); in hns3_set_vector_coalesce_rl()
515 if (tqp_vector->rx_group.coal.unit_1us) in hns3_set_vector_coalesce_rx_gl()
520 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); in hns3_set_vector_coalesce_rx_gl()
528 if (tqp_vector->tx_group.coal.unit_1us) in hns3_set_vector_coalesce_tx_gl()
533 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); in hns3_set_vector_coalesce_tx_gl()
539 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET); in hns3_set_vector_coalesce_tx_ql()
545 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET); in hns3_set_vector_coalesce_rx_ql()
551 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; in hns3_vector_coalesce_init()
552 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; in hns3_vector_coalesce_init()
553 struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); in hns3_vector_coalesce_init()
554 struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal; in hns3_vector_coalesce_init()
555 struct hns3_enet_coalesce *prx_coal = &priv->rx_coal; in hns3_vector_coalesce_init()
557 tx_coal->adapt_enable = ptx_coal->adapt_enable; in hns3_vector_coalesce_init()
558 rx_coal->adapt_enable = prx_coal->adapt_enable; in hns3_vector_coalesce_init()
560 tx_coal->int_gl = ptx_coal->int_gl; in hns3_vector_coalesce_init()
561 rx_coal->int_gl = prx_coal->int_gl; in hns3_vector_coalesce_init()
563 rx_coal->flow_level = prx_coal->flow_level; in hns3_vector_coalesce_init()
564 tx_coal->flow_level = ptx_coal->flow_level; in hns3_vector_coalesce_init()
569 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { in hns3_vector_coalesce_init()
570 tx_coal->unit_1us = 1; in hns3_vector_coalesce_init()
571 rx_coal->unit_1us = 1; in hns3_vector_coalesce_init()
574 if (ae_dev->dev_specs.int_ql_max) { in hns3_vector_coalesce_init()
575 tx_coal->ql_enable = 1; in hns3_vector_coalesce_init()
576 rx_coal->ql_enable = 1; in hns3_vector_coalesce_init()
577 tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; in hns3_vector_coalesce_init()
578 rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; in hns3_vector_coalesce_init()
579 tx_coal->int_ql = ptx_coal->int_ql; in hns3_vector_coalesce_init()
580 rx_coal->int_ql = prx_coal->int_ql; in hns3_vector_coalesce_init()
588 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; in hns3_vector_coalesce_init_hw()
589 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; in hns3_vector_coalesce_init_hw()
590 struct hnae3_handle *h = priv->ae_handle; in hns3_vector_coalesce_init_hw()
592 hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); in hns3_vector_coalesce_init_hw()
593 hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); in hns3_vector_coalesce_init_hw()
594 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); in hns3_vector_coalesce_init_hw()
596 if (tx_coal->ql_enable) in hns3_vector_coalesce_init_hw()
597 hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); in hns3_vector_coalesce_init_hw()
599 if (rx_coal->ql_enable) in hns3_vector_coalesce_init_hw()
600 hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); in hns3_vector_coalesce_init_hw()
606 struct hnae3_knic_private_info *kinfo = &h->kinfo; in hns3_nic_set_real_num_queue()
607 struct hnae3_tc_info *tc_info = &kinfo->tc_info; in hns3_nic_set_real_num_queue()
608 unsigned int queue_size = kinfo->num_tqps; in hns3_nic_set_real_num_queue()
611 if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { in hns3_nic_set_real_num_queue()
614 ret = netdev_set_num_tc(netdev, tc_info->num_tc); in hns3_nic_set_real_num_queue()
621 for (i = 0; i < tc_info->num_tc; i++) in hns3_nic_set_real_num_queue()
622 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], in hns3_nic_set_real_num_queue()
623 tc_info->tqp_offset[i]); in hns3_nic_set_real_num_queue()
647 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); in hns3_get_max_available_channels()
648 rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; in hns3_get_max_available_channels()
674 free_irq_cpu_rmap(netdev->rx_cpu_rmap); in hns3_free_rx_cpu_rmap()
675 netdev->rx_cpu_rmap = NULL; in hns3_free_rx_cpu_rmap()
686 if (!netdev->rx_cpu_rmap) { in hns3_set_rx_cpu_rmap()
687 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); in hns3_set_rx_cpu_rmap()
688 if (!netdev->rx_cpu_rmap) in hns3_set_rx_cpu_rmap()
689 return -ENOMEM; in hns3_set_rx_cpu_rmap()
692 for (i = 0; i < priv->vector_num; i++) { in hns3_set_rx_cpu_rmap()
693 tqp_vector = &priv->tqp_vector[i]; in hns3_set_rx_cpu_rmap()
694 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, in hns3_set_rx_cpu_rmap()
695 tqp_vector->vector_irq); in hns3_set_rx_cpu_rmap()
708 struct hnae3_handle *h = priv->ae_handle; in hns3_enable_irqs_and_tqps()
711 for (i = 0; i < priv->vector_num; i++) in hns3_enable_irqs_and_tqps()
712 hns3_irq_enable(&priv->tqp_vector[i]); in hns3_enable_irqs_and_tqps()
714 for (i = 0; i < priv->vector_num; i++) in hns3_enable_irqs_and_tqps()
715 hns3_mask_vector_irq(&priv->tqp_vector[i], 1); in hns3_enable_irqs_and_tqps()
717 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_enable_irqs_and_tqps()
718 hns3_tqp_enable(h->kinfo.tqp[i]); in hns3_enable_irqs_and_tqps()
724 struct hnae3_handle *h = priv->ae_handle; in hns3_disable_irqs_and_tqps()
727 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_disable_irqs_and_tqps()
728 hns3_tqp_disable(h->kinfo.tqp[i]); in hns3_disable_irqs_and_tqps()
730 for (i = 0; i < priv->vector_num; i++) in hns3_disable_irqs_and_tqps()
731 hns3_mask_vector_irq(&priv->tqp_vector[i], 0); in hns3_disable_irqs_and_tqps()
733 for (i = 0; i < priv->vector_num; i++) in hns3_disable_irqs_and_tqps()
734 hns3_irq_disable(&priv->tqp_vector[i]); in hns3_disable_irqs_and_tqps()
740 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_net_up()
747 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_nic_net_up()
752 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; in hns3_nic_net_up()
754 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_nic_net_up()
765 for (i = 0; i < priv->vector_num; i++) { in hns3_config_xps()
766 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; in hns3_config_xps()
767 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; in hns3_config_xps()
772 ret = netif_set_xps_queue(priv->netdev, in hns3_config_xps()
773 &tqp_vector->affinity_mask, in hns3_config_xps()
774 ring->tqp->tqp_index); in hns3_config_xps()
776 netdev_warn(priv->netdev, in hns3_config_xps()
779 ring = ring->next; in hns3_config_xps()
792 return -EBUSY; in hns3_nic_net_open()
794 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_nic_net_open()
811 kinfo = &h->kinfo; in hns3_nic_net_open()
813 netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); in hns3_nic_net_open()
815 if (h->ae_algo->ops->set_timer_task) in hns3_nic_net_open()
816 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); in hns3_nic_net_open()
827 struct net_device *ndev = h->kinfo.netdev; in hns3_reset_tx_queue()
832 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_reset_tx_queue()
834 priv->ring[i].queue_index); in hns3_reset_tx_queue()
847 ops = priv->ae_handle->ae_algo->ops; in hns3_nic_net_down()
848 if (ops->stop) in hns3_nic_net_down()
849 ops->stop(priv->ae_handle); in hns3_nic_net_down()
856 hns3_clear_all_ring(priv->ae_handle, false); in hns3_nic_net_down()
858 hns3_reset_tx_queue(priv->ae_handle); in hns3_nic_net_down()
866 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_nic_net_stop()
871 if (h->ae_algo->ops->set_timer_task) in hns3_nic_net_stop()
872 h->ae_algo->ops->set_timer_task(priv->ae_handle, false); in hns3_nic_net_stop()
887 if (h->ae_algo->ops->add_uc_addr) in hns3_nic_uc_sync()
888 return h->ae_algo->ops->add_uc_addr(h, addr); in hns3_nic_uc_sync()
902 if (ether_addr_equal(addr, netdev->dev_addr)) in hns3_nic_uc_unsync()
905 if (h->ae_algo->ops->rm_uc_addr) in hns3_nic_uc_unsync()
906 return h->ae_algo->ops->rm_uc_addr(h, addr); in hns3_nic_uc_unsync()
916 if (h->ae_algo->ops->add_mc_addr) in hns3_nic_mc_sync()
917 return h->ae_algo->ops->add_mc_addr(h, addr); in hns3_nic_mc_sync()
927 if (h->ae_algo->ops->rm_mc_addr) in hns3_nic_mc_unsync()
928 return h->ae_algo->ops->rm_mc_addr(h, addr); in hns3_nic_mc_unsync()
937 if (netdev->flags & IFF_PROMISC) in hns3_get_netdev_flags()
939 else if (netdev->flags & IFF_ALLMULTI) in hns3_get_netdev_flags()
958 h->netdev_flags = new_flags; in hns3_nic_set_rx_mode()
966 if (ops->request_update_promisc_mode) in hns3_request_update_promisc_mode()
967 ops->request_update_promisc_mode(handle); in hns3_request_update_promisc_mode()
972 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_space()
978 ntc = smp_load_acquire(&tx_spare->last_to_clean); in hns3_tx_spare_space()
979 ntu = tx_spare->next_to_use; in hns3_tx_spare_space()
982 return ntc - ntu - 1; in hns3_tx_spare_space()
987 return max(ntc, tx_spare->len - ntu) - 1; in hns3_tx_spare_space()
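/* Editorial note: a minimal standalone sketch of the free-space computation
 * used above, assuming the usual circular-buffer convention of reserving one
 * unit so that a full buffer is never confused with an empty one. Names are
 * illustrative and not part of the driver.
 */
static unsigned int tx_spare_free_space(unsigned int ntc, unsigned int ntu,
					unsigned int len)
{
	unsigned int head, tail;

	/* clean pointer ahead of use pointer: free space is the gap between them */
	if (ntc > ntu)
		return ntc - ntu - 1;

	/* otherwise the free space is split into two contiguous pieces; report
	 * the larger one, since an allocation must be contiguous
	 */
	tail = len - ntu;	/* room from next_to_use to the end of the buffer */
	head = ntc;		/* room from the start of the buffer up to last_to_clean */

	return (head > tail ? head : tail) - 1;
}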
992 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_update()
995 tx_spare->last_to_clean == tx_spare->next_to_clean) in hns3_tx_spare_update()
1001 smp_store_release(&tx_spare->last_to_clean, in hns3_tx_spare_update()
1002 tx_spare->next_to_clean); in hns3_tx_spare_update()
1009 u32 len = skb->len <= ring->tx_copybreak ? skb->len : in hns3_can_use_tx_bounce()
1012 if (len > ring->tx_copybreak) in hns3_can_use_tx_bounce()
1027 if (skb->len <= ring->tx_copybreak || !tx_sgl || in hns3_can_use_tx_sgl()
1029 skb_shinfo(skb)->nr_frags < tx_sgl)) in hns3_can_use_tx_sgl()
1042 u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; in hns3_init_tx_spare_buffer()
1082 tx_spare->dma = dma; in hns3_init_tx_spare_buffer()
1083 tx_spare->buf = page_address(page); in hns3_init_tx_spare_buffer()
1084 tx_spare->len = PAGE_SIZE << order; in hns3_init_tx_spare_buffer()
1085 ring->tx_spare = tx_spare; in hns3_init_tx_spare_buffer()
1086 ring->tx_copybreak = priv->tx_copybreak; in hns3_init_tx_spare_buffer()
1094 ring->tqp->handle->kinfo.tx_spare_buf_size = 0; in hns3_init_tx_spare_buffer()
1104 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_alloc()
1105 u32 ntu = tx_spare->next_to_use; in hns3_tx_spare_alloc()
1113 if (ntu + size > tx_spare->len) { in hns3_tx_spare_alloc()
1114 *cb_len += (tx_spare->len - ntu); in hns3_tx_spare_alloc()
1118 tx_spare->next_to_use = ntu + size; in hns3_tx_spare_alloc()
1119 if (tx_spare->next_to_use == tx_spare->len) in hns3_tx_spare_alloc()
1120 tx_spare->next_to_use = 0; in hns3_tx_spare_alloc()
1122 *dma = tx_spare->dma + ntu; in hns3_tx_spare_alloc()
1124 return tx_spare->buf + ntu; in hns3_tx_spare_alloc()
1129 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_rollback()
1131 if (len > tx_spare->next_to_use) { in hns3_tx_spare_rollback()
1132 len -= tx_spare->next_to_use; in hns3_tx_spare_rollback()
1133 tx_spare->next_to_use = tx_spare->len - len; in hns3_tx_spare_rollback()
1135 tx_spare->next_to_use -= len; in hns3_tx_spare_rollback()
1140 struct hns3_desc_cb *cb) in hns3_tx_spare_reclaim_cb() argument
1142 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_reclaim_cb()
1143 u32 ntc = tx_spare->next_to_clean; in hns3_tx_spare_reclaim_cb()
1144 u32 len = cb->length; in hns3_tx_spare_reclaim_cb()
1146 tx_spare->next_to_clean += len; in hns3_tx_spare_reclaim_cb()
1148 if (tx_spare->next_to_clean >= tx_spare->len) { in hns3_tx_spare_reclaim_cb()
1149 tx_spare->next_to_clean -= tx_spare->len; in hns3_tx_spare_reclaim_cb()
1151 if (tx_spare->next_to_clean) { in hns3_tx_spare_reclaim_cb()
1153 len = tx_spare->next_to_clean; in hns3_tx_spare_reclaim_cb()
1160 * tx_spare->next_to_clean is moved forward. in hns3_tx_spare_reclaim_cb()
1162 if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) { in hns3_tx_spare_reclaim_cb()
1163 dma_addr_t dma = tx_spare->dma + ntc; in hns3_tx_spare_reclaim_cb()
1168 struct sg_table *sgt = tx_spare->buf + ntc; in hns3_tx_spare_reclaim_cb()
1170 dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, in hns3_tx_spare_reclaim_cb()
1197 if (l3.v4->version == 4) in hns3_set_tso()
1198 l3.v4->check = 0; in hns3_set_tso()
1201 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in hns3_set_tso()
1212 if (l3.v4->version == 4) in hns3_set_tso()
1213 l3.v4->check = 0; in hns3_set_tso()
1217 l4_offset = l4.hdr - skb->data; in hns3_set_tso()
1220 l4_paylen = skb->len - l4_offset; in hns3_set_tso()
1222 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in hns3_set_tso()
1224 csum_replace_by_diff(&l4.udp->check, in hns3_set_tso()
1227 hdr_len = (l4.tcp->doff << 2) + l4_offset; in hns3_set_tso()
1228 csum_replace_by_diff(&l4.tcp->check, in hns3_set_tso()
1232 *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len; in hns3_set_tso()
1235 *paylen_fdop_ol4cs = skb->len - hdr_len; in hns3_set_tso()
1239 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) in hns3_set_tso()
1243 *mss = skb_shinfo(skb)->gso_size; in hns3_set_tso()
1263 if (skb->protocol == htons(ETH_P_IPV6)) { in hns3_get_l4_protocol()
1265 l4_proto_tmp = l3.v6->nexthdr; in hns3_get_l4_protocol()
1267 ipv6_skip_exthdr(skb, exthdr - skb->data, in hns3_get_l4_protocol()
1269 } else if (skb->protocol == htons(ETH_P_IP)) { in hns3_get_l4_protocol()
1270 l4_proto_tmp = l3.v4->protocol; in hns3_get_l4_protocol()
1272 return -EINVAL; in hns3_get_l4_protocol()
1278 if (!skb->encapsulation) { in hns3_get_l4_protocol()
1287 if (l3.v6->version == 6) { in hns3_get_l4_protocol()
1289 l4_proto_tmp = l3.v6->nexthdr; in hns3_get_l4_protocol()
1291 ipv6_skip_exthdr(skb, exthdr - skb->data, in hns3_get_l4_protocol()
1293 } else if (l3.v4->version == 4) { in hns3_get_l4_protocol()
1294 l4_proto_tmp = l3.v4->protocol; in hns3_get_l4_protocol()
1302 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
1310 struct hns3_nic_priv *priv = netdev_priv(skb->dev); in hns3_tunnel_csum_bug()
1311 struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); in hns3_tunnel_csum_bug()
1317 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hns3_tunnel_csum_bug()
1322 if (!(!skb->encapsulation && in hns3_tunnel_csum_bug()
1323 (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || in hns3_tunnel_csum_bug()
1324 l4.udp->dest == htons(GENEVE_UDP_PORT) || in hns3_tunnel_csum_bug()
1325 l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT)))) in hns3_tunnel_csum_bug()
1342 /* compute OL2 header size, defined in 2 Bytes */ in hns3_set_outer_l2l3l4()
1343 l2_len = l3.hdr - skb->data; in hns3_set_outer_l2l3l4()
1346 /* compute OL3 header size, defined in 4 Bytes */ in hns3_set_outer_l2l3l4()
1347 l3_len = l4.hdr - l3.hdr; in hns3_set_outer_l2l3l4()
1351 /* compute OL4 header size, defined in 4 Bytes */ in hns3_set_outer_l2l3l4()
1352 l4_len = il2_hdr - l4.hdr; in hns3_set_outer_l2l3l4()
1356 if (skb->protocol == htons(ETH_P_IP)) { in hns3_set_outer_l2l3l4()
1365 } else if (skb->protocol == htons(ETH_P_IPV6)) { in hns3_set_outer_l2l3l4()
1381 if (l3.v4->version == 4) { in hns3_set_l3_type()
1390 } else if (l3.v6->version == 6) { in hns3_set_l3_type()
1399 /* compute inner(/normal) L4 header size, defined in 4 Bytes */ in hns3_set_l4_csum_length()
1406 l4.tcp->doff); in hns3_set_l4_csum_length()
1433 return -EDOM; in hns3_set_l4_csum_length()
1448 unsigned char *l2_hdr = skb->data; in hns3_set_l2l3l4()
1458 if (skb->encapsulation) { in hns3_set_l2l3l4()
1465 return -EDOM; in hns3_set_l2l3l4()
1484 /* compute inner(/normal) L2 header size, defined in 2 Bytes */ in hns3_set_l2l3l4()
1485 l2_len = l3.hdr - l2_hdr; in hns3_set_l2l3l4()
1488 /* compute inner(/normal) L3 header size, defined in 4 Bytes */ in hns3_set_l2l3l4()
1489 l3_len = l4.hdr - l3.hdr; in hns3_set_l2l3l4()
1498 struct hnae3_handle *handle = tx_ring->tqp->handle; in hns3_handle_vtags()
1503 if (!(skb->protocol == htons(ETH_P_8021Q) || in hns3_handle_vtags()
1513 ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && in hns3_handle_vtags()
1514 handle->port_base_vlan_state == in hns3_handle_vtags()
1516 return -EINVAL; in hns3_handle_vtags()
1518 if (skb->protocol == htons(ETH_P_8021Q) && in hns3_handle_vtags()
1519 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in hns3_handle_vtags()
1524 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1532 if (skb->protocol == htons(ETH_P_8021Q) && in hns3_handle_vtags()
1533 handle->port_base_vlan_state == in hns3_handle_vtags()
1539 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1548 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) in hns3_handle_vtags()
1551 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1558 struct hns3_nic_priv *priv = netdev_priv(skb->dev); in hns3_check_hw_tx_csum()
1561 * HW checksum of the non-IP packets and GSO packets is handled at in hns3_check_hw_tx_csum()
1565 !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) in hns3_check_hw_tx_csum()
1582 pa->paylen_ol4cs = skb->len; in hns3_init_desc_data()
1583 pa->ol_type_vlan_len_msec = 0; in hns3_init_desc_data()
1584 pa->type_cs_vlan_tso = 0; in hns3_init_desc_data()
1585 pa->mss_hw_csum = 0; in hns3_init_desc_data()
1586 pa->inner_vtag = 0; in hns3_init_desc_data()
1587 pa->out_vtag = 0; in hns3_init_desc_data()
1601 param->inner_vtag = skb_vlan_tag_get(skb); in hns3_handle_vlan_info()
1602 param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & in hns3_handle_vlan_info()
1604 hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); in hns3_handle_vlan_info()
1606 param->out_vtag = skb_vlan_tag_get(skb); in hns3_handle_vlan_info()
1607 param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & in hns3_handle_vlan_info()
1609 hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, in hns3_handle_vlan_info()
1625 hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, in hns3_handle_csum_partial()
1627 hns3_set_field(param->ol_type_vlan_len_msec, in hns3_handle_csum_partial()
1629 skb->csum_offset >> 1); in hns3_handle_csum_partial()
1630 param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); in hns3_handle_csum_partial()
1643 &param->type_cs_vlan_tso, in hns3_handle_csum_partial()
1644 &param->ol_type_vlan_len_msec); in hns3_handle_csum_partial()
1650 ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum, in hns3_handle_csum_partial()
1651 &param->type_cs_vlan_tso, &desc_cb->send_bytes); in hns3_handle_csum_partial()
1671 desc_cb->send_bytes = skb->len; in hns3_fill_skb_desc()
1673 if (skb->ip_summed == CHECKSUM_PARTIAL) { in hns3_fill_skb_desc()
1680 desc->tx.ol_type_vlan_len_msec = in hns3_fill_skb_desc()
1682 desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso); in hns3_fill_skb_desc()
1683 desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs); in hns3_fill_skb_desc()
1684 desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum); in hns3_fill_skb_desc()
1685 desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag); in hns3_fill_skb_desc()
1686 desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag); in hns3_fill_skb_desc()
1696 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; in hns3_fill_desc()
1701 desc->addr = cpu_to_le64(dma); in hns3_fill_desc()
1702 desc->tx.send_size = cpu_to_le16(size); in hns3_fill_desc()
1703 desc->tx.bdtp_fe_sc_vld_ra_ri = in hns3_fill_desc()
1706 trace_hns3_tx_desc(ring, ring->next_to_use); in hns3_fill_desc()
1718 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); in hns3_fill_desc()
1719 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? in hns3_fill_desc()
1721 desc->tx.bdtp_fe_sc_vld_ra_ri = in hns3_fill_desc()
1724 trace_hns3_tx_desc(ring, ring->next_to_use); in hns3_fill_desc()
1728 desc = &ring->desc[ring->next_to_use]; in hns3_fill_desc()
1737 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_map_and_fill_desc()
1749 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); in hns3_map_and_fill_desc()
1767 return -ENOMEM; in hns3_map_and_fill_desc()
1770 desc_cb->priv = priv; in hns3_map_and_fill_desc()
1771 desc_cb->length = size; in hns3_map_and_fill_desc()
1772 desc_cb->dma = dma; in hns3_map_and_fill_desc()
1773 desc_cb->type = type; in hns3_map_and_fill_desc()
1787 size -= HNS3_MAX_BD_SIZE; in hns3_skb_bd_num()
1799 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in hns3_skb_bd_num()
1800 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in hns3_skb_bd_num()
1807 size -= HNS3_MAX_BD_SIZE; in hns3_skb_bd_num()
1830 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && in hns3_tx_bd_num()
1832 skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) in hns3_tx_bd_num()
1833 return skb_shinfo(skb)->nr_frags + 1U; in hns3_tx_bd_num()
1854 if (!skb->encapsulation) in hns3_gso_hdr_len()
1862 * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss,
1863 * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger
1864 * than MSS except the last max_non_tso_bd_num - 1 frags.
1872 for (i = 0; i < max_non_tso_bd_num - 1U; i++) in hns3_skb_need_linearized()
1878 if (tot_len + bd_size[max_non_tso_bd_num - 1U] < in hns3_skb_need_linearized()
1879 skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) in hns3_skb_need_linearized()
1882 /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater in hns3_skb_need_linearized()
1885 for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { in hns3_skb_need_linearized()
1886 tot_len -= bd_size[i]; in hns3_skb_need_linearized()
1887 tot_len += bd_size[i + max_non_tso_bd_num - 1U]; in hns3_skb_need_linearized()
1889 if (tot_len < skb_shinfo(skb)->gso_size) in hns3_skb_need_linearized()
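/* Editorial note: the preceding check requires the first max_non_tso_bd_num
 * buffer sizes to cover the GSO header plus one MSS; the loop above is then a
 * sliding-window check that every subsequent run of max_non_tso_bd_num - 1
 * consecutive sizes sums to at least the MSS, otherwise the skb is
 * linearized. A generic sketch of that kind of check (illustrative helper,
 * not part of the driver):
 */
static bool any_window_below(const unsigned int *size, unsigned int n,
			     unsigned int win, unsigned int threshold)
{
	unsigned int sum = 0;
	unsigned int i;

	if (n < win)
		return false;

	for (i = 0; i < win; i++)
		sum += size[i];
	if (sum < threshold)
		return true;

	/* slide the window one element at a time: drop the oldest size,
	 * add the newest, and re-check against the threshold
	 */
	for (i = win; i < n; i++) {
		sum += size[i] - size[i - win];
		if (sum < threshold)
			return true;
	}

	return false;
}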
1901 size[i] = skb_frag_size(&shinfo->frags[i]); in hns3_shinfo_pack()
1913 return -ENOMEM; in hns3_skb_linearize()
1916 /* The skb->len has exceeded the hw limitation, linearization in hns3_skb_linearize()
1919 if (skb->len > HNS3_MAX_TSO_SIZE || in hns3_skb_linearize()
1920 (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { in hns3_skb_linearize()
1922 return -ENOMEM; in hns3_skb_linearize()
1927 return -ENOMEM; in hns3_skb_linearize()
1938 u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; in hns3_nic_maybe_stop_tx()
1952 return -ENOMEM; in hns3_nic_maybe_stop_tx()
1954 bd_num = hns3_tx_bd_count(skb->len); in hns3_nic_maybe_stop_tx()
1963 netif_stop_subqueue(netdev, ring->queue_index); in hns3_nic_maybe_stop_tx()
1971 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_nic_maybe_stop_tx()
1972 netif_start_subqueue(netdev, ring->queue_index); in hns3_nic_maybe_stop_tx()
1978 return -EBUSY; in hns3_nic_maybe_stop_tx()
1986 for (i = 0; i < ring->desc_num; i++) { in hns3_clear_desc()
1987 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; in hns3_clear_desc()
1993 if (ring->next_to_use == next_to_use_orig) in hns3_clear_desc()
1999 desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_clear_desc()
2001 if (!desc_cb->dma) in hns3_clear_desc()
2005 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) in hns3_clear_desc()
2006 dma_unmap_single(dev, desc_cb->dma, desc_cb->length, in hns3_clear_desc()
2008 else if (desc_cb->type & in hns3_clear_desc()
2010 hns3_tx_spare_rollback(ring, desc_cb->length); in hns3_clear_desc()
2011 else if (desc_cb->length) in hns3_clear_desc()
2012 dma_unmap_page(dev, desc_cb->dma, desc_cb->length, in hns3_clear_desc()
2015 desc_cb->length = 0; in hns3_clear_desc()
2016 desc_cb->dma = 0; in hns3_clear_desc()
2017 desc_cb->type = DESC_TYPE_UNKNOWN; in hns3_clear_desc()
2033 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in hns3_fill_skb_to_desc()
2034 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in hns3_fill_skb_to_desc()
2068 int idx = (ring->next_to_use - num + ring->desc_num) % in hns3_tx_push_bd()
2069 ring->desc_num; in hns3_tx_push_bd()
2071 u64_stats_update_begin(&ring->syncp); in hns3_tx_push_bd()
2072 ring->stats.tx_push++; in hns3_tx_push_bd()
2073 u64_stats_update_end(&ring->syncp); in hns3_tx_push_bd()
2074 memcpy(&desc[offset], &ring->desc[idx], in hns3_tx_push_bd()
2077 } while (--num); in hns3_tx_push_bd()
2079 __iowrite64_copy(ring->tqp->mem_base, desc, in hns3_tx_push_bd()
2088 __le64 bd_num = cpu_to_le64((u64)ring->pending_buf); in hns3_tx_mem_doorbell()
2095 __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET, in hns3_tx_mem_doorbell()
2097 u64_stats_update_begin(&ring->syncp); in hns3_tx_mem_doorbell()
2098 ring->stats.tx_mem_doorbell += ring->pending_buf; in hns3_tx_mem_doorbell()
2099 u64_stats_update_end(&ring->syncp); in hns3_tx_mem_doorbell()
2111 if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num && in hns3_tx_doorbell()
2112 !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) { in hns3_tx_doorbell()
2117 smp_store_release(&ring->last_to_use, ring->next_to_use); in hns3_tx_doorbell()
2122 ring->pending_buf += num; in hns3_tx_doorbell()
2132 smp_store_release(&ring->last_to_use, ring->next_to_use); in hns3_tx_doorbell()
2134 if (ring->tqp->mem_base) in hns3_tx_doorbell()
2137 writel(ring->pending_buf, in hns3_tx_doorbell()
2138 ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); in hns3_tx_doorbell()
2140 ring->pending_buf = 0; in hns3_tx_doorbell()
2148 if (!(h->ae_algo->ops->set_tx_hwts_info && in hns3_tsyn()
2149 h->ae_algo->ops->set_tx_hwts_info(h, skb))) in hns3_tsyn()
2152 desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B)); in hns3_tsyn()
2158 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_handle_tx_bounce()
2167 if (skb->len <= ring->tx_copybreak) { in hns3_handle_tx_bounce()
2168 size = skb->len; in hns3_handle_tx_bounce()
2184 desc_cb->priv = skb; in hns3_handle_tx_bounce()
2185 desc_cb->length = cb_len; in hns3_handle_tx_bounce()
2186 desc_cb->dma = dma; in hns3_handle_tx_bounce()
2187 desc_cb->type = type; in hns3_handle_tx_bounce()
2211 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_handle_tx_sgl()
2212 u32 nfrag = skb_shinfo(skb)->nr_frags + 1; in hns3_handle_tx_sgl()
2229 sgt->sgl = (struct scatterlist *)(sgt + 1); in hns3_handle_tx_sgl()
2230 sg_init_table(sgt->sgl, nfrag); in hns3_handle_tx_sgl()
2231 nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); in hns3_handle_tx_sgl()
2235 return -ENOMEM; in hns3_handle_tx_sgl()
2238 sgt->orig_nents = nents; in hns3_handle_tx_sgl()
2239 sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, in hns3_handle_tx_sgl()
2241 if (unlikely(!sgt->nents)) { in hns3_handle_tx_sgl()
2244 return -ENOMEM; in hns3_handle_tx_sgl()
2247 desc_cb->priv = skb; in hns3_handle_tx_sgl()
2248 desc_cb->length = cb_len; in hns3_handle_tx_sgl()
2249 desc_cb->dma = dma; in hns3_handle_tx_sgl()
2250 desc_cb->type = DESC_TYPE_SGL_SKB; in hns3_handle_tx_sgl()
2252 for (i = 0; i < sgt->nents; i++) in hns3_handle_tx_sgl()
2253 bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), in hns3_handle_tx_sgl()
2254 sg_dma_len(sgt->sgl + i)); in hns3_handle_tx_sgl()
2265 if (!ring->tx_spare) in hns3_handle_desc_filling()
2287 ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], in hns3_handle_skb_desc()
2292 /* 'ret < 0' means filling error, 'ret == 0' means skb->len is in hns3_handle_skb_desc()
2308 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; in hns3_nic_net_xmit()
2309 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_nic_net_xmit()
2324 prefetch(skb->data); in hns3_nic_net_xmit()
2328 if (ret == -EBUSY) { in hns3_nic_net_xmit()
2337 ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use); in hns3_nic_net_xmit()
2341 pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : in hns3_nic_net_xmit()
2342 (ring->desc_num - 1); in hns3_nic_net_xmit()
2344 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in hns3_nic_net_xmit()
2345 hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]); in hns3_nic_net_xmit()
2347 ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= in hns3_nic_net_xmit()
2354 dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); in hns3_nic_net_xmit()
2355 doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes, in hns3_nic_net_xmit()
2375 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) in hns3_nic_net_set_mac_address()
2376 return -EADDRNOTAVAIL; in hns3_nic_net_set_mac_address()
2378 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { in hns3_nic_net_set_mac_address()
2379 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2388 if (!hns3_is_phys_func(h->pdev) && in hns3_nic_net_set_mac_address()
2389 !is_zero_ether_addr(netdev->perm_addr)) { in hns3_nic_net_set_mac_address()
2390 hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); in hns3_nic_net_set_mac_address()
2391 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2394 return -EPERM; in hns3_nic_net_set_mac_address()
2397 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); in hns3_nic_net_set_mac_address()
2403 eth_hw_addr_set(netdev, mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2414 return -EINVAL; in hns3_nic_do_ioctl()
2416 if (!h->ae_algo->ops->do_ioctl) in hns3_nic_do_ioctl()
2417 return -EOPNOTSUPP; in hns3_nic_do_ioctl()
2419 return h->ae_algo->ops->do_ioctl(h, ifr, cmd); in hns3_nic_do_ioctl()
2425 netdev_features_t changed = netdev->features ^ features; in hns3_nic_set_features()
2427 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_set_features()
2431 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { in hns3_nic_set_features()
2433 ret = h->ae_algo->ops->set_gro_en(h, enable); in hns3_nic_set_features()
2439 h->ae_algo->ops->enable_hw_strip_rxvtag) { in hns3_nic_set_features()
2441 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); in hns3_nic_set_features()
2446 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { in hns3_nic_set_features()
2448 h->ae_algo->ops->enable_fd(h, enable); in hns3_nic_set_features()
2451 if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && in hns3_nic_set_features()
2452 h->ae_algo->ops->cls_flower_active(h)) { in hns3_nic_set_features()
2455 return -EINVAL; in hns3_nic_set_features()
2459 h->ae_algo->ops->enable_vlan_filter) { in hns3_nic_set_features()
2461 ret = h->ae_algo->ops->enable_vlan_filter(h, enable); in hns3_nic_set_features()
2478 if (skb->ip_summed != CHECKSUM_PARTIAL) in hns3_features_check()
2481 if (skb->encapsulation) in hns3_features_check()
2506 start = u64_stats_fetch_begin(&ring->syncp); in hns3_fetch_stats()
2508 stats->tx_bytes += ring->stats.tx_bytes; in hns3_fetch_stats()
2509 stats->tx_packets += ring->stats.tx_pkts; in hns3_fetch_stats()
2510 stats->tx_dropped += ring->stats.sw_err_cnt; in hns3_fetch_stats()
2511 stats->tx_dropped += ring->stats.tx_vlan_err; in hns3_fetch_stats()
2512 stats->tx_dropped += ring->stats.tx_l4_proto_err; in hns3_fetch_stats()
2513 stats->tx_dropped += ring->stats.tx_l2l3l4_err; in hns3_fetch_stats()
2514 stats->tx_dropped += ring->stats.tx_tso_err; in hns3_fetch_stats()
2515 stats->tx_dropped += ring->stats.over_max_recursion; in hns3_fetch_stats()
2516 stats->tx_dropped += ring->stats.hw_limitation; in hns3_fetch_stats()
2517 stats->tx_dropped += ring->stats.copy_bits_err; in hns3_fetch_stats()
2518 stats->tx_dropped += ring->stats.skb2sgl_err; in hns3_fetch_stats()
2519 stats->tx_dropped += ring->stats.map_sg_err; in hns3_fetch_stats()
2520 stats->tx_errors += ring->stats.sw_err_cnt; in hns3_fetch_stats()
2521 stats->tx_errors += ring->stats.tx_vlan_err; in hns3_fetch_stats()
2522 stats->tx_errors += ring->stats.tx_l4_proto_err; in hns3_fetch_stats()
2523 stats->tx_errors += ring->stats.tx_l2l3l4_err; in hns3_fetch_stats()
2524 stats->tx_errors += ring->stats.tx_tso_err; in hns3_fetch_stats()
2525 stats->tx_errors += ring->stats.over_max_recursion; in hns3_fetch_stats()
2526 stats->tx_errors += ring->stats.hw_limitation; in hns3_fetch_stats()
2527 stats->tx_errors += ring->stats.copy_bits_err; in hns3_fetch_stats()
2528 stats->tx_errors += ring->stats.skb2sgl_err; in hns3_fetch_stats()
2529 stats->tx_errors += ring->stats.map_sg_err; in hns3_fetch_stats()
2531 stats->rx_bytes += ring->stats.rx_bytes; in hns3_fetch_stats()
2532 stats->rx_packets += ring->stats.rx_pkts; in hns3_fetch_stats()
2533 stats->rx_dropped += ring->stats.l2_err; in hns3_fetch_stats()
2534 stats->rx_errors += ring->stats.l2_err; in hns3_fetch_stats()
2535 stats->rx_errors += ring->stats.l3l4_csum_err; in hns3_fetch_stats()
2536 stats->rx_crc_errors += ring->stats.l2_err; in hns3_fetch_stats()
2537 stats->multicast += ring->stats.rx_multicast; in hns3_fetch_stats()
2538 stats->rx_length_errors += ring->stats.err_pkt_len; in hns3_fetch_stats()
2540 } while (u64_stats_fetch_retry(&ring->syncp, start)); in hns3_fetch_stats()
2547 int queue_num = priv->ae_handle->kinfo.num_tqps; in hns3_nic_get_stats64()
2548 struct hnae3_handle *handle = priv->ae_handle; in hns3_nic_get_stats64()
2553 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_nic_get_stats64()
2556 handle->ae_algo->ops->update_stats(handle); in hns3_nic_get_stats64()
2561 ring = &priv->ring[idx]; in hns3_nic_get_stats64()
2565 ring = &priv->ring[idx + queue_num]; in hns3_nic_get_stats64()
2569 stats->tx_bytes = ring_total_stats.tx_bytes; in hns3_nic_get_stats64()
2570 stats->tx_packets = ring_total_stats.tx_packets; in hns3_nic_get_stats64()
2571 stats->rx_bytes = ring_total_stats.rx_bytes; in hns3_nic_get_stats64()
2572 stats->rx_packets = ring_total_stats.rx_packets; in hns3_nic_get_stats64()
2574 stats->rx_errors = ring_total_stats.rx_errors; in hns3_nic_get_stats64()
2575 stats->multicast = ring_total_stats.multicast; in hns3_nic_get_stats64()
2576 stats->rx_length_errors = ring_total_stats.rx_length_errors; in hns3_nic_get_stats64()
2577 stats->rx_crc_errors = ring_total_stats.rx_crc_errors; in hns3_nic_get_stats64()
2578 stats->rx_missed_errors = netdev->stats.rx_missed_errors; in hns3_nic_get_stats64()
2580 stats->tx_errors = ring_total_stats.tx_errors; in hns3_nic_get_stats64()
2581 stats->rx_dropped = ring_total_stats.rx_dropped; in hns3_nic_get_stats64()
2582 stats->tx_dropped = ring_total_stats.tx_dropped; in hns3_nic_get_stats64()
2583 stats->collisions = netdev->stats.collisions; in hns3_nic_get_stats64()
2584 stats->rx_over_errors = netdev->stats.rx_over_errors; in hns3_nic_get_stats64()
2585 stats->rx_frame_errors = netdev->stats.rx_frame_errors; in hns3_nic_get_stats64()
2586 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; in hns3_nic_get_stats64()
2587 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; in hns3_nic_get_stats64()
2588 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; in hns3_nic_get_stats64()
2589 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; in hns3_nic_get_stats64()
2590 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; in hns3_nic_get_stats64()
2591 stats->tx_window_errors = netdev->stats.tx_window_errors; in hns3_nic_get_stats64()
2592 stats->rx_compressed = netdev->stats.rx_compressed; in hns3_nic_get_stats64()
2593 stats->tx_compressed = netdev->stats.tx_compressed; in hns3_nic_get_stats64()
2600 u8 tc = mqprio_qopt->qopt.num_tc; in hns3_setup_tc()
2601 u16 mode = mqprio_qopt->mode; in hns3_setup_tc()
2602 u8 hw = mqprio_qopt->qopt.hw; in hns3_setup_tc()
2607 return -EOPNOTSUPP; in hns3_setup_tc()
2610 return -EINVAL; in hns3_setup_tc()
2613 return -EINVAL; in hns3_setup_tc()
2616 kinfo = &h->kinfo; in hns3_setup_tc()
2620 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? in hns3_setup_tc()
2621 kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; in hns3_setup_tc()
2627 int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); in hns3_setup_tc_cls_flower()
2628 struct hnae3_handle *h = hns3_get_handle(priv->netdev); in hns3_setup_tc_cls_flower()
2630 switch (flow->command) { in hns3_setup_tc_cls_flower()
2632 if (h->ae_algo->ops->add_cls_flower) in hns3_setup_tc_cls_flower()
2633 return h->ae_algo->ops->add_cls_flower(h, flow, tc); in hns3_setup_tc_cls_flower()
2636 if (h->ae_algo->ops->del_cls_flower) in hns3_setup_tc_cls_flower()
2637 return h->ae_algo->ops->del_cls_flower(h, flow); in hns3_setup_tc_cls_flower()
2643 return -EOPNOTSUPP; in hns3_setup_tc_cls_flower()
2651 if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) in hns3_setup_tc_block_cb()
2652 return -EOPNOTSUPP; in hns3_setup_tc_block_cb()
2658 return -EOPNOTSUPP; in hns3_setup_tc_block_cb()
2681 return -EOPNOTSUPP; in hns3_nic_setup_tc()
2691 int ret = -EIO; in hns3_vlan_rx_add_vid()
2693 if (h->ae_algo->ops->set_vlan_filter) in hns3_vlan_rx_add_vid()
2694 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); in hns3_vlan_rx_add_vid()
2703 int ret = -EIO; in hns3_vlan_rx_kill_vid()
2705 if (h->ae_algo->ops->set_vlan_filter) in hns3_vlan_rx_kill_vid()
2706 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); in hns3_vlan_rx_kill_vid()
2715 int ret = -EIO; in hns3_ndo_set_vf_vlan()
2721 if (h->ae_algo->ops->set_vf_vlan_filter) in hns3_ndo_set_vf_vlan()
2722 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, in hns3_ndo_set_vf_vlan()
2733 return -EBUSY; in hns3_set_vf_spoofchk()
2735 if (!handle->ae_algo->ops->set_vf_spoofchk) in hns3_set_vf_spoofchk()
2736 return -EOPNOTSUPP; in hns3_set_vf_spoofchk()
2738 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); in hns3_set_vf_spoofchk()
2745 if (!handle->ae_algo->ops->set_vf_trust) in hns3_set_vf_trust()
2746 return -EOPNOTSUPP; in hns3_set_vf_trust()
2748 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); in hns3_set_vf_trust()
2757 return -EBUSY; in hns3_nic_change_mtu()
2759 if (!h->ae_algo->ops->set_mtu) in hns3_nic_change_mtu()
2760 return -EOPNOTSUPP; in hns3_nic_change_mtu()
2763 "change mtu from %u to %d\n", netdev->mtu, new_mtu); in hns3_nic_change_mtu()
2765 ret = h->ae_algo->ops->set_mtu(h, new_mtu); in hns3_nic_change_mtu()
2770 WRITE_ONCE(netdev->mtu, new_mtu); in hns3_nic_change_mtu()
2780 for (i = 0; i < ndev->num_tx_queues; i++) { in hns3_get_timeout_queue()
2785 trans_start = READ_ONCE(q->trans_start); in hns3_get_timeout_queue()
2788 (trans_start + ndev->watchdog_timeo))) { in hns3_get_timeout_queue()
2790 struct dql *dql = &q->dql; in hns3_get_timeout_queue()
2793 dql->last_obj_cnt, dql->num_queued, in hns3_get_timeout_queue()
2794 dql->adj_limit, dql->num_completed); in hns3_get_timeout_queue()
2797 q->state, in hns3_get_timeout_queue()
2798 jiffies_to_msecs(jiffies - trans_start)); in hns3_get_timeout_queue()
2810 struct napi_struct *napi = &tx_ring->tqp_vector->napi; in hns3_dump_queue_stats()
2815 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, in hns3_dump_queue_stats()
2816 tx_ring->next_to_clean, napi->state); in hns3_dump_queue_stats()
2820 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, in hns3_dump_queue_stats()
2821 tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); in hns3_dump_queue_stats()
2825 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, in hns3_dump_queue_stats()
2826 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); in hns3_dump_queue_stats()
2829 tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell); in hns3_dump_queue_stats()
2841 readl(tx_ring->tqp_vector->mask_addr)); in hns3_dump_queue_reg()
2861 if (timeout_queue >= ndev->num_tx_queues) { in hns3_get_tx_timeo_queue_info()
2864 priv->tx_timeout_count); in hns3_get_tx_timeo_queue_info()
2868 priv->tx_timeout_count++; in hns3_get_tx_timeo_queue_info()
2870 tx_ring = &priv->ring[timeout_queue]; in hns3_get_tx_timeo_queue_info()
2876 if (h->ae_algo->ops->get_mac_stats) { in hns3_get_tx_timeo_queue_info()
2879 h->ae_algo->ops->get_mac_stats(h, &mac_stats); in hns3_get_tx_timeo_queue_info()
2892 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_net_timeout()
2900 if (h->ae_algo->ops->reset_event) in hns3_nic_net_timeout()
2901 h->ae_algo->ops->reset_event(h->pdev, h); in hns3_nic_net_timeout()
2911 if (!h->ae_algo->ops->add_arfs_entry) in hns3_rx_flow_steer()
2912 return -EOPNOTSUPP; in hns3_rx_flow_steer()
2914 if (skb->encapsulation) in hns3_rx_flow_steer()
2915 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2918 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2924 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2926 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); in hns3_rx_flow_steer()
2935 if (!h->ae_algo->ops->get_vf_config) in hns3_nic_get_vf_config()
2936 return -EOPNOTSUPP; in hns3_nic_get_vf_config()
2938 return h->ae_algo->ops->get_vf_config(h, vf, ivf); in hns3_nic_get_vf_config()
2946 if (!h->ae_algo->ops->set_vf_link_state) in hns3_nic_set_vf_link_state()
2947 return -EOPNOTSUPP; in hns3_nic_set_vf_link_state()
2949 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); in hns3_nic_set_vf_link_state()
2957 if (!h->ae_algo->ops->set_vf_rate) in hns3_nic_set_vf_rate()
2958 return -EOPNOTSUPP; in hns3_nic_set_vf_rate()
2960 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, in hns3_nic_set_vf_rate()
2969 if (!h->ae_algo->ops->set_vf_mac) in hns3_nic_set_vf_mac()
2970 return -EOPNOTSUPP; in hns3_nic_set_vf_mac()
2977 return -EINVAL; in hns3_nic_set_vf_mac()
2980 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); in hns3_nic_set_vf_mac()
2988 __be16 protocol = skb->protocol; in hns3_get_skb_dscp()
3009 if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP || in hns3_nic_select_queue()
3010 !h->ae_algo->ops->get_dscp_prio) in hns3_nic_select_queue()
3017 skb->priority = h->kinfo.dscp_prio[dscp]; in hns3_nic_select_queue()
3018 if (skb->priority == HNAE3_PRIO_ID_INVALID) in hns3_nic_select_queue()
3019 skb->priority = 0; in hns3_nic_select_queue()
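/* Editorial note: DSCP is the upper six bits of the IPv4 ToS byte (or of the
 * IPv6 Traffic Class field), so once the right header byte is in hand the
 * extraction is a two-bit shift that drops the ECN bits. Illustrative helper,
 * not part of the driver:
 */
static unsigned char dscp_from_dsfield(unsigned char dsfield)
{
	return dsfield >> 2;	/* keep the 6 DSCP bits, drop the 2 ECN bits */
}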
3055 u32 dev_id = pdev->device; in hns3_is_phys_func()
3071 dev_warn(&pdev->dev, "un-recognized pci device-id %u", in hns3_is_phys_func()
3080 /* If our VFs are assigned we cannot shut down SR-IOV in hns3_disable_sriov()
3085 dev_warn(&pdev->dev, in hns3_disable_sriov()
3093 /* hns3_probe - Device initialization routine
3108 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); in hns3_probe()
3110 return -ENOMEM; in hns3_probe()
3112 ae_dev->pdev = pdev; in hns3_probe()
3113 ae_dev->flag = ent->driver_data; in hns3_probe()
3134 if (ae_dev->ops->clean_vf_config) in hns3_clean_vf_config()
3135 ae_dev->ops->clean_vf_config(ae_dev, num_vfs); in hns3_clean_vf_config()
3138 /* hns3_remove - Device removal routine
3165 dev_warn(&pdev->dev, "Can not config SRIOV\n"); in hns3_pci_sriov_configure()
3166 return -EINVAL; in hns3_pci_sriov_configure()
3172 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); in hns3_pci_sriov_configure()
3181 dev_warn(&pdev->dev, in hns3_pci_sriov_configure()
3203 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { in hns3_suspend()
3205 if (ae_dev->ops && ae_dev->ops->reset_prepare) in hns3_suspend()
3206 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); in hns3_suspend()
3216 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { in hns3_resume()
3218 if (ae_dev->ops && ae_dev->ops->reset_done) in hns3_resume()
3219 ae_dev->ops->reset_done(ae_dev); in hns3_resume()
3231 dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state); in hns3_error_detected()
3236 if (!ae_dev || !ae_dev->ops) { in hns3_error_detected()
3237 dev_err(&pdev->dev, in hns3_error_detected()
3238 "Can't recover - error happened before device initialized\n"); in hns3_error_detected()
3242 if (ae_dev->ops->handle_hw_ras_error) in hns3_error_detected()
3243 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); in hns3_error_detected()
3255 struct device *dev = &pdev->dev; in hns3_slot_reset()
3257 if (!ae_dev || !ae_dev->ops) in hns3_slot_reset()
3260 ops = ae_dev->ops; in hns3_slot_reset()
3262 if (ops->reset_event && ops->get_reset_level && in hns3_slot_reset()
3263 ops->set_default_reset_request) { in hns3_slot_reset()
3264 if (ae_dev->hw_err_reset_req) { in hns3_slot_reset()
3265 reset_type = ops->get_reset_level(ae_dev, in hns3_slot_reset()
3266 &ae_dev->hw_err_reset_req); in hns3_slot_reset()
3267 ops->set_default_reset_request(ae_dev, reset_type); in hns3_slot_reset()
3269 ops->reset_event(pdev, NULL); in hns3_slot_reset()
3282 dev_info(&pdev->dev, "FLR prepare\n"); in hns3_reset_prepare()
3283 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) in hns3_reset_prepare()
3284 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); in hns3_reset_prepare()
3291 dev_info(&pdev->dev, "FLR done\n"); in hns3_reset_done()
3292 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) in hns3_reset_done()
3293 ae_dev->ops->reset_done(ae_dev); in hns3_reset_done()
3320 struct pci_dev *pdev = h->pdev; in hns3_set_default_feature()
3323 netdev->priv_flags |= IFF_UNICAST_FLT; in hns3_set_default_feature()
3325 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in hns3_set_default_feature()
3333 netdev->features |= NETIF_F_GRO_HW; in hns3_set_default_feature()
3336 netdev->features |= NETIF_F_NTUPLE; in hns3_set_default_feature()
3338 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) in hns3_set_default_feature()
3339 netdev->features |= NETIF_F_GSO_UDP_L4; in hns3_set_default_feature()
3341 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) in hns3_set_default_feature()
3342 netdev->features |= NETIF_F_HW_CSUM; in hns3_set_default_feature()
3344 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; in hns3_set_default_feature()
3346 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) in hns3_set_default_feature()
3347 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; in hns3_set_default_feature()
3349 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) in hns3_set_default_feature()
3350 netdev->features |= NETIF_F_HW_TC; in hns3_set_default_feature()
3352 netdev->hw_features |= netdev->features; in hns3_set_default_feature()
3353 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) in hns3_set_default_feature()
3354 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; in hns3_set_default_feature()
3356 netdev->vlan_features |= netdev->features & in hns3_set_default_feature()
3361 netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; in hns3_set_default_feature()
3367 if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) { in hns3_set_default_feature()
3368 netdev->features &= ~NETIF_F_GSO_GRE; in hns3_set_default_feature()
3369 netdev->features &= ~NETIF_F_GSO_GRE_CSUM; in hns3_set_default_feature()
3374 struct hns3_desc_cb *cb) in hns3_alloc_buffer() argument
3379 if (ring->page_pool) { in hns3_alloc_buffer()
3380 p = page_pool_dev_alloc_frag(ring->page_pool, in hns3_alloc_buffer()
3381 &cb->page_offset, in hns3_alloc_buffer()
3384 return -ENOMEM; in hns3_alloc_buffer()
3386 cb->priv = p; in hns3_alloc_buffer()
3387 cb->buf = page_address(p); in hns3_alloc_buffer()
3388 cb->dma = page_pool_get_dma_addr(p); in hns3_alloc_buffer()
3389 cb->type = DESC_TYPE_PP_FRAG; in hns3_alloc_buffer()
3390 cb->reuse_flag = 0; in hns3_alloc_buffer()
3396 return -ENOMEM; in hns3_alloc_buffer()
3398 cb->priv = p; in hns3_alloc_buffer()
3399 cb->page_offset = 0; in hns3_alloc_buffer()
3400 cb->reuse_flag = 0; in hns3_alloc_buffer()
3401 cb->buf = page_address(p); in hns3_alloc_buffer()
3402 cb->length = hns3_page_size(ring); in hns3_alloc_buffer()
3403 cb->type = DESC_TYPE_PAGE; in hns3_alloc_buffer()
3404 page_ref_add(p, USHRT_MAX - 1); in hns3_alloc_buffer()
3405 cb->pagecnt_bias = USHRT_MAX; in hns3_alloc_buffer()
3411 struct hns3_desc_cb *cb, int budget) in hns3_free_buffer() argument
3413 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD | in hns3_free_buffer()
3415 napi_consume_skb(cb->priv, budget); in hns3_free_buffer()
3417 if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias) in hns3_free_buffer()
3418 __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); in hns3_free_buffer()
3419 else if (cb->type & DESC_TYPE_PP_FRAG) in hns3_free_buffer()
3420 page_pool_put_full_page(ring->page_pool, cb->priv, in hns3_free_buffer()
3423 memset(cb, 0, sizeof(*cb)); in hns3_free_buffer()
3426 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) in hns3_map_buffer() argument
3428 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, in hns3_map_buffer()
3429 cb->length, ring_to_dma_dir(ring)); in hns3_map_buffer()
3431 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) in hns3_map_buffer()
3432 return -EIO; in hns3_map_buffer()
3438 struct hns3_desc_cb *cb) in hns3_unmap_buffer() argument
3440 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) in hns3_unmap_buffer()
3441 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, in hns3_unmap_buffer()
3443 else if ((cb->type & DESC_TYPE_PAGE) && cb->length) in hns3_unmap_buffer()
3444 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, in hns3_unmap_buffer()
3446 else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD | in hns3_unmap_buffer()
3448 hns3_tx_spare_reclaim_cb(ring, cb); in hns3_unmap_buffer()
3453 hns3_unmap_buffer(ring, &ring->desc_cb[i]); in hns3_buffer_detach()
3454 ring->desc[i].addr = 0; in hns3_buffer_detach()
3455 ring->desc_cb[i].refill = 0; in hns3_buffer_detach()
3461 struct hns3_desc_cb *cb = &ring->desc_cb[i]; in hns3_free_buffer_detach() local
3463 if (!ring->desc_cb[i].dma) in hns3_free_buffer_detach()
3467 hns3_free_buffer(ring, cb, budget); in hns3_free_buffer_detach()
3474 for (i = 0; i < ring->desc_num; i++) in hns3_free_buffers()
3481 int size = ring->desc_num * sizeof(ring->desc[0]); in hns3_free_desc()
3485 if (ring->desc) { in hns3_free_desc()
3487 ring->desc, ring->desc_dma_addr); in hns3_free_desc()
3488 ring->desc = NULL; in hns3_free_desc()
3494 int size = ring->desc_num * sizeof(ring->desc[0]); in hns3_alloc_desc()
3496 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, in hns3_alloc_desc()
3497 &ring->desc_dma_addr, GFP_KERNEL); in hns3_alloc_desc()
3498 if (!ring->desc) in hns3_alloc_desc()
3499 return -ENOMEM; in hns3_alloc_desc()
3505 struct hns3_desc_cb *cb) in hns3_alloc_and_map_buffer() argument
3509 ret = hns3_alloc_buffer(ring, cb); in hns3_alloc_and_map_buffer()
3510 if (ret || ring->page_pool) in hns3_alloc_and_map_buffer()
3513 ret = hns3_map_buffer(ring, cb); in hns3_alloc_and_map_buffer()
3520 hns3_free_buffer(ring, cb, 0); in hns3_alloc_and_map_buffer()
3527 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); in hns3_alloc_and_attach_buffer()
3532 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_alloc_and_attach_buffer()
3533 ring->desc_cb[i].page_offset); in hns3_alloc_and_attach_buffer()
3534 ring->desc_cb[i].refill = 1; in hns3_alloc_and_attach_buffer()
3544 for (i = 0; i < ring->desc_num; i++) { in hns3_alloc_ring_buffers()
3556 for (j = i - 1; j >= 0; j--) in hns3_alloc_ring_buffers()
3561 /* detach an in-use buffer and replace it with a reserved one */
3565 hns3_unmap_buffer(ring, &ring->desc_cb[i]); in hns3_replace_buffer()
3566 ring->desc_cb[i] = *res_cb; in hns3_replace_buffer()
3567 ring->desc_cb[i].refill = 1; in hns3_replace_buffer()
3568 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_replace_buffer()
3569 ring->desc_cb[i].page_offset); in hns3_replace_buffer()
3570 ring->desc[i].rx.bd_base_info = 0; in hns3_replace_buffer()
3575 ring->desc_cb[i].reuse_flag = 0; in hns3_reuse_buffer()
3576 ring->desc_cb[i].refill = 1; in hns3_reuse_buffer()
3577 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_reuse_buffer()
3578 ring->desc_cb[i].page_offset); in hns3_reuse_buffer()
3579 ring->desc[i].rx.bd_base_info = 0; in hns3_reuse_buffer()
3582 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, in hns3_reuse_buffer()
3593 int ltu = smp_load_acquire(&ring->last_to_use); in hns3_nic_reclaim_desc()
3594 int ntc = ring->next_to_clean; in hns3_nic_reclaim_desc()
3600 desc = &ring->desc[ntc]; in hns3_nic_reclaim_desc()
3602 if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & in hns3_nic_reclaim_desc()
3606 desc_cb = &ring->desc_cb[ntc]; in hns3_nic_reclaim_desc()
3608 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL | in hns3_nic_reclaim_desc()
3612 (*bytes) += desc_cb->send_bytes; in hns3_nic_reclaim_desc()
3618 if (++ntc == ring->desc_num) in hns3_nic_reclaim_desc()
3622 prefetch(&ring->desc_cb[ntc]); in hns3_nic_reclaim_desc()
3632 smp_store_release(&ring->next_to_clean, ntc); in hns3_nic_reclaim_desc()
3652 ring->tqp_vector->tx_group.total_bytes += bytes; in hns3_clean_tx_ring()
3653 ring->tqp_vector->tx_group.total_packets += pkts; in hns3_clean_tx_ring()
3655 u64_stats_update_begin(&ring->syncp); in hns3_clean_tx_ring()
3656 ring->stats.tx_bytes += bytes; in hns3_clean_tx_ring()
3657 ring->stats.tx_pkts += pkts; in hns3_clean_tx_ring()
3658 u64_stats_update_end(&ring->syncp); in hns3_clean_tx_ring()
3660 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); in hns3_clean_tx_ring()
3670 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_clean_tx_ring()
3672 ring->stats.restart_queue++; in hns3_clean_tx_ring()
3679 int ntc = ring->next_to_clean; in hns3_desc_unused()
3680 int ntu = ring->next_to_use; in hns3_desc_unused()
3682 if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill)) in hns3_desc_unused()
3683 return ring->desc_num; in hns3_desc_unused()
3685 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; in hns3_desc_unused()
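/* Editorial note (worked example, not in the source): with desc_num = 8,
 * next_to_clean = 2 and next_to_use = 6 the indices have wrapped, so the
 * formula gives 8 + 2 - 6 = 4 unused descriptors; with next_to_clean = 5 and
 * next_to_use = 2 there is no wrap and it gives 0 + 5 - 2 = 3. When the two
 * indices are equal, the refill flag is what distinguishes a fully unused
 * ring (every descriptor still needs a buffer) from one with nothing left to
 * refill.
 */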
3697 desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_nic_alloc_rx_buffers()
3698 if (desc_cb->reuse_flag) { in hns3_nic_alloc_rx_buffers()
3701 hns3_reuse_buffer(ring, ring->next_to_use); in hns3_nic_alloc_rx_buffers()
3711 writel(i, ring->tqp->io_base + in hns3_nic_alloc_rx_buffers()
3715 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); in hns3_nic_alloc_rx_buffers()
3723 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); in hns3_nic_alloc_rx_buffers()
3727 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) in hns3_can_reuse_page() argument
3729 return page_count(cb->priv) == cb->pagecnt_bias; in hns3_can_reuse_page()
3737 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; in hns3_handle_rx_copybreak()
3738 u32 frag_offset = desc_cb->page_offset + pull_len; in hns3_handle_rx_copybreak()
3739 int size = le16_to_cpu(desc->rx.size); in hns3_handle_rx_copybreak()
3740 u32 frag_size = size - pull_len; in hns3_handle_rx_copybreak()
3748 return -ENOMEM; in hns3_handle_rx_copybreak()
3751 desc_cb->reuse_flag = 1; in hns3_handle_rx_copybreak()
3752 memcpy(frag, desc_cb->buf + frag_offset, frag_size); in hns3_handle_rx_copybreak()
3764 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; in hns3_nic_reuse_page()
3765 u32 frag_offset = desc_cb->page_offset + pull_len; in hns3_nic_reuse_page()
3766 int size = le16_to_cpu(desc->rx.size); in hns3_nic_reuse_page()
3768 u32 frag_size = size - pull_len; in hns3_nic_reuse_page()
3772 if (ring->page_pool) { in hns3_nic_reuse_page()
3773 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, in hns3_nic_reuse_page()
3778 /* Avoid re-using remote or pfmem page */ in hns3_nic_reuse_page()
3779 if (unlikely(!dev_page_is_reusable(desc_cb->priv))) in hns3_nic_reuse_page()
3791 * is non-zero, which means page_offset @ truesize will in hns3_nic_reuse_page()
3795 if ((!desc_cb->page_offset && reused) || in hns3_nic_reuse_page()
3796 ((desc_cb->page_offset + truesize + truesize) <= in hns3_nic_reuse_page()
3797 hns3_page_size(ring) && desc_cb->page_offset)) { in hns3_nic_reuse_page()
3798 desc_cb->page_offset += truesize; in hns3_nic_reuse_page()
3799 desc_cb->reuse_flag = 1; in hns3_nic_reuse_page()
3800 } else if (desc_cb->page_offset && reused) { in hns3_nic_reuse_page()
3801 desc_cb->page_offset = 0; in hns3_nic_reuse_page()
3802 desc_cb->reuse_flag = 1; in hns3_nic_reuse_page()
3803 } else if (frag_size <= ring->rx_copybreak) { in hns3_nic_reuse_page()
3810 desc_cb->pagecnt_bias--; in hns3_nic_reuse_page()
3812 if (unlikely(!desc_cb->pagecnt_bias)) { in hns3_nic_reuse_page()
3813 page_ref_add(desc_cb->priv, USHRT_MAX); in hns3_nic_reuse_page()
3814 desc_cb->pagecnt_bias = USHRT_MAX; in hns3_nic_reuse_page()
3817 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, in hns3_nic_reuse_page()
3820 if (unlikely(!desc_cb->reuse_flag)) in hns3_nic_reuse_page()
3821 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); in hns3_nic_reuse_page()
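hns3_nic_reuse_page() above keeps RX pages alive by pre-charging page references (pagecnt_bias) and flipping page_offset between the halves of the page; a condensed sketch of that idea, with hypothetical names and the simplification of exactly two buffers per page:

	struct rx_half_page {
		struct page *page;
		u32 offset;		/* half currently posted to hardware */
		u16 pagecnt_bias;	/* references pre-charged via page_ref_add() */
	};

	static bool rx_half_page_flip(struct rx_half_page *buf, u32 truesize)
	{
		/* reusable only while the driver owns every page reference */
		bool reusable = page_count(buf->page) == buf->pagecnt_bias;

		/* one reference goes to the skb frag in either case */
		buf->pagecnt_bias--;
		if (!reusable)
			return false;	/* the stack still holds the other half */

		/* point the descriptor at the other half for the next refill */
		buf->offset = buf->offset ? 0 : truesize;
		return true;
	}

When the bias drains toward zero, the code above re-charges it with page_ref_add(desc_cb->priv, USHRT_MAX) so the ownership test keeps working.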
3826 __be16 type = skb->protocol; in hns3_gro_complete()
3834 return -EFAULT; in hns3_gro_complete()
3836 vh = (struct vlan_hdr *)(skb->data + depth); in hns3_gro_complete()
3837 type = vh->h_vlan_encapsulated_proto; in hns3_gro_complete()
3849 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, in hns3_gro_complete()
3850 iph->daddr, 0); in hns3_gro_complete()
3857 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, in hns3_gro_complete()
3858 &iph->daddr, 0); in hns3_gro_complete()
3860 hns3_rl_err(skb->dev, in hns3_gro_complete()
3863 return -EFAULT; in hns3_gro_complete()
3866 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; in hns3_gro_complete()
3867 if (th->cwr) in hns3_gro_complete()
3868 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in hns3_gro_complete()
3871 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; in hns3_gro_complete()
3873 skb->csum_start = (unsigned char *)th - skb->head; in hns3_gro_complete()
3874 skb->csum_offset = offsetof(struct tcphdr, check); in hns3_gro_complete()
3875 skb->ip_summed = CHECKSUM_PARTIAL; in hns3_gro_complete()
3890 skb->ip_summed = CHECKSUM_COMPLETE; in hns3_checksum_complete()
3891 skb->csum = csum_unfold((__force __sum16)csum); in hns3_checksum_complete()
3901 skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level; in hns3_rx_handle_csum()
3902 skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed; in hns3_rx_handle_csum()
3912 skb->csum_level = 1; in hns3_rx_handle_csum()
3925 skb->ip_summed = CHECKSUM_UNNECESSARY; in hns3_rx_handle_csum()
3940 skb->ip_summed = CHECKSUM_NONE; in hns3_rx_checksum()
3944 if (!(netdev->features & NETIF_F_RXCSUM)) in hns3_rx_checksum()
3947 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) in hns3_rx_checksum()
3960 skb->ip_summed = CHECKSUM_NONE; in hns3_rx_checksum()
3972 napi_gro_flush(&ring->tqp_vector->napi, false); in hns3_rx_skb()
3974 napi_gro_receive(&ring->tqp_vector->napi, skb); in hns3_rx_skb()
3981 struct hnae3_handle *handle = ring->tqp->handle; in hns3_parse_vlan_tag()
3982 struct pci_dev *pdev = ring->tqp->handle->pdev; in hns3_parse_vlan_tag()
3985 if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) { in hns3_parse_vlan_tag()
3986 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
3988 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
4004 if (handle->port_base_vlan_state != in hns3_parse_vlan_tag()
4008 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
4011 if (handle->port_base_vlan_state != in hns3_parse_vlan_tag()
4015 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
4018 if (handle->port_base_vlan_state == in hns3_parse_vlan_tag()
4020 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
4022 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
4032 ring->desc[ring->next_to_clean].rx.bd_base_info &= in hns3_rx_ring_move_fw()
4034 ring->desc_cb[ring->next_to_clean].refill = 0; in hns3_rx_ring_move_fw()
4035 ring->next_to_clean += 1; in hns3_rx_ring_move_fw()
4037 if (unlikely(ring->next_to_clean == ring->desc_num)) in hns3_rx_ring_move_fw()
4038 ring->next_to_clean = 0; in hns3_rx_ring_move_fw()
4044 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_alloc_skb()
4048 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); in hns3_alloc_skb()
4049 skb = ring->skb; in hns3_alloc_skb()
4054 return -ENOMEM; in hns3_alloc_skb()
4058 prefetchw(skb->data); in hns3_alloc_skb()
4060 ring->pending_buf = 1; in hns3_alloc_skb()
4061 ring->frag_num = 0; in hns3_alloc_skb()
4062 ring->tail_skb = NULL; in hns3_alloc_skb()
4066 /* We can reuse the buffer as-is, just make sure it is reusable */ in hns3_alloc_skb()
4067 if (dev_page_is_reusable(desc_cb->priv)) in hns3_alloc_skb()
4068 desc_cb->reuse_flag = 1; in hns3_alloc_skb()
4069 else if (desc_cb->type & DESC_TYPE_PP_FRAG) in hns3_alloc_skb()
4070 page_pool_put_full_page(ring->page_pool, desc_cb->priv, in hns3_alloc_skb()
4073 __page_frag_cache_drain(desc_cb->priv, in hns3_alloc_skb()
4074 desc_cb->pagecnt_bias); in hns3_alloc_skb()
4080 if (ring->page_pool) in hns3_alloc_skb()
4085 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); in hns3_alloc_skb()
4086 __skb_put(skb, ring->pull_len); in hns3_alloc_skb()
4087 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, in hns3_alloc_skb()
4096 struct sk_buff *skb = ring->skb; in hns3_add_frag()
4104 desc = &ring->desc[ring->next_to_clean]; in hns3_add_frag()
4105 desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_add_frag()
4106 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_add_frag()
4110 return -ENXIO; in hns3_add_frag()
4112 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { in hns3_add_frag()
4113 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); in hns3_add_frag()
4117 return -ENXIO; in hns3_add_frag()
4120 if (ring->page_pool) in hns3_add_frag()
4123 ring->frag_num = 0; in hns3_add_frag()
4125 if (ring->tail_skb) { in hns3_add_frag()
4126 ring->tail_skb->next = new_skb; in hns3_add_frag()
4127 ring->tail_skb = new_skb; in hns3_add_frag()
4129 skb_shinfo(skb)->frag_list = new_skb; in hns3_add_frag()
4130 ring->tail_skb = new_skb; in hns3_add_frag()
4134 if (ring->tail_skb) { in hns3_add_frag()
4135 head_skb->truesize += hns3_buf_size(ring); in hns3_add_frag()
4136 head_skb->data_len += le16_to_cpu(desc->rx.size); in hns3_add_frag()
4137 head_skb->len += le16_to_cpu(desc->rx.size); in hns3_add_frag()
4138 skb = ring->tail_skb; in hns3_add_frag()
4142 desc_cb->dma + desc_cb->page_offset, in hns3_add_frag()
4146 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); in hns3_add_frag()
4149 ring->pending_buf++; in hns3_add_frag()
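	/* Summary of the chaining above: once the head skb holds MAX_SKB_FRAGS
	 * page fragments, each further buffer goes into a fresh zero-length skb
	 * linked through skb_shinfo(head)->frag_list (ring->tail_skb tracks the
	 * end of that chain), and the head skb's len, data_len and truesize are
	 * grown by hand so the stack still sees a single logical packet.
	 */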
4163 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, in hns3_set_gro_and_checksum()
4167 if (!skb_shinfo(skb)->gso_size) { in hns3_set_gro_and_checksum()
4173 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, in hns3_set_gro_and_checksum()
4177 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { in hns3_set_gro_and_checksum()
4188 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in hns3_set_gro_and_checksum()
4190 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in hns3_set_gro_and_checksum()
4192 return -EFAULT; in hns3_set_gro_and_checksum()
4205 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { in hns3_set_rx_skb_rss_type()
4237 u32 nsec = le32_to_cpu(desc->ts_nsec); in hns3_handle_rx_ts_info()
4238 u32 sec = le32_to_cpu(desc->ts_sec); in hns3_handle_rx_ts_info()
4240 if (h->ae_algo->ops->get_rx_hwts) in hns3_handle_rx_ts_info()
4241 h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); in hns3_handle_rx_ts_info()
4255 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { in hns3_handle_rx_vlan_tag()
4275 * current packet, and ring->next_to_clean indicates the first in hns3_handle_bdinfo()
4276 * descriptor of the next packet, so the - 1 below is needed. in hns3_handle_bdinfo()
4278 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : in hns3_handle_bdinfo()
4279 (ring->desc_num - 1); in hns3_handle_bdinfo()
4280 desc = &ring->desc[pre_ntc]; in hns3_handle_bdinfo()
4281 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_handle_bdinfo()
4282 l234info = le32_to_cpu(desc->rx.l234_info); in hns3_handle_bdinfo()
4283 ol_info = le32_to_cpu(desc->rx.ol_info); in hns3_handle_bdinfo()
4284 csum = le16_to_cpu(desc->csum); in hns3_handle_bdinfo()
4290 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | in hns3_handle_bdinfo()
4292 u64_stats_update_begin(&ring->syncp); in hns3_handle_bdinfo()
4294 ring->stats.l2_err++; in hns3_handle_bdinfo()
4296 ring->stats.err_pkt_len++; in hns3_handle_bdinfo()
4297 u64_stats_update_end(&ring->syncp); in hns3_handle_bdinfo()
4299 return -EFAULT; in hns3_handle_bdinfo()
4302 len = skb->len; in hns3_handle_bdinfo()
4305 skb->protocol = eth_type_trans(skb, netdev); in hns3_handle_bdinfo()
4318 u64_stats_update_begin(&ring->syncp); in hns3_handle_bdinfo()
4319 ring->stats.rx_pkts++; in hns3_handle_bdinfo()
4320 ring->stats.rx_bytes += len; in hns3_handle_bdinfo()
4323 ring->stats.rx_multicast++; in hns3_handle_bdinfo()
4325 u64_stats_update_end(&ring->syncp); in hns3_handle_bdinfo()
4327 ring->tqp_vector->rx_group.total_bytes += len; in hns3_handle_bdinfo()
4329 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash), in hns3_handle_bdinfo()
4336 struct sk_buff *skb = ring->skb; in hns3_handle_rx_bd()
4343 desc = &ring->desc[ring->next_to_clean]; in hns3_handle_rx_bd()
4344 desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_handle_rx_bd()
4349 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_handle_rx_bd()
4352 return -ENXIO; in hns3_handle_rx_bd()
4355 length = le16_to_cpu(desc->rx.size); in hns3_handle_rx_bd()
4357 ring->va = desc_cb->buf + desc_cb->page_offset; in hns3_handle_rx_bd()
4360 desc_cb->dma + desc_cb->page_offset, in hns3_handle_rx_bd()
4371 net_prefetch(ring->va); in hns3_handle_rx_bd()
4373 ret = hns3_alloc_skb(ring, length, ring->va); in hns3_handle_rx_bd()
4374 skb = ring->skb; in hns3_handle_rx_bd()
4392 if (skb->len > HNS3_RX_HEAD_SIZE) in hns3_handle_rx_bd()
4393 memcpy(skb->data, ring->va, in hns3_handle_rx_bd()
4394 ALIGN(ring->pull_len, sizeof(long))); in hns3_handle_rx_bd()
4402 skb_record_rx_queue(skb, ring->tqp->tqp_index); in hns3_handle_rx_bd()
4415 unused_count -= ring->pending_buf; in hns3_clean_rx_ring()
4428 if (unlikely(!ring->skb || err == -ENXIO)) { in hns3_clean_rx_ring()
4431 rx_fn(ring, ring->skb); in hns3_clean_rx_ring()
4435 unused_count += ring->pending_buf; in hns3_clean_rx_ring()
4436 ring->skb = NULL; in hns3_clean_rx_ring()
4437 ring->pending_buf = 0; in hns3_clean_rx_ring()
4453 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; in hns3_update_rx_int_coalesce()
4456 if (!rx_group->coal.adapt_enable) in hns3_update_rx_int_coalesce()
4459 dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets, in hns3_update_rx_int_coalesce()
4460 rx_group->total_bytes, &sample); in hns3_update_rx_int_coalesce()
4461 net_dim(&rx_group->dim, &sample); in hns3_update_rx_int_coalesce()
4466 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; in hns3_update_tx_int_coalesce()
4469 if (!tx_group->coal.adapt_enable) in hns3_update_tx_int_coalesce()
4472 dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets, in hns3_update_tx_int_coalesce()
4473 tx_group->total_bytes, &sample); in hns3_update_tx_int_coalesce()
4474 net_dim(&tx_group->dim, &sample); in hns3_update_tx_int_coalesce()
4479 struct hns3_nic_priv *priv = netdev_priv(napi->dev); in hns3_nic_common_poll()
4488 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { in hns3_nic_common_poll()
4496 hns3_for_each_ring(ring, tqp_vector->tx_group) in hns3_nic_common_poll()
4500 if (tqp_vector->num_tqps > 1) in hns3_nic_common_poll()
4501 rx_budget = max(budget / tqp_vector->num_tqps, 1); in hns3_nic_common_poll()
4503 hns3_for_each_ring(ring, tqp_vector->rx_group) { in hns3_nic_common_poll()
4512 tqp_vector->rx_group.total_packets += rx_pkt_total; in hns3_nic_common_poll()
4518 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { in hns3_nic_common_poll()
4535 struct pci_dev *pdev = tqp_vector->handle->pdev; in hns3_create_ring_chain()
4539 ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring; in hns3_create_ring_chain()
4542 while (cur_chain->next) in hns3_create_ring_chain()
4543 cur_chain = cur_chain->next; in hns3_create_ring_chain()
4547 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); in hns3_create_ring_chain()
4549 return -ENOMEM; in hns3_create_ring_chain()
4551 cur_chain->next = chain; in hns3_create_ring_chain()
4554 chain->tqp_index = ring->tqp->tqp_index; in hns3_create_ring_chain()
4555 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, in hns3_create_ring_chain()
4557 hnae3_set_field(chain->int_gl_idx, in hns3_create_ring_chain()
4563 ring = ring->next; in hns3_create_ring_chain()
4572 struct pci_dev *pdev = tqp_vector->handle->pdev; in hns3_get_vector_ring_chain()
4586 chain = cur_chain->next; in hns3_get_vector_ring_chain()
4587 devm_kfree(&pdev->dev, cur_chain); in hns3_get_vector_ring_chain()
4597 struct pci_dev *pdev = tqp_vector->handle->pdev; in hns3_free_vector_ring_chain()
4603 chain_tmp = chain->next; in hns3_free_vector_ring_chain()
4604 devm_kfree(&pdev->dev, chain); in hns3_free_vector_ring_chain()
4612 ring->next = group->ring; in hns3_add_ring_to_group()
4613 group->ring = ring; in hns3_add_ring_to_group()
4615 group->count++; in hns3_add_ring_to_group()
4620 struct pci_dev *pdev = priv->ae_handle->pdev; in hns3_nic_set_cpumask()
4622 int num_vectors = priv->vector_num; in hns3_nic_set_cpumask()
4626 numa_node = dev_to_node(&pdev->dev); in hns3_nic_set_cpumask()
4629 tqp_vector = &priv->tqp_vector[vector_i]; in hns3_nic_set_cpumask()
4631 &tqp_vector->affinity_mask); in hns3_nic_set_cpumask()
4640 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; in hns3_rx_dim_work()
4642 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); in hns3_rx_dim_work()
4644 hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec); in hns3_rx_dim_work()
4645 tqp_vector->rx_group.coal.int_gl = cur_moder.usec; in hns3_rx_dim_work()
4647 if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) { in hns3_rx_dim_work()
4649 tqp_vector->rx_group.coal.int_ql = cur_moder.pkts; in hns3_rx_dim_work()
4652 dim->state = DIM_START_MEASURE; in hns3_rx_dim_work()
4660 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; in hns3_tx_dim_work()
4662 net_dim_get_tx_moderation(dim->mode, dim->profile_ix); in hns3_tx_dim_work()
4665 tqp_vector->tx_group.coal.int_gl = cur_moder.usec; in hns3_tx_dim_work()
4667 if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) { in hns3_tx_dim_work()
4669 tqp_vector->tx_group.coal.int_ql = cur_moder.pkts; in hns3_tx_dim_work()
4672 dim->state = DIM_START_MEASURE; in hns3_tx_dim_work()
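Both work handlers follow the generic net_dim pattern; a compact sketch of the wiring (hw_set_rx_coal() is an assumed driver hook, with <linux/dim.h> and <linux/workqueue.h> implied):

	static void hw_set_rx_coal(u16 gl_usec, u16 ql_pkts);	/* assumed hook */

	static void example_rx_dim_work(struct work_struct *work)
	{
		struct dim *dim = container_of(work, struct dim, work);
		struct dim_cq_moder moder =
			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

		hw_set_rx_coal(moder.usec, moder.pkts);	/* program GL/QL */
		dim->state = DIM_START_MEASURE;		/* re-arm the algorithm */
	}

	static void example_rx_poll_done(struct dim *dim, u16 events,
					 u64 packets, u64 bytes)
	{
		struct dim_sample sample = {};

		dim_update_sample(events, packets, bytes, &sample);
		net_dim(dim, &sample);	/* schedules dim->work on a profile change */
	}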
4677 INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work); in hns3_nic_init_dim()
4678 INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work); in hns3_nic_init_dim()
4683 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_init_vector_data()
4690 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_vector_data()
4691 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_init_vector_data()
4693 tqp_vector->num_tqps = 0; in hns3_nic_init_vector_data()
4697 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_nic_init_vector_data()
4698 u16 vector_i = i % priv->vector_num; in hns3_nic_init_vector_data()
4699 u16 tqp_num = h->kinfo.num_tqps; in hns3_nic_init_vector_data()
4701 tqp_vector = &priv->tqp_vector[vector_i]; in hns3_nic_init_vector_data()
4703 hns3_add_ring_to_group(&tqp_vector->tx_group, in hns3_nic_init_vector_data()
4704 &priv->ring[i]); in hns3_nic_init_vector_data()
4706 hns3_add_ring_to_group(&tqp_vector->rx_group, in hns3_nic_init_vector_data()
4707 &priv->ring[i + tqp_num]); in hns3_nic_init_vector_data()
4709 priv->ring[i].tqp_vector = tqp_vector; in hns3_nic_init_vector_data()
4710 priv->ring[i + tqp_num].tqp_vector = tqp_vector; in hns3_nic_init_vector_data()
4711 tqp_vector->num_tqps++; in hns3_nic_init_vector_data()
4714 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_vector_data()
4717 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_init_vector_data()
4719 tqp_vector->rx_group.total_bytes = 0; in hns3_nic_init_vector_data()
4720 tqp_vector->rx_group.total_packets = 0; in hns3_nic_init_vector_data()
4721 tqp_vector->tx_group.total_bytes = 0; in hns3_nic_init_vector_data()
4722 tqp_vector->tx_group.total_packets = 0; in hns3_nic_init_vector_data()
4723 tqp_vector->handle = h; in hns3_nic_init_vector_data()
4727 ret = -ENOMEM; in hns3_nic_init_vector_data()
4731 ret = h->ae_algo->ops->map_ring_to_vector(h, in hns3_nic_init_vector_data()
4732 tqp_vector->vector_irq, vector_ring_chain); in hns3_nic_init_vector_data()
4739 netif_napi_add(priv->netdev, &tqp_vector->napi, in hns3_nic_init_vector_data()
4746 while (i--) in hns3_nic_init_vector_data()
4747 netif_napi_del(&priv->tqp_vector[i].napi); in hns3_nic_init_vector_data()
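	/* With, say, 8 TQPs and 3 vectors, the i % vector_num assignment above
	 * spreads queue pairs round-robin:
	 *   vector 0 <- tqp 0, 3, 6
	 *   vector 1 <- tqp 1, 4, 7
	 *   vector 2 <- tqp 2, 5
	 * and each vector's NAPI context then services both the TX and the RX
	 * ring of its queue pairs.
	 */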
4754 struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); in hns3_nic_init_coal_cfg()
4755 struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; in hns3_nic_init_coal_cfg()
4756 struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; in hns3_nic_init_coal_cfg()
4763 * Default: enable self-adaptive interrupt coalescing and GL in hns3_nic_init_coal_cfg()
4765 tx_coal->adapt_enable = 1; in hns3_nic_init_coal_cfg()
4766 rx_coal->adapt_enable = 1; in hns3_nic_init_coal_cfg()
4768 tx_coal->int_gl = HNS3_INT_GL_50K; in hns3_nic_init_coal_cfg()
4769 rx_coal->int_gl = HNS3_INT_GL_50K; in hns3_nic_init_coal_cfg()
4771 rx_coal->flow_level = HNS3_FLOW_LOW; in hns3_nic_init_coal_cfg()
4772 tx_coal->flow_level = HNS3_FLOW_LOW; in hns3_nic_init_coal_cfg()
4774 if (ae_dev->dev_specs.int_ql_max) { in hns3_nic_init_coal_cfg()
4775 tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; in hns3_nic_init_coal_cfg()
4776 rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; in hns3_nic_init_coal_cfg()
4782 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_alloc_vector_data()
4785 struct pci_dev *pdev = h->pdev; in hns3_nic_alloc_vector_data()
4786 u16 tqp_num = h->kinfo.num_tqps; in hns3_nic_alloc_vector_data()
4795 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), in hns3_nic_alloc_vector_data()
4798 return -ENOMEM; in hns3_nic_alloc_vector_data()
4801 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); in hns3_nic_alloc_vector_data()
4803 priv->vector_num = vector_num; in hns3_nic_alloc_vector_data()
4804 priv->tqp_vector = (struct hns3_enet_tqp_vector *) in hns3_nic_alloc_vector_data()
4805 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), in hns3_nic_alloc_vector_data()
4807 if (!priv->tqp_vector) { in hns3_nic_alloc_vector_data()
4808 ret = -ENOMEM; in hns3_nic_alloc_vector_data()
4812 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_alloc_vector_data()
4813 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_alloc_vector_data()
4814 tqp_vector->idx = i; in hns3_nic_alloc_vector_data()
4815 tqp_vector->mask_addr = vector[i].io_addr; in hns3_nic_alloc_vector_data()
4816 tqp_vector->vector_irq = vector[i].vector; in hns3_nic_alloc_vector_data()
4821 devm_kfree(&pdev->dev, vector); in hns3_nic_alloc_vector_data()
4827 group->ring = NULL; in hns3_clear_ring_group()
4828 group->count = 0; in hns3_clear_ring_group()
4834 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_uninit_vector_data()
4838 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_uninit_vector_data()
4839 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_uninit_vector_data()
4841 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) in hns3_nic_uninit_vector_data()
4850 dev_warn(priv->dev, "failed to get ring chain\n"); in hns3_nic_uninit_vector_data()
4852 h->ae_algo->ops->unmap_ring_from_vector(h, in hns3_nic_uninit_vector_data()
4853 tqp_vector->vector_irq, vector_ring_chain); in hns3_nic_uninit_vector_data()
4857 hns3_clear_ring_group(&tqp_vector->rx_group); in hns3_nic_uninit_vector_data()
4858 hns3_clear_ring_group(&tqp_vector->tx_group); in hns3_nic_uninit_vector_data()
4859 netif_napi_del(&priv->tqp_vector[i].napi); in hns3_nic_uninit_vector_data()
4865 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_dealloc_vector_data()
4866 struct pci_dev *pdev = h->pdev; in hns3_nic_dealloc_vector_data()
4869 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_dealloc_vector_data()
4872 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_dealloc_vector_data()
4873 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); in hns3_nic_dealloc_vector_data()
4878 devm_kfree(&pdev->dev, priv->tqp_vector); in hns3_nic_dealloc_vector_data()
4886 struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev); in hns3_update_tx_spare_buf_config()
4887 struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); in hns3_update_tx_spare_buf_config()
4888 struct hnae3_handle *handle = priv->ae_handle; in hns3_update_tx_spare_buf_config()
4890 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) in hns3_update_tx_spare_buf_config()
4896 priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE; in hns3_update_tx_spare_buf_config()
4897 priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE; in hns3_update_tx_spare_buf_config()
4899 if (priv->tx_copybreak < priv->min_tx_copybreak) in hns3_update_tx_spare_buf_config()
4900 priv->tx_copybreak = priv->min_tx_copybreak; in hns3_update_tx_spare_buf_config()
4901 if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size) in hns3_update_tx_spare_buf_config()
4902 handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size; in hns3_update_tx_spare_buf_config()
4908 int queue_num = priv->ae_handle->kinfo.num_tqps; in hns3_ring_get_cfg()
4913 ring = &priv->ring[q->tqp_index]; in hns3_ring_get_cfg()
4914 desc_num = priv->ae_handle->kinfo.num_tx_desc; in hns3_ring_get_cfg()
4915 ring->queue_index = q->tqp_index; in hns3_ring_get_cfg()
4916 ring->tx_copybreak = priv->tx_copybreak; in hns3_ring_get_cfg()
4917 ring->last_to_use = 0; in hns3_ring_get_cfg()
4919 ring = &priv->ring[q->tqp_index + queue_num]; in hns3_ring_get_cfg()
4920 desc_num = priv->ae_handle->kinfo.num_rx_desc; in hns3_ring_get_cfg()
4921 ring->queue_index = q->tqp_index; in hns3_ring_get_cfg()
4922 ring->rx_copybreak = priv->rx_copybreak; in hns3_ring_get_cfg()
4925 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); in hns3_ring_get_cfg()
4927 ring->tqp = q; in hns3_ring_get_cfg()
4928 ring->desc = NULL; in hns3_ring_get_cfg()
4929 ring->desc_cb = NULL; in hns3_ring_get_cfg()
4930 ring->dev = priv->dev; in hns3_ring_get_cfg()
4931 ring->desc_dma_addr = 0; in hns3_ring_get_cfg()
4932 ring->buf_size = q->buf_size; in hns3_ring_get_cfg()
4933 ring->desc_num = desc_num; in hns3_ring_get_cfg()
4934 ring->next_to_use = 0; in hns3_ring_get_cfg()
4935 ring->next_to_clean = 0; in hns3_ring_get_cfg()
4947 struct hnae3_handle *h = priv->ae_handle; in hns3_get_ring_config()
4948 struct pci_dev *pdev = h->pdev; in hns3_get_ring_config()
4951 priv->ring = devm_kzalloc(&pdev->dev, in hns3_get_ring_config()
4952 array3_size(h->kinfo.num_tqps, in hns3_get_ring_config()
4953 sizeof(*priv->ring), 2), in hns3_get_ring_config()
4955 if (!priv->ring) in hns3_get_ring_config()
4956 return -ENOMEM; in hns3_get_ring_config()
4958 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_get_ring_config()
4959 hns3_queue_to_ring(h->kinfo.tqp[i], priv); in hns3_get_ring_config()
4966 if (!priv->ring) in hns3_put_ring_config()
4969 devm_kfree(priv->dev, priv->ring); in hns3_put_ring_config()
4970 priv->ring = NULL; in hns3_put_ring_config()
4978 .pool_size = ring->desc_num * hns3_buf_size(ring) / in hns3_alloc_page_pool()
4987 ring->page_pool = page_pool_create(&pp_params); in hns3_alloc_page_pool()
4988 if (IS_ERR(ring->page_pool)) { in hns3_alloc_page_pool()
4990 PTR_ERR(ring->page_pool)); in hns3_alloc_page_pool()
4991 ring->page_pool = NULL; in hns3_alloc_page_pool()
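For reference, a minimal page_pool setup along the same lines (illustrative sizing and flags; the driver's own pool_size expression is only partially visible above, and the page_pool headers for the running kernel are implied):

	static struct page_pool *example_create_pool(struct device *dev,
						     unsigned int pool_size)
	{
		struct page_pool_params pp = {
			.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
			.order		= 0,		/* plain 0-order pages */
			.pool_size	= pool_size,
			.nid		= dev_to_node(dev),
			.dev		= dev,
			.dma_dir	= DMA_FROM_DEVICE,
			.max_len	= PAGE_SIZE,
			.offset		= 0,
		};

		return page_pool_create(&pp);	/* ERR_PTR() on failure */
	}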
4999 if (ring->desc_num <= 0 || ring->buf_size <= 0) in hns3_alloc_ring_memory()
5000 return -EINVAL; in hns3_alloc_ring_memory()
5002 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, in hns3_alloc_ring_memory()
5003 sizeof(ring->desc_cb[0]), GFP_KERNEL); in hns3_alloc_ring_memory()
5004 if (!ring->desc_cb) { in hns3_alloc_ring_memory()
5005 ret = -ENOMEM; in hns3_alloc_ring_memory()
5029 devm_kfree(ring_to_dev(ring), ring->desc_cb); in hns3_alloc_ring_memory()
5030 ring->desc_cb = NULL; in hns3_alloc_ring_memory()
5038 devm_kfree(ring_to_dev(ring), ring->desc_cb); in hns3_fini_ring()
5039 ring->desc_cb = NULL; in hns3_fini_ring()
5040 ring->next_to_clean = 0; in hns3_fini_ring()
5041 ring->next_to_use = 0; in hns3_fini_ring()
5042 ring->last_to_use = 0; in hns3_fini_ring()
5043 ring->pending_buf = 0; in hns3_fini_ring()
5044 if (!HNAE3_IS_TX_RING(ring) && ring->skb) { in hns3_fini_ring()
5045 dev_kfree_skb_any(ring->skb); in hns3_fini_ring()
5046 ring->skb = NULL; in hns3_fini_ring()
5047 } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) { in hns3_fini_ring()
5048 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_fini_ring()
5050 dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len, in hns3_fini_ring()
5052 free_pages((unsigned long)tx_spare->buf, in hns3_fini_ring()
5053 get_order(tx_spare->len)); in hns3_fini_ring()
5055 ring->tx_spare = NULL; in hns3_fini_ring()
5058 if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) { in hns3_fini_ring()
5059 page_pool_destroy(ring->page_pool); in hns3_fini_ring()
5060 ring->page_pool = NULL; in hns3_fini_ring()
5090 dma_addr_t dma = ring->desc_dma_addr; in hns3_init_ring_hw()
5091 struct hnae3_queue *q = ring->tqp; in hns3_init_ring_hw()
5099 hns3_buf_size2type(ring->buf_size)); in hns3_init_ring_hw()
5101 ring->desc_num / 8 - 1); in hns3_init_ring_hw()
5109 ring->desc_num / 8 - 1); in hns3_init_ring_hw()
5115 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; in hns3_init_tx_ring_tc()
5116 struct hnae3_tc_info *tc_info = &kinfo->tc_info; in hns3_init_tx_ring_tc()
5119 for (i = 0; i < tc_info->num_tc; i++) { in hns3_init_tx_ring_tc()
5122 for (j = 0; j < tc_info->tqp_count[i]; j++) { in hns3_init_tx_ring_tc()
5125 q = priv->ring[tc_info->tqp_offset[i] + j].tqp; in hns3_init_tx_ring_tc()
5133 struct hnae3_handle *h = priv->ae_handle; in hns3_init_all_ring()
5134 int ring_num = h->kinfo.num_tqps * 2; in hns3_init_all_ring()
5140 ret = hns3_alloc_ring_memory(&priv->ring[i]); in hns3_init_all_ring()
5142 dev_err(priv->dev, in hns3_init_all_ring()
5147 u64_stats_init(&priv->ring[i].syncp); in hns3_init_all_ring()
5154 for (j = i - 1; j >= 0; j--) in hns3_init_all_ring()
5155 hns3_fini_ring(&priv->ring[j]); in hns3_init_all_ring()
5157 return -ENOMEM; in hns3_init_all_ring()
5162 struct hnae3_handle *h = priv->ae_handle; in hns3_uninit_all_ring()
5165 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_uninit_all_ring()
5166 hns3_fini_ring(&priv->ring[i]); in hns3_uninit_all_ring()
5167 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); in hns3_uninit_all_ring()
5176 struct hnae3_handle *h = priv->ae_handle; in hns3_init_mac_addr()
5180 if (h->ae_algo->ops->get_mac_addr) in hns3_init_mac_addr()
5181 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); in hns3_init_mac_addr()
5186 hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); in hns3_init_mac_addr()
5187 dev_warn(priv->dev, "using random MAC address %s\n", in hns3_init_mac_addr()
5189 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { in hns3_init_mac_addr()
5191 ether_addr_copy(netdev->perm_addr, mac_addr_temp); in hns3_init_mac_addr()
5196 if (h->ae_algo->ops->set_mac_addr) in hns3_init_mac_addr()
5197 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); in hns3_init_mac_addr()
5207 if (h->ae_algo->ops->mac_connect_phy) in hns3_init_phy()
5208 ret = h->ae_algo->ops->mac_connect_phy(h); in hns3_init_phy()
5217 if (h->ae_algo->ops->mac_disconnect_phy) in hns3_uninit_phy()
5218 h->ae_algo->ops->mac_disconnect_phy(h); in hns3_uninit_phy()
5223 if (!handle->ae_algo->ops->client_start) in hns3_client_start()
5226 return handle->ae_algo->ops->client_start(handle); in hns3_client_start()
5231 if (!handle->ae_algo->ops->client_stop) in hns3_client_stop()
5234 handle->ae_algo->ops->client_stop(handle); in hns3_client_stop()
5239 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; in hns3_info_show()
5242 hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); in hns3_info_show()
5243 dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); in hns3_info_show()
5244 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); in hns3_info_show()
5245 dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); in hns3_info_show()
5246 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); in hns3_info_show()
5247 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); in hns3_info_show()
5248 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); in hns3_info_show()
5249 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); in hns3_info_show()
5250 dev_info(priv->dev, "Total number of enabled TCs: %u\n", in hns3_info_show()
5251 kinfo->tc_info.num_tc); in hns3_info_show()
5252 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); in hns3_info_show()
5258 struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); in hns3_set_cq_period_mode()
5259 struct hnae3_handle *handle = priv->ae_handle; in hns3_set_cq_period_mode()
5263 priv->tx_cqe_mode = mode; in hns3_set_cq_period_mode()
5265 for (i = 0; i < priv->vector_num; i++) in hns3_set_cq_period_mode()
5266 priv->tqp_vector[i].tx_group.dim.mode = mode; in hns3_set_cq_period_mode()
5268 priv->rx_cqe_mode = mode; in hns3_set_cq_period_mode()
5270 for (i = 0; i < priv->vector_num; i++) in hns3_set_cq_period_mode()
5271 priv->tqp_vector[i].rx_group.dim.mode = mode; in hns3_set_cq_period_mode()
5282 writel(new_mode, handle->kinfo.io_base + reg); in hns3_set_cq_period_mode()
5297 struct net_device *netdev = handle->kinfo.netdev; in hns3_state_init()
5300 set_bit(HNS3_NIC_STATE_INITED, &priv->state); in hns3_state_init()
5302 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) in hns3_state_init()
5303 set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); in hns3_state_init()
5305 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hns3_state_init()
5306 set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); in hns3_state_init()
5308 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) in hns3_state_init()
5309 set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); in hns3_state_init()
5312 set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); in hns3_state_init()
5317 struct hns3_nic_priv *priv = handle->priv; in hns3_state_uninit()
5319 clear_bit(HNS3_NIC_STATE_INITED, &priv->state); in hns3_state_uninit()
5324 struct pci_dev *pdev = handle->pdev; in hns3_client_init()
5331 ae_dev->handle = handle; in hns3_client_init()
5333 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, in hns3_client_init()
5337 return -ENOMEM; in hns3_client_init()
5340 priv->dev = &pdev->dev; in hns3_client_init()
5341 priv->netdev = netdev; in hns3_client_init()
5342 priv->ae_handle = handle; in hns3_client_init()
5343 priv->tx_timeout_count = 0; in hns3_client_init()
5344 priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; in hns3_client_init()
5345 priv->min_tx_copybreak = 0; in hns3_client_init()
5346 priv->min_tx_spare_buf_size = 0; in hns3_client_init()
5347 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_client_init()
5349 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); in hns3_client_init()
5351 handle->kinfo.netdev = netdev; in hns3_client_init()
5352 handle->priv = (void *)priv; in hns3_client_init()
5358 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; in hns3_client_init()
5359 netdev->priv_flags |= IFF_UNICAST_FLT; in hns3_client_init()
5360 netdev->netdev_ops = &hns3_nic_netdev_ops; in hns3_client_init()
5361 SET_NETDEV_DEV(netdev, &pdev->dev); in hns3_client_init()
5369 ret = -ENOMEM; in hns3_client_init()
5377 ret = -ENOMEM; in hns3_client_init()
5383 ret = -ENOMEM; in hns3_client_init()
5389 ret = -ENOMEM; in hns3_client_init()
5403 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); in hns3_client_init()
5407 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); in hns3_client_init()
5414 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); in hns3_client_init()
5422 dev_err(priv->dev, "failed to init debugfs, ret = %d\n", in hns3_client_init()
5427 netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); in hns3_client_init()
5433 dev_err(priv->dev, "probe register netdev fail!\n"); in hns3_client_init()
5458 priv->ring = NULL; in hns3_client_init()
5460 priv->ae_handle = NULL; in hns3_client_init()
5467 struct net_device *netdev = handle->kinfo.netdev; in hns3_client_uninit()
5470 if (netdev->reg_state != NETREG_UNINITIALIZED) in hns3_client_uninit()
5477 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_client_uninit()
5503 struct net_device *netdev = handle->kinfo.netdev; in hns3_link_status_change()
5523 while (ring->next_to_clean != ring->next_to_use) { in hns3_clear_tx_ring()
5524 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; in hns3_clear_tx_ring()
5525 hns3_free_buffer_detach(ring, ring->next_to_clean, 0); in hns3_clear_tx_ring()
5529 ring->pending_buf = 0; in hns3_clear_tx_ring()
5537 while (ring->next_to_use != ring->next_to_clean) { in hns3_clear_rx_ring()
5542 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { in hns3_clear_rx_ring()
5554 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); in hns3_clear_rx_ring()
5560 if (ring->skb) { in hns3_clear_rx_ring()
5561 dev_kfree_skb_any(ring->skb); in hns3_clear_rx_ring()
5562 ring->skb = NULL; in hns3_clear_rx_ring()
5563 ring->pending_buf = 0; in hns3_clear_rx_ring()
5571 while (ring->next_to_use != ring->next_to_clean) { in hns3_force_clear_rx_ring()
5576 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { in hns3_force_clear_rx_ring()
5578 &ring->desc_cb[ring->next_to_use]); in hns3_force_clear_rx_ring()
5579 ring->desc_cb[ring->next_to_use].dma = 0; in hns3_force_clear_rx_ring()
5588 struct net_device *ndev = h->kinfo.netdev; in hns3_clear_all_ring()
5592 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_clear_all_ring()
5595 ring = &priv->ring[i]; in hns3_clear_all_ring()
5598 ring = &priv->ring[i + h->kinfo.num_tqps]; in hns3_clear_all_ring()
5611 struct net_device *ndev = h->kinfo.netdev; in hns3_nic_reset_all_ring()
5617 ret = h->ae_algo->ops->reset_queue(h); in hns3_nic_reset_all_ring()
5621 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_nic_reset_all_ring()
5622 hns3_init_ring_hw(&priv->ring[i]); in hns3_nic_reset_all_ring()
5627 hns3_clear_tx_ring(&priv->ring[i]); in hns3_nic_reset_all_ring()
5628 priv->ring[i].next_to_clean = 0; in hns3_nic_reset_all_ring()
5629 priv->ring[i].next_to_use = 0; in hns3_nic_reset_all_ring()
5630 priv->ring[i].last_to_use = 0; in hns3_nic_reset_all_ring()
5632 rx_ring = &priv->ring[i + h->kinfo.num_tqps]; in hns3_nic_reset_all_ring()
5641 for (j = 0; j < rx_ring->desc_num; j++) in hns3_nic_reset_all_ring()
5644 rx_ring->next_to_clean = 0; in hns3_nic_reset_all_ring()
5645 rx_ring->next_to_use = 0; in hns3_nic_reset_all_ring()
5655 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hns3_reset_notify_down_enet()
5656 struct net_device *ndev = kinfo->netdev; in hns3_reset_notify_down_enet()
5659 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) in hns3_reset_notify_down_enet()
5670 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hns3_reset_notify_up_enet()
5671 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); in hns3_reset_notify_up_enet()
5674 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_reset_notify_up_enet()
5675 netdev_err(kinfo->netdev, "device is not initialized yet\n"); in hns3_reset_notify_up_enet()
5676 return -EFAULT; in hns3_reset_notify_up_enet()
5679 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); in hns3_reset_notify_up_enet()
5681 if (netif_running(kinfo->netdev)) { in hns3_reset_notify_up_enet()
5682 ret = hns3_nic_net_open(kinfo->netdev); in hns3_reset_notify_up_enet()
5684 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); in hns3_reset_notify_up_enet()
5685 netdev_err(kinfo->netdev, in hns3_reset_notify_up_enet()
5696 struct net_device *netdev = handle->kinfo.netdev; in hns3_reset_notify_init_enet()
5719 hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode); in hns3_reset_notify_init_enet()
5724 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); in hns3_reset_notify_init_enet()
5728 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); in hns3_reset_notify_init_enet()
5733 if (!hns3_is_phys_func(handle->pdev)) in hns3_reset_notify_init_enet()
5738 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); in hns3_reset_notify_init_enet()
5742 set_bit(HNS3_NIC_STATE_INITED, &priv->state); in hns3_reset_notify_init_enet()
5763 struct net_device *netdev = handle->kinfo.netdev; in hns3_reset_notify_uninit_enet()
5766 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_reset_notify_uninit_enet()
5769 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_reset_notify_uninit_enet()
5777 hns3_reset_tx_queue(priv->ae_handle); in hns3_reset_notify_uninit_enet()
5820 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, in hns3_change_channels()
5823 dev_err(&handle->pdev->dev, in hns3_change_channels()
5843 struct hnae3_knic_private_info *kinfo = &h->kinfo; in hns3_set_channels()
5845 u32 new_tqp_num = ch->combined_count; in hns3_set_channels()
5850 return -EBUSY; in hns3_set_channels()
5852 if (ch->rx_count || ch->tx_count) in hns3_set_channels()
5853 return -EINVAL; in hns3_set_channels()
5855 if (kinfo->tc_info.mqprio_active) { in hns3_set_channels()
5856 dev_err(&netdev->dev, in hns3_set_channels()
5858 return -EINVAL; in hns3_set_channels()
5863 dev_err(&netdev->dev, in hns3_set_channels()
5866 return -EINVAL; in hns3_set_channels()
5869 if (kinfo->rss_size == new_tqp_num) in hns3_set_channels()
5884 org_tqp_num = h->kinfo.num_tqps; in hns3_set_channels()
5911 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_external_lb_prepare()
5924 hns3_nic_reset_all_ring(priv->ae_handle); in hns3_external_lb_prepare()
5926 hns3_reset_tx_queue(priv->ae_handle); in hns3_external_lb_prepare()
5932 struct hnae3_handle *h = priv->ae_handle; in hns3_external_lb_restore()
5940 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_external_lb_restore()
5943 if (hns3_nic_reset_all_ring(priv->ae_handle)) in hns3_external_lb_restore()
5946 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_external_lb_restore()
5952 if (h->ae_algo->ops->get_status(h)) in hns3_external_lb_restore()
5974 dev_err(&handle->pdev->dev, "Detected %s!\n", in hns3_process_hw_error()
5989 /* hns3_init_module - Driver registration routine
5997 pr_debug("%s: %s - version\n", hns3_driver_name, hns3_driver_string); in hns3_init_module()
6028 /* hns3_exit_module - Driver exit cleanup routine
6045 MODULE_ALIAS("pci:hns-nic");