Lines Matching +full:disable +full:- +full:eop
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
16 * i40e_fdir - Generate a Flow Director descriptor based on fdata
26 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir()
31 i = tx_ring->next_to_use; in i40e_fdir()
35 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_fdir()
37 flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK, fdata->q_index); in i40e_fdir()
40 fdata->flex_off); in i40e_fdir()
42 flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype); in i40e_fdir()
45 vsi_id = fdata->dest_vsi ? : i40e_pf_get_main_vsi(pf)->id; in i40e_fdir()
56 dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_DEST_MASK, fdata->dest_ctl); in i40e_fdir()
59 fdata->fd_status); in i40e_fdir()
61 if (fdata->cnt_index) { in i40e_fdir()
64 fdata->cnt_index); in i40e_fdir()
67 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); in i40e_fdir()
68 fdir_desc->rsvd = cpu_to_le32(0); in i40e_fdir()
69 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); in i40e_fdir()
70 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id); in i40e_fdir()
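
Editor's note: the i40e_fdir() lines above pack the queue index, flex offset and packet type into the descriptor's qindex_flex_ptype_vsi word with FIELD_PREP() (from <linux/bitfield.h>) against I40E_TXD_FLTR_QW0_* masks. Below is a minimal standalone sketch of that mask-and-shift packing; the field_prep() helper and the DEMO_* mask values are illustrative stand-ins, not the driver's real register layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's FIELD_PREP(): shift a value into
 * the position selected by a contiguous bit mask.
 */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

/* Hypothetical masks for demonstration only; the real I40E_TXD_FLTR_QW0_*
 * definitions live in the i40e headers and differ from these.
 */
#define DEMO_QINDEX_MASK   0x000007FFu
#define DEMO_FLEXOFF_MASK  0x0000F800u
#define DEMO_PCTYPE_MASK   0x03F00000u

int main(void)
{
	uint32_t flex_ptype = 0;

	flex_ptype |= field_prep(DEMO_QINDEX_MASK, 5);    /* q_index  */
	flex_ptype |= field_prep(DEMO_FLEXOFF_MASK, 3);   /* flex_off */
	flex_ptype |= field_prep(DEMO_PCTYPE_MASK, 0x21); /* pctype   */

	printf("flex_ptype = 0x%08x\n", flex_ptype);
	return 0;
}
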
75 * i40e_program_fdir_filter - Program a Flow Director filter
77 * @raw_packet: the pre-allocated packet buffer for FDir
97 return -ENOENT; in i40e_program_fdir_filter()
99 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
100 dev = tx_ring->dev; in i40e_program_fdir_filter()
103 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) { in i40e_program_fdir_filter()
105 return -EAGAIN; in i40e_program_fdir_filter()
115 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
116 first = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
120 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
122 tx_buf = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
124 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; in i40e_program_fdir_filter()
132 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_program_fdir_filter()
135 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB; in i40e_program_fdir_filter()
136 tx_buf->raw_buf = (void *)raw_packet; in i40e_program_fdir_filter()
138 tx_desc->cmd_type_offset_bsz = in i40e_program_fdir_filter()
147 first->next_to_watch = tx_desc; in i40e_program_fdir_filter()
149 writel(tx_ring->next_to_use, tx_ring->tail); in i40e_program_fdir_filter()
153 return -1; in i40e_program_fdir_filter()
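
Editor's note: i40e_program_fdir_filter() above waits until at least two descriptors are free (the I40E_DESC_UNUSED(tx_ring) < 2 check) because a filter consumes a programming descriptor plus a data descriptor. The sketch below models the wrap-aware unused-slot computation such a check relies on; the struct and helper names are assumptions for illustration, not the driver's definitions.

#include <stdio.h>

/* Minimal ring bookkeeping mirroring next_to_use/next_to_clean indices. */
struct demo_ring {
	unsigned int count;          /* total descriptors in the ring  */
	unsigned int next_to_use;    /* producer index (driver writes) */
	unsigned int next_to_clean;  /* consumer index (completions)   */
};

/* Free slots between producer and consumer, keeping one slot as a gap so
 * that a full ring is distinguishable from an empty one.
 */
static unsigned int ring_desc_unused(const struct demo_ring *r)
{
	unsigned int head = r->next_to_clean, tail = r->next_to_use;

	return ((head > tail) ? 0 : r->count) + head - tail - 1;
}

int main(void)
{
	struct demo_ring r = { .count = 512, .next_to_use = 510, .next_to_clean = 4 };

	/* Need >= 2 free slots: one programming descriptor + one data descriptor. */
	printf("unused = %u, enough for FDir program+data: %s\n",
	       ring_desc_unused(&r), ring_desc_unused(&r) >= 2 ? "yes" : "no");
	return 0;
}
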
157 * i40e_create_dummy_packet - Constructs dummy packet for HW
168 bool is_vlan = !!data->vlan_tag; in i40e_create_dummy_packet()
181 ip.daddr = data->dst_ip; in i40e_create_dummy_packet()
182 ip.saddr = data->src_ip; in i40e_create_dummy_packet()
188 memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6, in i40e_create_dummy_packet()
190 memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6, in i40e_create_dummy_packet()
195 vlan.h_vlan_TCI = data->vlan_tag; in i40e_create_dummy_packet()
197 eth.h_proto = data->vlan_etype; in i40e_create_dummy_packet()
221 * i40e_create_dummy_udp_packet - helper function to create UDP packet
237 udp->dest = data->dst_port; in i40e_create_dummy_udp_packet()
238 udp->source = data->src_port; in i40e_create_dummy_udp_packet()
242 * i40e_create_dummy_tcp_packet - helper function to create TCP packet
263 tcp->dest = data->dst_port; in i40e_create_dummy_tcp_packet()
264 tcp->source = data->src_port; in i40e_create_dummy_tcp_packet()
268 * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
286 sctp->dest = data->dst_port; in i40e_create_dummy_sctp_packet()
287 sctp->source = data->src_port; in i40e_create_dummy_sctp_packet()
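
Editor's note: i40e_create_dummy_packet() and its UDP/TCP/SCTP helpers above lay out Ethernet, IP and L4 headers in a raw buffer and then fill in addresses and ports from the filter data. Below is a self-contained userspace sketch of the same idea for a UDP/IPv4 dummy frame, using the standard uapi/libc header structs; the addresses, ports and buffer size are arbitrary example values.

#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>   /* struct ethhdr, ETH_P_IP */
#include <netinet/ip.h>       /* struct iphdr */
#include <netinet/udp.h>      /* struct udphdr */

/* Build a minimal Ethernet + IPv4 + UDP "dummy" frame into buf and return
 * its length; addresses/ports are example values only.
 */
static size_t build_dummy_udp(unsigned char *buf)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr *ip = (struct iphdr *)(buf + sizeof(*eth));
	struct udphdr *udp = (struct udphdr *)((unsigned char *)ip + sizeof(*ip));

	memset(buf, 0, sizeof(*eth) + sizeof(*ip) + sizeof(*udp));

	eth->h_proto = htons(ETH_P_IP);

	ip->version = 4;
	ip->ihl = 5;                            /* 20-byte header, no options */
	ip->protocol = IPPROTO_UDP;
	ip->tot_len = htons(sizeof(*ip) + sizeof(*udp));
	ip->saddr = inet_addr("192.0.2.1");     /* example source      */
	ip->daddr = inet_addr("192.0.2.2");     /* example destination */

	udp->source = htons(12345);
	udp->dest = htons(4789);
	udp->len = htons(sizeof(*udp));

	return sizeof(*eth) + sizeof(*ip) + sizeof(*udp);
}

int main(void)
{
	unsigned char frame[64];

	printf("dummy frame length = %zu bytes\n", build_dummy_udp(frame));
	return 0;
}
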
291 * i40e_prepare_fdir_filter - Prepare and program fdir filter
309 if (fd_data->flex_filter) { in i40e_prepare_fdir_filter()
311 __be16 pattern = fd_data->flex_word; in i40e_prepare_fdir_filter()
312 u16 off = fd_data->flex_offset; in i40e_prepare_fdir_filter()
317 if (!!fd_data->vlan_tag) in i40e_prepare_fdir_filter()
323 fd_data->pctype = pctype; in i40e_prepare_fdir_filter()
326 dev_info(&pf->pdev->dev, in i40e_prepare_fdir_filter()
328 fd_data->pctype, fd_data->fd_id, ret); in i40e_prepare_fdir_filter()
330 return -EOPNOTSUPP; in i40e_prepare_fdir_filter()
331 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { in i40e_prepare_fdir_filter()
333 dev_info(&pf->pdev->dev, in i40e_prepare_fdir_filter()
335 fd_data->pctype, fd_data->fd_id); in i40e_prepare_fdir_filter()
337 dev_info(&pf->pdev->dev, in i40e_prepare_fdir_filter()
339 fd_data->pctype, fd_data->fd_id); in i40e_prepare_fdir_filter()
346 * i40e_change_filter_num - Adjust the IPv4/IPv6 filter counters in i40e_change_filter_num()

364 (*ipv4_filter_num)--; in i40e_change_filter_num()
366 (*ipv6_filter_num)--; in i40e_change_filter_num()
373 * i40e_add_del_fdir_udp - Add/Remove UDP filters
386 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_udp()
392 return -ENOMEM; in i40e_add_del_fdir_udp()
412 i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt, in i40e_add_del_fdir_udp()
413 &pf->fd_udp6_filter_cnt); in i40e_add_del_fdir_udp()
421 * i40e_add_del_fdir_tcp - Add/Remove TCPv4 filters
434 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_tcp()
440 return -ENOMEM; in i40e_add_del_fdir_tcp()
459 i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt, in i40e_add_del_fdir_tcp()
460 &pf->fd_tcp6_filter_cnt); in i40e_add_del_fdir_tcp()
463 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && in i40e_add_del_fdir_tcp()
464 I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_add_del_fdir_tcp()
465 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); in i40e_add_del_fdir_tcp()
466 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_add_del_fdir_tcp()
474 * i40e_add_del_fdir_sctp - Add/Remove SCTPv4 Flow Director filters for
488 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_sctp()
494 return -ENOMEM; in i40e_add_del_fdir_sctp()
514 i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt, in i40e_add_del_fdir_sctp()
515 &pf->fd_sctp6_filter_cnt); in i40e_add_del_fdir_sctp()
523 * i40e_add_del_fdir_ip - Add/Remove IPv4 Flow Director filters for
537 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_ip()
556 return -ENOMEM; in i40e_add_del_fdir_ip()
571 i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt, in i40e_add_del_fdir_ip()
572 &pf->fd_ip6_filter_cnt); in i40e_add_del_fdir_ip()
581 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
591 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir()
594 switch (input->flow_type & ~FLOW_EXT) { in i40e_add_del_fdir()
614 switch (input->ipl4_proto) { in i40e_add_del_fdir()
629 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n", in i40e_add_del_fdir()
630 input->ipl4_proto); in i40e_add_del_fdir()
631 return -EINVAL; in i40e_add_del_fdir()
635 switch (input->ipl4_proto) { in i40e_add_del_fdir()
650 dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n", in i40e_add_del_fdir()
651 input->ipl4_proto); in i40e_add_del_fdir()
652 return -EINVAL; in i40e_add_del_fdir()
656 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n", in i40e_add_del_fdir()
657 input->flow_type); in i40e_add_del_fdir()
658 return -EINVAL; in i40e_add_del_fdir()
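
Editor's note: i40e_add_del_fdir() above dispatches on the ethtool flow type (with FLOW_EXT masked off, line 594) and, for the user-defined IP flows, on the L4 protocol. The sketch below shows that dispatch shape using the uapi ethtool constants; the demo_program_filter() callback is a hypothetical placeholder, not a driver function.

#include <stdio.h>
#include <stdbool.h>
#include <linux/ethtool.h>   /* TCP_V4_FLOW, UDP_V6_FLOW, FLOW_EXT, ... */
#include <netinet/in.h>      /* IPPROTO_TCP, IPPROTO_UDP, IPPROTO_SCTP  */

/* Hypothetical placeholder for the per-protocol add/del helpers. */
static int demo_program_filter(const char *kind, bool add)
{
	printf("%s %s filter\n", add ? "add" : "del", kind);
	return 0;
}

/* Dispatch on ethtool flow type the way the fragment above does;
 * FLOW_EXT only flags extra (VLAN/user-def) fields, so it is masked off.
 */
static int demo_add_del_fdir(unsigned int flow_type, unsigned int l4_proto, bool add)
{
	switch (flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		return demo_program_filter("tcp", add);
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		return demo_program_filter("udp", add);
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		return demo_program_filter("sctp", add);
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		switch (l4_proto) {
		case IPPROTO_TCP:
			return demo_program_filter("tcp", add);
		case IPPROTO_UDP:
			return demo_program_filter("udp", add);
		case IPPROTO_SCTP:
			return demo_program_filter("sctp", add);
		default:
			return demo_program_filter("raw ip", add);
		}
	default:
		fprintf(stderr, "Unsupported flow type 0x%02x\n", flow_type);
		return -1;
	}
}

int main(void)
{
	return demo_add_del_fdir(TCP_V4_FLOW, 0, true);
}
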
671 * i40e_fd_handle_status - check the Programming Status for FD
683 struct i40e_pf *pf = rx_ring->vsi->back; in i40e_fd_handle_status()
684 struct pci_dev *pdev = pf->pdev; in i40e_fd_handle_status()
693 pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id); in i40e_fd_handle_status()
694 if (qw0->hi_dword.fd_id != 0 || in i40e_fd_handle_status()
695 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_fd_handle_status()
696 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", in i40e_fd_handle_status()
697 pf->fd_inv); in i40e_fd_handle_status()
700 * If so, auto disable ATR and set a state for in i40e_fd_handle_status()
705 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fd_handle_status()
708 pf->fd_add_err++; in i40e_fd_handle_status()
710 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); in i40e_fd_handle_status()
712 if (qw0->hi_dword.fd_id == 0 && in i40e_fd_handle_status()
713 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) { in i40e_fd_handle_status()
716 * disable ATR and queue a flush right after SB in i40e_fd_handle_status()
717 * support is re-enabled. That shouldn't cause an in i40e_fd_handle_status()
720 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fd_handle_status()
721 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); in i40e_fd_handle_status()
726 fcnt_avail = pf->fdir_pf_filter_count; in i40e_fd_handle_status()
728 * if we are very close to full, it makes sense to disable in i40e_fd_handle_status()
729 * FD ATR/SB and then re-enable it when there is room. in i40e_fd_handle_status()
731 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { in i40e_fd_handle_status()
732 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && in i40e_fd_handle_status()
734 pf->state)) in i40e_fd_handle_status()
735 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fd_handle_status()
736 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); in i40e_fd_handle_status()
739 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fd_handle_status()
740 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", in i40e_fd_handle_status()
741 qw0->hi_dword.fd_id); in i40e_fd_handle_status()
746 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
753 if (tx_buffer->skb) { in i40e_unmap_and_free_tx_resource()
754 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) in i40e_unmap_and_free_tx_resource()
755 kfree(tx_buffer->raw_buf); in i40e_unmap_and_free_tx_resource()
757 xdp_return_frame(tx_buffer->xdpf); in i40e_unmap_and_free_tx_resource()
759 dev_kfree_skb_any(tx_buffer->skb); in i40e_unmap_and_free_tx_resource()
761 dma_unmap_single(ring->dev, in i40e_unmap_and_free_tx_resource()
766 dma_unmap_page(ring->dev, in i40e_unmap_and_free_tx_resource()
772 tx_buffer->next_to_watch = NULL; in i40e_unmap_and_free_tx_resource()
773 tx_buffer->skb = NULL; in i40e_unmap_and_free_tx_resource()
779 * i40e_clean_tx_ring - Free all Tx buffers in a ring
787 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in i40e_clean_tx_ring()
791 if (!tx_ring->tx_bi) in i40e_clean_tx_ring()
795 for (i = 0; i < tx_ring->count; i++) in i40e_clean_tx_ring()
797 &tx_ring->tx_bi[i]); in i40e_clean_tx_ring()
800 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_clean_tx_ring()
801 memset(tx_ring->tx_bi, 0, bi_size); in i40e_clean_tx_ring()
804 memset(tx_ring->desc, 0, tx_ring->size); in i40e_clean_tx_ring()
806 tx_ring->next_to_use = 0; in i40e_clean_tx_ring()
807 tx_ring->next_to_clean = 0; in i40e_clean_tx_ring()
809 if (!tx_ring->netdev) in i40e_clean_tx_ring()
817 * i40e_free_tx_resources - Free Tx resources per queue
825 kfree(tx_ring->tx_bi); in i40e_free_tx_resources()
826 tx_ring->tx_bi = NULL; in i40e_free_tx_resources()
828 if (tx_ring->desc) { in i40e_free_tx_resources()
829 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40e_free_tx_resources()
830 tx_ring->desc, tx_ring->dma); in i40e_free_tx_resources()
831 tx_ring->desc = NULL; in i40e_free_tx_resources()
836 * i40e_get_tx_pending - how many tx descriptors not processed
849 tail = readl(ring->tail); in i40e_get_tx_pending()
851 head = ring->next_to_clean; in i40e_get_tx_pending()
852 tail = ring->next_to_use; in i40e_get_tx_pending()
857 tail - head : (tail + ring->count - head); in i40e_get_tx_pending()
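
Editor's note: i40e_get_tx_pending() computes how many descriptors still sit between head (next to clean) and tail (next to use), accounting for index wrap-around. The same arithmetic shown standalone; the helper name and parameters are illustrative.

#include <stdio.h>

/* Descriptors still outstanding between head (next to be cleaned) and
 * tail (next to be used), handling index wrap-around.
 */
static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	if (head == tail)
		return 0;
	return head <= tail ? tail - head : tail + count - head;
}

int main(void)
{
	printf("%u\n", tx_pending(3, 7, 512));     /* no wrap:  4 */
	printf("%u\n", tx_pending(500, 10, 512));  /* wrapped: 22 */
	return 0;
}
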
863 * i40e_detect_recover_hung - detect and recover hung Tx queues
881 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_detect_recover_hung()
884 netdev = vsi->netdev; in i40e_detect_recover_hung()
891 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_detect_recover_hung()
892 tx_ring = vsi->tx_rings[i]; in i40e_detect_recover_hung()
893 if (tx_ring && tx_ring->desc) { in i40e_detect_recover_hung()
901 packets = tx_ring->stats.packets & INT_MAX; in i40e_detect_recover_hung()
902 if (tx_ring->tx_stats.prev_pkt_ctr == packets) { in i40e_detect_recover_hung()
903 i40e_force_wb(vsi, tx_ring->q_vector); in i40e_detect_recover_hung()
911 tx_ring->tx_stats.prev_pkt_ctr = in i40e_detect_recover_hung()
912 i40e_get_tx_pending(tx_ring, true) ? packets : -1; in i40e_detect_recover_hung()
918 * i40e_clean_tx_irq - Reclaim resources after transmit completes
930 int i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
935 unsigned int budget = vsi->work_limit; in i40e_clean_tx_irq()
937 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
939 i -= tx_ring->count; in i40e_clean_tx_irq()
944 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; in i40e_clean_tx_irq()
959 tx_buf->next_to_watch = NULL; in i40e_clean_tx_irq()
962 total_bytes += tx_buf->bytecount; in i40e_clean_tx_irq()
963 total_packets += tx_buf->gso_segs; in i40e_clean_tx_irq()
967 xdp_return_frame(tx_buf->xdpf); in i40e_clean_tx_irq()
969 napi_consume_skb(tx_buf->skb, napi_budget); in i40e_clean_tx_irq()
972 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
978 tx_buf->skb = NULL; in i40e_clean_tx_irq()
990 i -= tx_ring->count; in i40e_clean_tx_irq()
991 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
997 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
1010 i -= tx_ring->count; in i40e_clean_tx_irq()
1011 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
1018 budget--; in i40e_clean_tx_irq()
1021 i += tx_ring->count; in i40e_clean_tx_irq()
1022 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
1034 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
1040 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
1041 tx_ring->queue_index) && in i40e_clean_tx_irq()
1042 !test_bit(__I40E_VSI_DOWN, vsi->state)) { in i40e_clean_tx_irq()
1043 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
1044 tx_ring->queue_index); in i40e_clean_tx_irq()
1045 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
1054 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
1062 u16 flags = q_vector->tx.ring[0].flags; in i40e_enable_wb_on_itr()
1068 if (q_vector->arm_wb_state) in i40e_enable_wb_on_itr()
1071 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { in i40e_enable_wb_on_itr()
1075 wr32(&vsi->back->hw, in i40e_enable_wb_on_itr()
1076 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), in i40e_enable_wb_on_itr()
1082 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); in i40e_enable_wb_on_itr()
1084 q_vector->arm_wb_state = true; in i40e_enable_wb_on_itr()
1088 * i40e_force_wb - Issue SW Interrupt so HW does a wb
1095 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { in i40e_force_wb()
1102 wr32(&vsi->back->hw, in i40e_force_wb()
1103 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val); in i40e_force_wb()
1111 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); in i40e_force_wb()
1118 return &q_vector->rx == rc; in i40e_container_is_rx()
1125 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) { in i40e_itr_divisor()
1147 * i40e_update_itr - update the dynamic ITR value based on statistics
1168 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) in i40e_update_itr()
1178 /* If we didn't update within up to 1 - 2 jiffies we can assume in i40e_update_itr()
1183 if (time_after(next_update, rc->next_update)) in i40e_update_itr()
1192 if (q_vector->itr_countdown) { in i40e_update_itr()
1193 itr = rc->target_itr; in i40e_update_itr()
1197 packets = rc->total_packets; in i40e_update_itr()
1198 bytes = rc->total_bytes; in i40e_update_itr()
1207 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { in i40e_update_itr()
1217 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && in i40e_update_itr()
1218 (q_vector->rx.target_itr & I40E_ITR_MASK) == in i40e_update_itr()
1225 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; in i40e_update_itr()
1237 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; in i40e_update_itr()
1246 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); in i40e_update_itr()
1343 rc->target_itr = itr; in i40e_update_itr()
1346 rc->next_update = next_update + 1; in i40e_update_itr()
1348 rc->total_bytes = 0; in i40e_update_itr()
1349 rc->total_packets = 0; in i40e_update_itr()
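
Editor's note: the i40e_update_itr() fragments above implement adaptive interrupt moderation: the driver tracks packets and bytes per ring container, derives a target ITR (interrupt throttling interval), and walks current_itr toward it. The sketch below is a deliberately simplified heuristic in the same spirit, not the driver's actual algorithm; the thresholds, bounds and step size are arbitrary.

#include <stdio.h>

#define DEMO_ITR_MIN_USECS   2u    /* favour latency              */
#define DEMO_ITR_MAX_USECS 250u    /* favour interrupt coalescing */
#define DEMO_ITR_STEP       10u

/* Pick a target throttling interval from observed traffic and move the
 * current setting toward it one step per update.
 */
static unsigned int demo_update_itr(unsigned int cur_itr,
				    unsigned int packets, unsigned int bytes)
{
	unsigned int target;

	if (!packets)
		return cur_itr;                  /* nothing new to learn */

	if (bytes / packets >= 1024 || packets > 256)
		target = DEMO_ITR_MAX_USECS;     /* bulk traffic: coalesce  */
	else
		target = DEMO_ITR_MIN_USECS;     /* small/slow: low latency */

	if (cur_itr + DEMO_ITR_STEP < target)
		return cur_itr + DEMO_ITR_STEP;
	if (cur_itr > target + DEMO_ITR_STEP)
		return cur_itr - DEMO_ITR_STEP;
	return target;
}

int main(void)
{
	unsigned int itr = 50;

	itr = demo_update_itr(itr, 300, 450000);  /* bulk: steps toward max */
	printf("itr after bulk interval: %u usecs\n", itr);
	itr = demo_update_itr(itr, 4, 256);       /* sparse: steps toward min */
	printf("itr after quiet interval: %u usecs\n", itr);
	return 0;
}
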
1354 return &rx_ring->rx_bi[idx]; in i40e_rx_bi()
1358 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1368 u16 nta = rx_ring->next_to_alloc; in i40e_reuse_rx_page()
1374 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in i40e_reuse_rx_page()
1377 new_buff->dma = old_buff->dma; in i40e_reuse_rx_page()
1378 new_buff->page = old_buff->page; in i40e_reuse_rx_page()
1379 new_buff->page_offset = old_buff->page_offset; in i40e_reuse_rx_page()
1380 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in i40e_reuse_rx_page()
1383 old_buff->page = NULL; in i40e_reuse_rx_page()
1387 * i40e_clean_programming_status - clean the programming status descriptor
1410 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1417 struct device *dev = tx_ring->dev; in i40e_setup_tx_descriptors()
1421 return -ENOMEM; in i40e_setup_tx_descriptors()
1424 WARN_ON(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1425 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_setup_tx_descriptors()
1426 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40e_setup_tx_descriptors()
1427 if (!tx_ring->tx_bi) in i40e_setup_tx_descriptors()
1430 u64_stats_init(&tx_ring->syncp); in i40e_setup_tx_descriptors()
1433 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40e_setup_tx_descriptors()
1437 tx_ring->size += sizeof(u32); in i40e_setup_tx_descriptors()
1438 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40e_setup_tx_descriptors()
1439 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40e_setup_tx_descriptors()
1440 &tx_ring->dma, GFP_KERNEL); in i40e_setup_tx_descriptors()
1441 if (!tx_ring->desc) { in i40e_setup_tx_descriptors()
1443 tx_ring->size); in i40e_setup_tx_descriptors()
1447 tx_ring->next_to_use = 0; in i40e_setup_tx_descriptors()
1448 tx_ring->next_to_clean = 0; in i40e_setup_tx_descriptors()
1449 tx_ring->tx_stats.prev_pkt_ctr = -1; in i40e_setup_tx_descriptors()
1453 kfree(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1454 tx_ring->tx_bi = NULL; in i40e_setup_tx_descriptors()
1455 return -ENOMEM; in i40e_setup_tx_descriptors()
1460 memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); in i40e_clear_rx_bi()
1464 * i40e_clean_rx_ring - Free Rx buffers
1472 if (!rx_ring->rx_bi) in i40e_clean_rx_ring()
1475 if (rx_ring->xsk_pool) { in i40e_clean_rx_ring()
1481 for (i = 0; i < rx_ring->count; i++) { in i40e_clean_rx_ring()
1484 if (!rx_bi->page) in i40e_clean_rx_ring()
1490 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_clean_rx_ring()
1491 rx_bi->dma, in i40e_clean_rx_ring()
1492 rx_bi->page_offset, in i40e_clean_rx_ring()
1493 rx_ring->rx_buf_len, in i40e_clean_rx_ring()
1497 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, in i40e_clean_rx_ring()
1502 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); in i40e_clean_rx_ring()
1504 rx_bi->page = NULL; in i40e_clean_rx_ring()
1505 rx_bi->page_offset = 0; in i40e_clean_rx_ring()
1509 if (rx_ring->xsk_pool) in i40e_clean_rx_ring()
1515 memset(rx_ring->desc, 0, rx_ring->size); in i40e_clean_rx_ring()
1517 rx_ring->next_to_alloc = 0; in i40e_clean_rx_ring()
1518 rx_ring->next_to_clean = 0; in i40e_clean_rx_ring()
1519 rx_ring->next_to_process = 0; in i40e_clean_rx_ring()
1520 rx_ring->next_to_use = 0; in i40e_clean_rx_ring()
1524 * i40e_free_rx_resources - Free Rx resources
1532 if (rx_ring->vsi->type == I40E_VSI_MAIN) in i40e_free_rx_resources()
1533 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in i40e_free_rx_resources()
1534 rx_ring->xdp_prog = NULL; in i40e_free_rx_resources()
1535 kfree(rx_ring->rx_bi); in i40e_free_rx_resources()
1536 rx_ring->rx_bi = NULL; in i40e_free_rx_resources()
1538 if (rx_ring->desc) { in i40e_free_rx_resources()
1539 dma_free_coherent(rx_ring->dev, rx_ring->size, in i40e_free_rx_resources()
1540 rx_ring->desc, rx_ring->dma); in i40e_free_rx_resources()
1541 rx_ring->desc = NULL; in i40e_free_rx_resources()
1546 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1553 struct device *dev = rx_ring->dev; in i40e_setup_rx_descriptors()
1555 u64_stats_init(&rx_ring->syncp); in i40e_setup_rx_descriptors()
1558 rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc); in i40e_setup_rx_descriptors()
1559 rx_ring->size = ALIGN(rx_ring->size, 4096); in i40e_setup_rx_descriptors()
1560 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in i40e_setup_rx_descriptors()
1561 &rx_ring->dma, GFP_KERNEL); in i40e_setup_rx_descriptors()
1563 if (!rx_ring->desc) { in i40e_setup_rx_descriptors()
1565 rx_ring->size); in i40e_setup_rx_descriptors()
1566 return -ENOMEM; in i40e_setup_rx_descriptors()
1569 rx_ring->next_to_alloc = 0; in i40e_setup_rx_descriptors()
1570 rx_ring->next_to_clean = 0; in i40e_setup_rx_descriptors()
1571 rx_ring->next_to_process = 0; in i40e_setup_rx_descriptors()
1572 rx_ring->next_to_use = 0; in i40e_setup_rx_descriptors()
1574 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; in i40e_setup_rx_descriptors()
1576 rx_ring->rx_bi = in i40e_setup_rx_descriptors()
1577 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL); in i40e_setup_rx_descriptors()
1578 if (!rx_ring->rx_bi) in i40e_setup_rx_descriptors()
1579 return -ENOMEM; in i40e_setup_rx_descriptors()
1585 * i40e_release_rx_desc - Store the new tail and head values
1591 rx_ring->next_to_use = val; in i40e_release_rx_desc()
1594 rx_ring->next_to_alloc = val; in i40e_release_rx_desc()
1598 * applicable for weak-ordered memory model archs, in i40e_release_rx_desc()
1599 * such as IA-64). in i40e_release_rx_desc()
1602 writel(val, rx_ring->tail); in i40e_release_rx_desc()
1611 truesize = rx_ring->rx_offset ? in i40e_rx_frame_truesize()
1612 SKB_DATA_ALIGN(size + rx_ring->rx_offset) + in i40e_rx_frame_truesize()
1620 * i40e_alloc_mapped_page - recycle or make a new page
1630 struct page *page = bi->page; in i40e_alloc_mapped_page()
1635 rx_ring->rx_stats.page_reuse_count++; in i40e_alloc_mapped_page()
1642 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_mapped_page()
1646 rx_ring->rx_stats.page_alloc_count++; in i40e_alloc_mapped_page()
1649 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in i40e_alloc_mapped_page()
1657 if (dma_mapping_error(rx_ring->dev, dma)) { in i40e_alloc_mapped_page()
1659 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_mapped_page()
1663 bi->dma = dma; in i40e_alloc_mapped_page()
1664 bi->page = page; in i40e_alloc_mapped_page()
1665 bi->page_offset = rx_ring->rx_offset; in i40e_alloc_mapped_page()
1666 page_ref_add(page, USHRT_MAX - 1); in i40e_alloc_mapped_page()
1667 bi->pagecnt_bias = USHRT_MAX; in i40e_alloc_mapped_page()
1673 * i40e_alloc_rx_buffers - Replace used receive buffers
1681 u16 ntu = rx_ring->next_to_use; in i40e_alloc_rx_buffers()
1686 if (!rx_ring->netdev || !cleaned_count) in i40e_alloc_rx_buffers()
1697 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in i40e_alloc_rx_buffers()
1698 bi->page_offset, in i40e_alloc_rx_buffers()
1699 rx_ring->rx_buf_len, in i40e_alloc_rx_buffers()
1703 * because each write-back erases this info. in i40e_alloc_rx_buffers()
1705 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in i40e_alloc_rx_buffers()
1710 if (unlikely(ntu == rx_ring->count)) { in i40e_alloc_rx_buffers()
1717 rx_desc->wb.qword1.status_error_len = 0; in i40e_alloc_rx_buffers()
1719 cleaned_count--; in i40e_alloc_rx_buffers()
1722 if (rx_ring->next_to_use != ntu) in i40e_alloc_rx_buffers()
1728 if (rx_ring->next_to_use != ntu) in i40e_alloc_rx_buffers()
1738 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1753 skb->ip_summed = CHECKSUM_NONE; in i40e_rx_checksum()
1755 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); in i40e_rx_checksum()
1759 if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded)) in i40e_rx_checksum()
1780 /* don't increment checksum err here, non-fatal err */ in i40e_rx_checksum()
1799 skb->csum_level = 1; in i40e_rx_checksum()
1801 skb->ip_summed = CHECKSUM_UNNECESSARY; in i40e_rx_checksum()
1805 vsi->back->hw_csum_rx_error++; in i40e_rx_checksum()
1809 * i40e_rx_hash - set the hash value in the skb
1827 if (!libeth_rx_pt_has_hash(ring->netdev, decoded)) in i40e_rx_hash()
1830 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { in i40e_rx_hash()
1831 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); in i40e_rx_hash()
1837 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1839 * @rx_desc: pointer to the EOP Rx descriptor
1849 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); in i40e_process_skb_fields()
1856 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); in i40e_process_skb_fields()
1860 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); in i40e_process_skb_fields()
1862 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_process_skb_fields()
1865 __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1; in i40e_process_skb_fields()
1871 /* modifies the skb - consumes the enet header */ in i40e_process_skb_fields()
1872 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_process_skb_fields()
1876 * i40e_cleanup_headers - Correct empty headers
1879 * @rx_desc: pointer to the EOP Rx descriptor
1890 /* ERR_MASK will only have valid bits if EOP set, and in i40e_cleanup_headers()
1909 * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
1924 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in i40e_can_reuse_rx_page()
1925 struct page *page = rx_buffer->page; in i40e_can_reuse_rx_page()
1929 rx_stats->page_waive_count++; in i40e_can_reuse_rx_page()
1935 if (unlikely((rx_buffer->page_count - pagecnt_bias) > 1)) { in i40e_can_reuse_rx_page()
1936 rx_stats->page_busy_count++; in i40e_can_reuse_rx_page()
1941 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) in i40e_can_reuse_rx_page()
1942 if (rx_buffer->page_offset > I40E_LAST_OFFSET) { in i40e_can_reuse_rx_page()
1943 rx_stats->page_busy_count++; in i40e_can_reuse_rx_page()
1953 page_ref_add(page, USHRT_MAX - 1); in i40e_can_reuse_rx_page()
1954 rx_buffer->pagecnt_bias = USHRT_MAX; in i40e_can_reuse_rx_page()
1961 * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region
1969 rx_buffer->page_offset ^= truesize; in i40e_rx_buffer_flip()
1971 rx_buffer->page_offset += truesize; in i40e_rx_buffer_flip()
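
Editor's note: i40e_can_reuse_rx_page() and i40e_rx_buffer_flip() above implement the half-page recycling scheme: a page may be reused only when the stack has dropped its references (page_count minus the driver's pagecnt_bias is at most 1), and for order-0 pages the buffer simply flips between the two halves by XOR-ing the offset with the truesize. The standalone model below captures that decision and flip; the names and the 2048-byte buffer size are illustrative, and the real code also waives unsuitable (pfmemalloc) pages and re-charges the bias near overflow, as seen in the fragments.

#include <stdio.h>
#include <stdbool.h>

struct demo_rx_buffer {
	unsigned int page_count;    /* current page refcount                */
	unsigned int pagecnt_bias;  /* references still owned by the driver */
	unsigned int page_offset;   /* which half of the page is in use     */
};

#define DEMO_BUF_LEN 2048u          /* half of a 4 KiB page */

/* Reuse is only safe when nobody outside the driver still holds the page:
 * refcount minus the driver's own bias must not exceed 1.
 */
static bool demo_can_reuse(const struct demo_rx_buffer *b)
{
	return (b->page_count - b->pagecnt_bias) <= 1;
}

/* Flip to the other half of the page for the next received frame. */
static void demo_buffer_flip(struct demo_rx_buffer *b)
{
	b->page_offset ^= DEMO_BUF_LEN;
}

int main(void)
{
	struct demo_rx_buffer b = { .page_count = 65535, .pagecnt_bias = 65535,
				    .page_offset = 0 };

	if (demo_can_reuse(&b)) {
		demo_buffer_flip(&b);
		printf("reused page, next offset = %u\n", b.page_offset);
	}
	return 0;
}
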
1976 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1988 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process); in i40e_get_rx_buffer()
1989 rx_buffer->page_count = in i40e_get_rx_buffer()
1991 page_count(rx_buffer->page); in i40e_get_rx_buffer()
1995 prefetch_page_address(rx_buffer->page); in i40e_get_rx_buffer()
1998 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_get_rx_buffer()
1999 rx_buffer->dma, in i40e_get_rx_buffer()
2000 rx_buffer->page_offset, in i40e_get_rx_buffer()
2005 rx_buffer->pagecnt_bias--; in i40e_get_rx_buffer()
2011 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2021 if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats)) { in i40e_put_rx_buffer()
2026 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in i40e_put_rx_buffer()
2029 __page_frag_cache_drain(rx_buffer->page, in i40e_put_rx_buffer()
2030 rx_buffer->pagecnt_bias); in i40e_put_rx_buffer()
2032 rx_buffer->page = NULL; in i40e_put_rx_buffer()
2037 * i40e_process_rx_buffs - Process buffers after the XDP program ran or on error
2045 u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; in i40e_process_rx_buffs()
2046 u32 next = rx_ring->next_to_clean, i = 0; in i40e_process_rx_buffs()
2049 xdp->flags = 0; in i40e_process_rx_buffs()
2053 if (++next == rx_ring->count) in i40e_process_rx_buffs()
2056 if (!rx_buffer->page) in i40e_process_rx_buffs()
2060 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_process_rx_buffs()
2062 rx_buffer->pagecnt_bias++; in i40e_process_rx_buffs()
2064 /* EOP buffer will be put in i40e_clean_rx_irq() */ in i40e_process_rx_buffs()
2065 if (next == rx_ring->next_to_process) in i40e_process_rx_buffs()
2073 * i40e_construct_skb - Allocate skb and populate it
2084 unsigned int size = xdp->data_end - xdp->data; in i40e_construct_skb()
2092 net_prefetch(xdp->data); in i40e_construct_skb()
2094 /* Note, we get here by enabling legacy-rx via: in i40e_construct_skb()
2096 * ethtool --set-priv-flags <dev> legacy-rx on in i40e_construct_skb()
2099 * opposed to having legacy-rx off, where we process XDP in i40e_construct_skb()
2104 * xdp->data_meta will always point to xdp->data, since in i40e_construct_skb()
2106 * change in future for legacy-rx mode on, then lets also in i40e_construct_skb()
2107 * add xdp->data_meta handling here. in i40e_construct_skb()
2111 skb = napi_alloc_skb(&rx_ring->q_vector->napi, I40E_RX_HDR_SIZE); in i40e_construct_skb()
2118 headlen = eth_get_headlen(skb->dev, xdp->data, in i40e_construct_skb()
2122 memcpy(__skb_put(skb, headlen), xdp->data, in i40e_construct_skb()
2127 nr_frags = sinfo->nr_frags; in i40e_construct_skb()
2129 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); in i40e_construct_skb()
2131 size -= headlen; in i40e_construct_skb()
2137 skb_add_rx_frag(skb, 0, rx_buffer->page, in i40e_construct_skb()
2138 rx_buffer->page_offset + headlen, in i40e_construct_skb()
2139 size, xdp->frame_sz); in i40e_construct_skb()
2141 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_construct_skb()
2144 rx_buffer->pagecnt_bias++; in i40e_construct_skb()
2150 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], in i40e_construct_skb()
2153 xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags, in i40e_construct_skb()
2154 sinfo->xdp_frags_size, in i40e_construct_skb()
2155 nr_frags * xdp->frame_sz, in i40e_construct_skb()
2159 if (++rx_ring->next_to_clean == rx_ring->count) in i40e_construct_skb()
2160 rx_ring->next_to_clean = 0; in i40e_construct_skb()
2169 * i40e_build_skb - Build skb around an existing buffer
2179 unsigned int metasize = xdp->data - xdp->data_meta; in i40e_build_skb()
2184 /* Prefetch first cache line of first page. If xdp->data_meta in i40e_build_skb()
2185 * is unused, this points exactly as xdp->data, otherwise we in i40e_build_skb()
2189 net_prefetch(xdp->data_meta); in i40e_build_skb()
2193 nr_frags = sinfo->nr_frags; in i40e_build_skb()
2197 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); in i40e_build_skb()
2202 skb_reserve(skb, xdp->data - xdp->data_hard_start); in i40e_build_skb()
2203 __skb_put(skb, xdp->data_end - xdp->data); in i40e_build_skb()
2209 sinfo->xdp_frags_size, in i40e_build_skb()
2210 nr_frags * xdp->frame_sz, in i40e_build_skb()
2217 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); in i40e_build_skb()
2219 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_build_skb()
2226 * i40e_is_non_eop - process handling of non-EOP buffers
2230 * If the buffer is an EOP buffer, this function exits returning false,
2231 * otherwise return true indicating that this is in fact a non-EOP buffer.
2241 rx_ring->rx_stats.non_eop_descs++; in i40e_is_non_eop()
2260 * i40e_run_xdp - run an XDP program
2274 prefetchw(xdp->data_hard_start); /* xdp_frame write */ in i40e_run_xdp()
2281 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp()
2287 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in i40e_run_xdp()
2293 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in i40e_run_xdp()
2297 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in i40e_run_xdp()
2308 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2319 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); in i40e_xdp_ring_update_tail()
2323 * i40e_update_rx_stats - Update Rx ring statistics
2334 u64_stats_update_begin(&rx_ring->syncp); in i40e_update_rx_stats()
2335 rx_ring->stats.packets += total_rx_packets; in i40e_update_rx_stats()
2336 rx_ring->stats.bytes += total_rx_bytes; in i40e_update_rx_stats()
2337 u64_stats_update_end(&rx_ring->syncp); in i40e_update_rx_stats()
2338 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_update_rx_stats()
2339 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_update_rx_stats()
2343 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
2358 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_finalize_xdp_rx()
2370 u32 ntp = rx_ring->next_to_process + 1; in i40e_inc_ntp()
2372 ntp = (ntp < rx_ring->count) ? ntp : 0; in i40e_inc_ntp()
2373 rx_ring->next_to_process = ntp; in i40e_inc_ntp()
2390 sinfo->nr_frags = 0; in i40e_add_xdp_frag()
2391 sinfo->xdp_frags_size = 0; in i40e_add_xdp_frag()
2393 } else if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) { in i40e_add_xdp_frag()
2395 return -ENOMEM; in i40e_add_xdp_frag()
2398 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buffer->page, in i40e_add_xdp_frag()
2399 rx_buffer->page_offset, size); in i40e_add_xdp_frag()
2401 sinfo->xdp_frags_size += size; in i40e_add_xdp_frag()
2403 if (page_is_pfmemalloc(rx_buffer->page)) in i40e_add_xdp_frag()
2405 *nr_frags = sinfo->nr_frags; in i40e_add_xdp_frag()
2411 * i40e_consume_xdp_buff - Consume all the buffers of the packet and update ntc
2414 * @rx_buffer: rx_buffer of eop desc
2422 rx_ring->next_to_clean = rx_ring->next_to_process; in i40e_consume_xdp_buff()
2423 xdp->data = NULL; in i40e_consume_xdp_buff()
2427 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2444 u16 clean_threshold = rx_ring->count / 2; in i40e_clean_rx_irq()
2445 unsigned int offset = rx_ring->rx_offset; in i40e_clean_rx_irq()
2446 struct xdp_buff *xdp = &rx_ring->xdp; in i40e_clean_rx_irq()
2452 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in i40e_clean_rx_irq()
2455 u16 ntp = rx_ring->next_to_process; in i40e_clean_rx_irq()
2476 * hardware wrote DD then the length will be non-zero in i40e_clean_rx_irq()
2478 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); in i40e_clean_rx_irq()
2488 rx_desc->raw.qword[0], in i40e_clean_rx_irq()
2496 if (rx_ring->next_to_clean == ntp) { in i40e_clean_rx_irq()
2497 rx_ring->next_to_clean = in i40e_clean_rx_irq()
2498 rx_ring->next_to_process; in i40e_clean_rx_irq()
2515 if (!xdp->data) { in i40e_clean_rx_irq()
2518 hard_start = page_address(rx_buffer->page) + in i40e_clean_rx_irq()
2519 rx_buffer->page_offset - offset; in i40e_clean_rx_irq()
2523 xdp->frame_sz = i40e_rx_frame_truesize(rx_ring, size); in i40e_clean_rx_irq()
2527 /* Overflowing packet: Drop all frags on EOP */ in i40e_clean_rx_irq()
2544 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_clean_rx_irq()
2546 rx_buffer->pagecnt_bias++; in i40e_clean_rx_irq()
2557 rx_ring->rx_stats.alloc_buff_failed++; in i40e_clean_rx_irq()
2566 total_rx_bytes += skb->len; in i40e_clean_rx_irq()
2572 napi_gro_receive(&rx_ring->q_vector->napi, skb); in i40e_clean_rx_irq()
2580 rx_ring->next_to_clean = rx_ring->next_to_process; in i40e_clean_rx_irq()
2582 xdp->data = NULL; in i40e_clean_rx_irq()
2596 * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register
2616 * auto-cleared". The auto-clearing happens when the interrupt is in i40e_buildreg_itr()
2622 * to hold pending events for us until the interrupt is re-enabled in i40e_buildreg_itr()
2661 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2670 struct i40e_hw *hw = &vsi->back->hw; in i40e_update_enable_itr()
2674 /* If we don't have MSIX, then we only need to re-enable icr0 */ in i40e_update_enable_itr()
2675 if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { in i40e_update_enable_itr()
2676 i40e_irq_dynamic_enable_icr0(vsi->back); in i40e_update_enable_itr()
2681 i40e_update_itr(q_vector, &q_vector->tx); in i40e_update_enable_itr()
2682 i40e_update_itr(q_vector, &q_vector->rx); in i40e_update_enable_itr()
2686 * pseudo-lazy update with the following criteria. in i40e_update_enable_itr()
2692 if (q_vector->rx.target_itr < q_vector->rx.current_itr) { in i40e_update_enable_itr()
2695 interval = q_vector->rx.target_itr; in i40e_update_enable_itr()
2696 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_update_enable_itr()
2697 q_vector->itr_countdown = ITR_COUNTDOWN_START; in i40e_update_enable_itr()
2698 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || in i40e_update_enable_itr()
2699 ((q_vector->rx.target_itr - q_vector->rx.current_itr) < in i40e_update_enable_itr()
2700 (q_vector->tx.target_itr - q_vector->tx.current_itr))) { in i40e_update_enable_itr()
2705 interval = q_vector->tx.target_itr; in i40e_update_enable_itr()
2706 q_vector->tx.current_itr = q_vector->tx.target_itr; in i40e_update_enable_itr()
2707 q_vector->itr_countdown = ITR_COUNTDOWN_START; in i40e_update_enable_itr()
2708 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { in i40e_update_enable_itr()
2711 interval = q_vector->rx.target_itr; in i40e_update_enable_itr()
2712 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_update_enable_itr()
2713 q_vector->itr_countdown = ITR_COUNTDOWN_START; in i40e_update_enable_itr()
2716 if (q_vector->itr_countdown) in i40e_update_enable_itr()
2717 q_vector->itr_countdown--; in i40e_update_enable_itr()
2721 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_update_enable_itr()
2727 if (q_vector->in_busy_poll) { in i40e_update_enable_itr()
2729 q_vector->in_busy_poll = false; in i40e_update_enable_itr()
2733 wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val); in i40e_update_enable_itr()
2737 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2749 struct i40e_vsi *vsi = q_vector->vsi; in i40e_napi_poll()
2760 if (test_bit(__I40E_VSI_DOWN, vsi->state)) { in i40e_napi_poll()
2768 i40e_for_each_ring(ring, q_vector->tx) { in i40e_napi_poll()
2769 bool wd = ring->xsk_pool ? in i40e_napi_poll()
2777 arm_wb |= ring->arm_wb; in i40e_napi_poll()
2778 ring->arm_wb = false; in i40e_napi_poll()
2786 if (unlikely(q_vector->num_ringpairs > 1)) in i40e_napi_poll()
2791 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1); in i40e_napi_poll()
2796 i40e_for_each_ring(ring, q_vector->rx) { in i40e_napi_poll()
2797 int cleaned = ring->xsk_pool ? in i40e_napi_poll()
2822 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { in i40e_napi_poll()
2829 /* Return budget-1 so that polling stops */ in i40e_napi_poll()
2830 return budget - 1; in i40e_napi_poll()
2834 q_vector->tx.ring[0].tx_stats.tx_force_wb++; in i40e_napi_poll()
2840 if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR) in i40e_napi_poll()
2841 q_vector->arm_wb_state = false; in i40e_napi_poll()
2843 /* Exit the polling mode, but don't re-enable interrupts if stack might in i40e_napi_poll()
2844 * poll us due to busy-polling in i40e_napi_poll()
2849 q_vector->in_busy_poll = true; in i40e_napi_poll()
2851 return min(work_done, budget - 1); in i40e_napi_poll()
2855 * i40e_atr - Add a Flow Director ATR filter
2864 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_atr()
2877 if (!test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) in i40e_atr()
2880 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_atr()
2884 if (!tx_ring->atr_sample_rate) in i40e_atr()
2901 l4_proto = hdr.ipv4->protocol; in i40e_atr()
2904 unsigned int inner_hlen = hdr.network - skb->data; in i40e_atr()
2911 hlen = h_offset - inner_hlen; in i40e_atr()
2920 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_atr()
2922 if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) { in i40e_atr()
2926 if (th->fin || th->rst) in i40e_atr()
2930 tx_ring->atr_count++; in i40e_atr()
2933 if (!th->fin && in i40e_atr()
2934 !th->syn && in i40e_atr()
2935 !th->rst && in i40e_atr()
2936 (tx_ring->atr_count < tx_ring->atr_sample_rate)) in i40e_atr()
2939 tx_ring->atr_count = 0; in i40e_atr()
2942 i = tx_ring->next_to_use; in i40e_atr()
2946 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_atr()
2949 tx_ring->queue_index); in i40e_atr()
2956 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; in i40e_atr()
2960 dtype_cmd |= (th->fin || th->rst) ? in i40e_atr()
2976 I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)); in i40e_atr()
2980 I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)); in i40e_atr()
2982 if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) in i40e_atr()
2985 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); in i40e_atr()
2986 fdir_desc->rsvd = cpu_to_le32(0); in i40e_atr()
2987 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); in i40e_atr()
2988 fdir_desc->fd_id = cpu_to_le32(0); in i40e_atr()
2992 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
3007 __be16 protocol = skb->protocol; in i40e_tx_prepare_vlan_flags()
3011 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40e_tx_prepare_vlan_flags()
3019 skb->protocol = vlan_get_protocol(skb); in i40e_tx_prepare_vlan_flags()
3033 return -EINVAL; in i40e_tx_prepare_vlan_flags()
3035 protocol = vhdr->h_vlan_encapsulated_proto; in i40e_tx_prepare_vlan_flags()
3036 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; in i40e_tx_prepare_vlan_flags()
3040 if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags)) in i40e_tx_prepare_vlan_flags()
3045 (skb->priority != TC_PRIO_CONTROL)) { in i40e_tx_prepare_vlan_flags()
3047 tx_flags |= (skb->priority & 0x7) << in i40e_tx_prepare_vlan_flags()
3057 vhdr->h_vlan_TCI = htons(tx_flags >> in i40e_tx_prepare_vlan_flags()
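
Editor's note: i40e_tx_prepare_vlan_flags() above keeps the VLAN ID in the low bits of tx_flags and, when DCB is enabled, overwrites the 3-bit priority field before handing the tag to hardware or writing it back into the header. The 802.1Q TCI layout itself is PCP in bits 15:13, DEI in bit 12 and VID in bits 11:0; a tiny sketch of packing it is below (the helper name and DEMO_* macros are made up for illustration).

#include <stdio.h>
#include <stdint.h>

#define DEMO_VLAN_PRIO_SHIFT 13
#define DEMO_VLAN_VID_MASK   0x0fffu

/* Pack an 802.1Q tag control field: 3-bit priority + 12-bit VLAN ID
 * (drop-eligible bit left clear).
 */
static uint16_t demo_build_tci(uint8_t prio, uint16_t vid)
{
	return (uint16_t)((prio & 0x7) << DEMO_VLAN_PRIO_SHIFT) |
	       (vid & DEMO_VLAN_VID_MASK);
}

int main(void)
{
	/* Priority 5, VLAN 100 -> 0xa064 */
	printf("tci = 0x%04x\n", demo_build_tci(5, 100));
	return 0;
}
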
3070 * i40e_tso - set up the tso context descriptor
3080 struct sk_buff *skb = first->skb; in i40e_tso()
3097 if (skb->ip_summed != CHECKSUM_PARTIAL) in i40e_tso()
3116 if (ip.v4->version == 4) { in i40e_tso()
3117 ip.v4->tot_len = 0; in i40e_tso()
3118 ip.v4->check = 0; in i40e_tso()
3120 first->tx_flags |= I40E_TX_FLAGS_TSO; in i40e_tso()
3122 ip.v6->payload_len = 0; in i40e_tso()
3123 first->tx_flags |= I40E_TX_FLAGS_TSO; in i40e_tso()
3126 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in i40e_tso()
3132 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in i40e_tso()
3133 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { in i40e_tso()
3134 l4.udp->len = 0; in i40e_tso()
3137 l4_offset = l4.hdr - skb->data; in i40e_tso()
3140 paylen = skb->len - l4_offset; in i40e_tso()
3141 csum_replace_by_diff(&l4.udp->check, in i40e_tso()
3150 if (ip.v4->version == 4) { in i40e_tso()
3151 ip.v4->tot_len = 0; in i40e_tso()
3152 ip.v4->check = 0; in i40e_tso()
3154 ip.v6->payload_len = 0; in i40e_tso()
3159 l4_offset = l4.hdr - skb->data; in i40e_tso()
3162 paylen = skb->len - l4_offset; in i40e_tso()
3164 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in i40e_tso()
3165 csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); in i40e_tso()
3169 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); in i40e_tso()
3171 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in i40e_tso()
3175 gso_size = skb_shinfo(skb)->gso_size; in i40e_tso()
3178 first->gso_segs = skb_shinfo(skb)->gso_segs; in i40e_tso()
3179 first->bytecount += (first->gso_segs - 1) * *hdr_len; in i40e_tso()
3183 cd_tso_len = skb->len - *hdr_len; in i40e_tso()
3192 * i40e_tsyn - set up the tsyn context descriptor
3205 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) in i40e_tsyn()
3215 pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_tsyn()
3216 if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) in i40e_tsyn()
3219 if (pf->ptp_tx && in i40e_tsyn()
3220 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) { in i40e_tsyn()
3221 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in i40e_tsyn()
3222 pf->ptp_tx_start = jiffies; in i40e_tsyn()
3223 pf->ptp_tx_skb = skb_get(skb); in i40e_tsyn()
3225 pf->tx_hwtstamp_skipped++; in i40e_tsyn()
3236 * i40e_tx_enable_csum - Enable Tx checksum offloads
3265 if (skb->ip_summed != CHECKSUM_PARTIAL) in i40e_tx_enable_csum()
3281 if (ip.v4->version == 4) in i40e_tx_enable_csum()
3287 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; in i40e_tx_enable_csum()
3289 if (skb->encapsulation) { in i40e_tx_enable_csum()
3297 l4_proto = ip.v4->protocol; in i40e_tx_enable_csum()
3304 l4_proto = ip.v6->nexthdr; in i40e_tx_enable_csum()
3305 ret = ipv6_skip_exthdr(skb, exthdr - skb->data, in i40e_tx_enable_csum()
3308 return -1; in i40e_tx_enable_csum()
3328 return -1; in i40e_tx_enable_csum()
3335 tunnel |= ((l4.hdr - ip.hdr) / 4) << in i40e_tx_enable_csum()
3342 tunnel |= ((ip.hdr - l4.hdr) / 2) << in i40e_tx_enable_csum()
3347 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in i40e_tx_enable_csum()
3348 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) in i40e_tx_enable_csum()
3360 if (ip.v4->version == 4) in i40e_tx_enable_csum()
3362 if (ip.v6->version == 6) in i40e_tx_enable_csum()
3368 l4_proto = ip.v4->protocol; in i40e_tx_enable_csum()
3379 l4_proto = ip.v6->nexthdr; in i40e_tx_enable_csum()
3381 ipv6_skip_exthdr(skb, exthdr - skb->data, in i40e_tx_enable_csum()
3386 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; in i40e_tx_enable_csum()
3393 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; in i40e_tx_enable_csum()
3409 return -1; in i40e_tx_enable_csum()
3421 * i40e_create_tx_ctx - Build the Tx context descriptor
3424 * @cd_tunneling: Quad Word 0 - bits 0-31
3425 * @cd_l2tag2: Quad Word 0 - bits 32-63
3432 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
3442 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
3445 context_desc->tunneling_params = cpu_to_le32(cd_tunneling); in i40e_create_tx_ctx()
3446 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); in i40e_create_tx_ctx()
3447 context_desc->rsvd = cpu_to_le16(0); in i40e_create_tx_ctx()
3448 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); in i40e_create_tx_ctx()
3452 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3456 * Returns -EBUSY if a stop is needed, else 0
3460 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3464 ++tx_ring->tx_stats.tx_stopped; in __i40e_maybe_stop_tx()
3468 return -EBUSY; in __i40e_maybe_stop_tx()
3470 /* A reprieve! - use start_queue because it doesn't call schedule */ in __i40e_maybe_stop_tx()
3471 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3472 ++tx_ring->tx_stats.restart_queue; in __i40e_maybe_stop_tx()
3477 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3495 nr_frags = skb_shinfo(skb)->nr_frags; in __i40e_chk_linearize()
3496 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) in __i40e_chk_linearize()
3502 nr_frags -= I40E_MAX_BUFFER_TXD - 2; in __i40e_chk_linearize()
3503 frag = &skb_shinfo(skb)->frags[0]; in __i40e_chk_linearize()
3511 sum = 1 - skb_shinfo(skb)->gso_size; in __i40e_chk_linearize()
3523 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { in __i40e_chk_linearize()
3535 int align_pad = -(skb_frag_off(stale)) & in __i40e_chk_linearize()
3536 (I40E_MAX_READ_REQ_SIZE - 1); in __i40e_chk_linearize()
3538 sum -= align_pad; in __i40e_chk_linearize()
3539 stale_size -= align_pad; in __i40e_chk_linearize()
3542 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; in __i40e_chk_linearize()
3543 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; in __i40e_chk_linearize()
3551 if (!nr_frags--) in __i40e_chk_linearize()
3554 sum -= stale_size; in __i40e_chk_linearize()
3561 * i40e_tx_map - Build the Tx descriptor
3570 * Returns 0 on success, -1 on failure to DMA
3576 unsigned int data_len = skb->data_len; in i40e_tx_map()
3581 u16 i = tx_ring->next_to_use; in i40e_tx_map()
3591 first->tx_flags = tx_flags; in i40e_tx_map()
3593 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
3598 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in i40e_tx_map()
3601 if (dma_mapping_error(tx_ring->dev, dma)) in i40e_tx_map()
3609 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); in i40e_tx_map()
3610 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_tx_map()
3613 tx_desc->cmd_type_offset_bsz = in i40e_tx_map()
3621 if (i == tx_ring->count) { in i40e_tx_map()
3627 size -= max_data; in i40e_tx_map()
3630 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_tx_map()
3636 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, in i40e_tx_map()
3643 if (i == tx_ring->count) { in i40e_tx_map()
3649 data_len -= size; in i40e_tx_map()
3651 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40e_tx_map()
3654 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3657 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in i40e_tx_map()
3660 if (i == tx_ring->count) in i40e_tx_map()
3663 tx_ring->next_to_use = i; in i40e_tx_map()
3667 /* write last descriptor with EOP bit */ in i40e_tx_map()
3671 * below. This is safe since we don't re-use desc_count afterwards. in i40e_tx_map()
3673 desc_count |= ++tx_ring->packet_stride; in i40e_tx_map()
3678 tx_ring->packet_stride = 0; in i40e_tx_map()
3681 tx_desc->cmd_type_offset_bsz = in i40e_tx_map()
3695 first->next_to_watch = tx_desc; in i40e_tx_map()
3699 writel(i, tx_ring->tail); in i40e_tx_map()
3705 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40e_tx_map()
3709 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3714 i = tx_ring->count; in i40e_tx_map()
3715 i--; in i40e_tx_map()
3718 tx_ring->next_to_use = i; in i40e_tx_map()
3720 return -1; in i40e_tx_map()
3730 if (skb->sk && skb->sk->sk_hash) in i40e_swdcb_skb_tx_hash()
3731 hash = skb->sk->sk_hash; in i40e_swdcb_skb_tx_hash()
3733 hash = (__force u16)skb->protocol ^ skb->hash; in i40e_swdcb_skb_tx_hash()
3745 struct i40e_vsi *vsi = np->vsi; in i40e_lan_select_queue()
3754 if (vsi->tc_config.numtc == 1 || in i40e_lan_select_queue()
3755 i40e_is_tc_mqprio_enabled(vsi->back)) in i40e_lan_select_queue()
3758 prio = skb->priority; in i40e_lan_select_queue()
3759 hw = &vsi->back->hw; in i40e_lan_select_queue()
3760 tclass = hw->local_dcbx_config.etscfg.prioritytable[prio]; in i40e_lan_select_queue()
3762 if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass)))) in i40e_lan_select_queue()
3766 qcount = vsi->tc_config.tc_info[tclass].qcount; in i40e_lan_select_queue()
3769 qoffset = vsi->tc_config.tc_info[tclass].qoffset; in i40e_lan_select_queue()
3774 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3782 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in i40e_xmit_xdp_ring()
3783 u16 i = 0, index = xdp_ring->next_to_use; in i40e_xmit_xdp_ring()
3784 struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index]; in i40e_xmit_xdp_ring()
3787 void *data = xdpf->data; in i40e_xmit_xdp_ring()
3788 u32 size = xdpf->len; in i40e_xmit_xdp_ring()
3791 xdp_ring->tx_stats.tx_busy++; in i40e_xmit_xdp_ring()
3795 tx_head->bytecount = xdp_get_frame_len(xdpf); in i40e_xmit_xdp_ring()
3796 tx_head->gso_segs = 1; in i40e_xmit_xdp_ring()
3797 tx_head->xdpf = xdpf; in i40e_xmit_xdp_ring()
3802 dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); in i40e_xmit_xdp_ring()
3803 if (dma_mapping_error(xdp_ring->dev, dma)) in i40e_xmit_xdp_ring()
3810 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_xmit_xdp_ring()
3811 tx_desc->cmd_type_offset_bsz = in i40e_xmit_xdp_ring()
3814 if (++index == xdp_ring->count) in i40e_xmit_xdp_ring()
3820 tx_bi = &xdp_ring->tx_bi[index]; in i40e_xmit_xdp_ring()
3823 data = skb_frag_address(&sinfo->frags[i]); in i40e_xmit_xdp_ring()
3824 size = skb_frag_size(&sinfo->frags[i]); in i40e_xmit_xdp_ring()
3828 tx_desc->cmd_type_offset_bsz |= in i40e_xmit_xdp_ring()
3836 xdp_ring->xdp_tx_active++; in i40e_xmit_xdp_ring()
3838 tx_head->next_to_watch = tx_desc; in i40e_xmit_xdp_ring()
3839 xdp_ring->next_to_use = index; in i40e_xmit_xdp_ring()
3845 tx_bi = &xdp_ring->tx_bi[index]; in i40e_xmit_xdp_ring()
3847 dma_unmap_page(xdp_ring->dev, in i40e_xmit_xdp_ring()
3856 index += xdp_ring->count; in i40e_xmit_xdp_ring()
3857 index--; in i40e_xmit_xdp_ring()
3864 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3884 prefetch(skb->data); in i40e_xmit_frame_ring()
3894 count = i40e_txd_use_count(skb->len); in i40e_xmit_frame_ring()
3895 tx_ring->tx_stats.tx_linearize++; in i40e_xmit_frame_ring()
3905 tx_ring->tx_stats.tx_busy++; in i40e_xmit_frame_ring()
3910 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
3911 first->skb = skb; in i40e_xmit_frame_ring()
3912 first->bytecount = skb->len; in i40e_xmit_frame_ring()
3913 first->gso_segs = 1; in i40e_xmit_frame_ring()
3956 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); in i40e_xmit_frame_ring()
3957 dev_kfree_skb_any(first->skb); in i40e_xmit_frame_ring()
3958 first->skb = NULL; in i40e_xmit_frame_ring()
3961 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_xmit_frame_ring()
3963 dev_kfree_skb_any(pf->ptp_tx_skb); in i40e_xmit_frame_ring()
3964 pf->ptp_tx_skb = NULL; in i40e_xmit_frame_ring()
3965 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); in i40e_xmit_frame_ring()
3972 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3981 struct i40e_vsi *vsi = np->vsi; in i40e_lan_xmit_frame()
3982 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; in i40e_lan_xmit_frame()
3994 * i40e_xdp_xmit - Implements ndo_xdp_xmit
4003 * For error cases, a negative errno code is returned and no frames
4011 struct i40e_vsi *vsi = np->vsi; in i40e_xdp_xmit()
4012 struct i40e_pf *pf = vsi->back; in i40e_xdp_xmit()
4017 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_xdp_xmit()
4018 return -ENETDOWN; in i40e_xdp_xmit()
4020 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs || in i40e_xdp_xmit()
4021 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_xdp_xmit()
4022 return -ENXIO; in i40e_xdp_xmit()
4025 return -EINVAL; in i40e_xdp_xmit()
4027 xdp_ring = vsi->xdp_rings[queue_index]; in i40e_xdp_xmit()