/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2018 Intel Corporation. */

#ifndef I40E_TXRX_COMMON_
#define I40E_TXRX_COMMON_

int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				   u64 qword1);
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb);
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
void i40e_update_rx_stats(struct i40e_ring *rx_ring,
			  unsigned int total_rx_bytes,
			  unsigned int total_rx_packets);
void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res);
void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);

#define I40E_XDP_PASS		0
#define I40E_XDP_CONSUMED	BIT(0)
#define I40E_XDP_TX		BIT(1)
#define I40E_XDP_REDIR		BIT(2)

/**
 * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
 * @td_cmd: Tx descriptor command bits
 * @td_offset: Tx descriptor header offsets
 * @size: size of the Tx buffer
 * @td_tag: L2 tag (VLAN) to insert
 **/
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

/**
 * i40e_update_tx_stats - Update the egress statistics for the Tx ring
 * @tx_ring: Tx ring to update
 * @total_packets: total packets sent
 * @total_bytes: total bytes sent
 **/
static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
					unsigned int total_packets,
					unsigned int total_bytes)
{
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;
}

#define WB_STRIDE 4

/**
 * i40e_arm_wb - (Possibly) arms Tx write-back
 * @tx_ring: Tx ring to update
 * @vsi: the VSI
 * @budget: the NAPI budget left
 **/
static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
			       struct i40e_vsi *vsi,
			       int budget)
{
	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < WB_STRIDE descriptors
		 * waiting to be written back; if so, kick the hardware to
		 * force them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable interrupts.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && j > 0 &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}
}
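/*
 * Usage sketch (illustrative only, not compiled into the driver): a
 * zero-copy/XDP transmit path would typically combine the Tx helpers
 * above roughly as follows, where tx_desc, dma, size, vsi, budget and
 * the total_* counters are assumed to be maintained by the caller:
 *
 *	tx_desc->buffer_addr = cpu_to_le64(dma);
 *	tx_desc->cmd_type_offset_bsz =
 *		build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
 *			   0, size, 0);
 *
 * and then, once a batch of descriptors has been cleaned:
 *
 *	i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
 *	i40e_arm_wb(tx_ring, vsi, budget);
 */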
/**
 * i40e_rx_is_programming_status - check for programming status descriptor
 * @qword1: qword1 representing status_error_len in CPU ordering
 *
 * The value in the length field of the descriptor indicates whether this
 * is a programming status descriptor for flow director or FCoE (the length
 * equals I40E_RX_PROG_STATUS_DESC_LENGTH); otherwise it is a packet
 * descriptor.
 **/
static inline bool i40e_rx_is_programming_status(u64 qword1)
{
	/* The Rx filter programming status and SPH bit occupy the same
	 * spot in the descriptor. Since we don't support packet split we
	 * can just reuse the bit as an indication that this is a
	 * programming status descriptor.
	 */
	return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK;
}

/**
 * i40e_inc_ntc - Advance the next_to_clean index
 * @rx_ring: Rx ring
 **/
static inline void i40e_inc_ntc(struct i40e_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(I40E_RX_DESC(rx_ring, ntc));
}

void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);

#endif /* I40E_TXRX_COMMON_ */
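/*
 * Usage sketch (illustrative only, not compiled into the driver): a
 * simplified Rx clean loop built from the helpers above. Error and
 * allocation handling are omitted; qword0_raw stands for the raw first
 * qword of the descriptor, and budget, skb, xdp_xmit and the total_rx_*
 * counters are assumed to be maintained by the caller:
 *
 *	while (total_rx_packets < budget) {
 *		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
 *		qword1 = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
 *
 *		if (i40e_rx_is_programming_status(qword1)) {
 *			i40e_clean_programming_status(rx_ring, qword0_raw,
 *						      qword1);
 *			i40e_inc_ntc(rx_ring);
 *			continue;
 *		}
 *
 *		... build the skb, run XDP, then ...
 *
 *		i40e_process_skb_fields(rx_ring, rx_desc, skb);
 *		i40e_inc_ntc(rx_ring);
 *	}
 *
 *	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
 *	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
 */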