xref: /linux/drivers/net/ethernet/intel/ice/ice_txrx_lib.h (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #ifndef _ICE_TXRX_LIB_H_
5 #define _ICE_TXRX_LIB_H_
6 #include "ice.h"
7 
8 /**
9  * ice_test_staterr - tests bits in Rx descriptor status and error fields
10  * @status_err_n: Rx descriptor status_error0 or status_error1 bits
11  * @stat_err_bits: value to mask
12  *
13  * This function does some fast chicanery in order to return the
14  * value of the mask which is really only used for boolean tests.
15  * The status_error_len doesn't need to be shifted because it begins
16  * at offset zero.
17  */
18 static inline bool
19 ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
20 {
21 	return !!(status_err_n & cpu_to_le16(stat_err_bits));
22 }
23 
24 /**
25  * ice_is_non_eop - process handling of non-EOP buffers
26  * @rx_ring: Rx ring being processed
27  * @rx_desc: Rx descriptor for current buffer
28  *
29  * If the buffer is an EOP buffer, this function exits returning false,
30  * otherwise return true indicating that this is in fact a non-EOP buffer.
31  */
32 static inline bool
33 ice_is_non_eop(const struct ice_rx_ring *rx_ring,
34 	       const union ice_32b_rx_flex_desc *rx_desc)
35 {
36 	/* if we are the last buffer then there is nothing else to do */
37 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
38 	if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
39 		return false;
40 
41 	rx_ring->ring_stats->rx_stats.non_eop_descs++;
42 
43 	return true;
44 }
45 
/**
 * ice_build_ctob - build the Tx descriptor cmd_type_offset_bsz quadword
 * @td_cmd: Tx descriptor command bits
 * @td_offset: Tx descriptor header offset bits
 * @size: size of the Tx buffer in bytes
 * @td_tag: L2TAG1 value to insert into the descriptor
 *
 * Return: little-endian QW1 value combining the DATA descriptor type
 * with the command, offset, buffer size and L2 tag fields, each shifted
 * into its defined position.
 */
static inline __le64
ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
}
55 
56 /**
57  * ice_build_tstamp_desc - build Tx time stamp descriptor
58  * @tx_desc: Tx LAN descriptor index
59  * @tstamp: time stamp
60  *
61  * Return: Tx time stamp descriptor
62  */
63 static inline __le32
64 ice_build_tstamp_desc(u16 tx_desc, u32 tstamp)
65 {
66 	return cpu_to_le32(FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_desc) |
67 			   FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp));
68 }
69 
70 /**
71  * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
72  * @rx_desc: Rx 32b flex descriptor with RXDID=2
73  *
74  * The OS and current PF implementation only support stripping a single VLAN tag
75  * at a time, so there should only ever be 0 or 1 tags in the l2tag* fields. If
76  * one is found return the tag, else return 0 to mean no VLAN tag was found.
77  */
78 static inline u16
79 ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc)
80 {
81 	u16 stat_err_bits;
82 
83 	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
84 	if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
85 		return le16_to_cpu(rx_desc->wb.l2tag1);
86 
87 	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
88 	if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
89 		return le16_to_cpu(rx_desc->wb.l2tag2_2nd);
90 
91 	return 0;
92 }
93 
/**
 * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
 * @xdp_ring: XDP Tx ring
 *
 * This function updates the XDP Tx ring tail register with the ring's
 * next_to_use value, notifying hardware of newly produced descriptors.
 */
static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. The explicit wmb()
	 * also allows the cheaper writel_relaxed() for the tail bump.
	 */
	wmb();
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}
108 
109 /**
110  * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
111  * @xdp_ring: XDP ring to produce the HW Tx descriptors on
112  *
113  * returns index of descriptor that had RS bit produced on
114  */
115 static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
116 {
117 	u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
118 	struct ice_tx_desc *tx_desc;
119 
120 	tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);
121 	tx_desc->cmd_type_offset_bsz |=
122 		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
123 
124 	return rs_idx;
125 }
126 
127 void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
128 int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
129 			bool frame);
130 void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
131 void
132 ice_process_skb_fields(struct ice_rx_ring *rx_ring,
133 		       union ice_32b_rx_flex_desc *rx_desc,
134 		       struct sk_buff *skb);
135 void
136 ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);
137 
138 static inline void
139 ice_xdp_meta_set_desc(struct xdp_buff *xdp,
140 		      union ice_32b_rx_flex_desc *eop_desc)
141 {
142 	struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
143 						    xdp_buff);
144 
145 	xdp_ext->eop_desc = eop_desc;
146 }
147 #endif /* !_ICE_TXRX_LIB_H_ */
148