xref: /linux/drivers/net/ethernet/intel/ice/ice_flow.h (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #ifndef _ICE_FLOW_H_
5 #define _ICE_FLOW_H_
6 
/* Sentinel values: "no flow entry handle" and "field offset not present" */
7 #define ICE_FLOW_ENTRY_HANDLE_INVAL	0
8 #define ICE_FLOW_FLD_OFF_INVAL		0xffff
9 
10 /* Generate flow hash field from flow field type(s) */
/* Each ICE_FLOW_HASH_* below is a u64 bitmask whose bits are enum
 * ice_flow_field indices (see BIT_ULL usage) selecting the packet fields
 * that feed the RSS hash.
 */
/* Ethernet: hash over destination + source MAC */
11 #define ICE_FLOW_HASH_ETH	\
12 	(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \
13 	 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA))
/* IPv4: hash over source + destination address */
14 #define ICE_FLOW_HASH_IPV4	\
15 	(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
16 	 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
/* IPv6: hash over source + destination address */
17 #define ICE_FLOW_HASH_IPV6	\
18 	(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
19 	 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
/* L4 port pairs (source + destination) per transport protocol */
20 #define ICE_FLOW_HASH_TCP_PORT	\
21 	(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
22 	 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
23 #define ICE_FLOW_HASH_UDP_PORT	\
24 	(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
25 	 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
26 #define ICE_FLOW_HASH_SCTP_PORT	\
27 	(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
28 	 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
29 
/* Combined L3+L4 hash configurations (addresses plus port pair) */
30 #define ICE_HASH_INVALID	0
31 #define ICE_HASH_TCP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
32 #define ICE_HASH_TCP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
33 #define ICE_HASH_UDP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
34 #define ICE_HASH_UDP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
35 
/* GTP-C tunnel endpoint ID */
36 #define ICE_FLOW_HASH_GTP_TEID \
37 	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
38 
39 #define ICE_FLOW_HASH_GTP_IPV4_TEID \
40 	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
41 #define ICE_FLOW_HASH_GTP_IPV6_TEID \
42 	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
43 
/* GTP-U tunnel endpoint ID (plain GTP-U IP header, no extension header) */
44 #define ICE_FLOW_HASH_GTP_U_TEID \
45 	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))
46 
47 #define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
48 	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
49 #define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
50 	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)
51 
/* GTP-U with extension header: TEID and QoS flow identifier (QFI) */
52 #define ICE_FLOW_HASH_GTP_U_EH_TEID \
53 	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))
54 
55 #define ICE_FLOW_HASH_GTP_U_EH_QFI \
56 	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))
57 
58 #define ICE_FLOW_HASH_GTP_U_IPV4_EH \
59 	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
60 	 ICE_FLOW_HASH_GTP_U_EH_QFI)
61 #define ICE_FLOW_HASH_GTP_U_IPV6_EH \
62 	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
63 	 ICE_FLOW_HASH_GTP_U_EH_QFI)
64 
/* PPPoE session ID, alone and combined with Ethernet or L4 port hashes */
65 #define ICE_FLOW_HASH_PPPOE_SESS_ID \
66 	(BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))
67 
68 #define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
69 	(ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
70 #define ICE_FLOW_HASH_PPPOE_TCP_ID \
71 	(ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
72 #define ICE_FLOW_HASH_PPPOE_UDP_ID \
73 	(ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
74 
/* PFCP session endpoint ID (SEID), alone and with IPv4/IPv6 addresses */
75 #define ICE_FLOW_HASH_PFCP_SEID \
76 	(BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID))
77 #define ICE_FLOW_HASH_PFCP_IPV4_SEID \
78 	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID)
79 #define ICE_FLOW_HASH_PFCP_IPV6_SEID \
80 	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID)
81 
/* L2TPv3 session ID, alone and with IPv4/IPv6 addresses */
82 #define ICE_FLOW_HASH_L2TPV3_SESS_ID \
83 	(BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID))
84 #define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \
85 	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
86 #define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \
87 	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
88 
/* IPsec ESP security parameter index (SPI) */
89 #define ICE_FLOW_HASH_ESP_SPI \
90 	(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI))
91 #define ICE_FLOW_HASH_ESP_IPV4_SPI \
92 	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI)
93 #define ICE_FLOW_HASH_ESP_IPV6_SPI \
94 	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI)
95 
/* IPsec AH security parameter index (SPI) */
96 #define ICE_FLOW_HASH_AH_SPI \
97 	(BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI))
98 #define ICE_FLOW_HASH_AH_IPV4_SPI \
99 	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI)
100 #define ICE_FLOW_HASH_AH_IPV6_SPI \
101 	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI)
102 
/* NAT-traversal (UDP-encapsulated) ESP SPI */
103 #define ICE_FLOW_HASH_NAT_T_ESP_SPI \
104 	(BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI))
105 #define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \
106 	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
107 #define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
108 	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
109 
110 /* Protocol header fields within a packet segment. A segment consists of one or
111  * more protocol headers that make up a logical group of protocol headers. Each
112  * logical group of protocol headers encapsulates or is encapsulated using/by
113  * tunneling or encapsulation protocols for network virtualization such as GRE,
114  * VxLAN, etc.
115  */
/* Bit flags identifying the protocol headers present in a segment; they are
 * OR'd into ice_flow_seg_info::hdrs (u32) via ICE_FLOW_SET_HDRS().  Values
 * are explicit and form part of the header-selection interface; do not
 * renumber.
 */
116 enum ice_flow_seg_hdr {
117 	ICE_FLOW_SEG_HDR_NONE		= 0x00000000,
118 	ICE_FLOW_SEG_HDR_ETH		= 0x00000001,
119 	ICE_FLOW_SEG_HDR_VLAN		= 0x00000002,
120 	ICE_FLOW_SEG_HDR_IPV4		= 0x00000004,
121 	ICE_FLOW_SEG_HDR_IPV6		= 0x00000008,
122 	ICE_FLOW_SEG_HDR_ARP		= 0x00000010,
123 	ICE_FLOW_SEG_HDR_ICMP		= 0x00000020,
124 	ICE_FLOW_SEG_HDR_TCP		= 0x00000040,
125 	ICE_FLOW_SEG_HDR_UDP		= 0x00000080,
126 	ICE_FLOW_SEG_HDR_SCTP		= 0x00000100,
127 	ICE_FLOW_SEG_HDR_GRE		= 0x00000200,
128 	ICE_FLOW_SEG_HDR_GTPC		= 0x00000400,
129 	ICE_FLOW_SEG_HDR_GTPC_TEID	= 0x00000800,
130 	ICE_FLOW_SEG_HDR_GTPU_IP	= 0x00001000,
131 	ICE_FLOW_SEG_HDR_GTPU_EH	= 0x00002000,
132 	ICE_FLOW_SEG_HDR_GTPU_DWN	= 0x00004000,
133 	ICE_FLOW_SEG_HDR_GTPU_UP	= 0x00008000,
134 	ICE_FLOW_SEG_HDR_PPPOE		= 0x00010000,
135 	ICE_FLOW_SEG_HDR_PFCP_NODE	= 0x00020000,
136 	ICE_FLOW_SEG_HDR_PFCP_SESSION	= 0x00040000,
137 	ICE_FLOW_SEG_HDR_L2TPV3		= 0x00080000,
138 	ICE_FLOW_SEG_HDR_ESP		= 0x00100000,
139 	ICE_FLOW_SEG_HDR_AH		= 0x00200000,
140 	ICE_FLOW_SEG_HDR_NAT_T_ESP	= 0x00400000,
141 	ICE_FLOW_SEG_HDR_ETH_NON_IP	= 0x00800000,
142 	/* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
143 	 * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs
144 	 */
	/* NOTE: bits 24-28 (0x01000000-0x10000000) are currently unassigned */
145 	ICE_FLOW_SEG_HDR_IPV_OTHER      = 0x20000000,
146 };
147 
148 /* These segments all have the same PTYPES, but are otherwise distinguished by
149  * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
150  *
151  *                                gtp_eh_pdu     gtp_eh_pdu_link
152  * ICE_FLOW_SEG_HDR_GTPU_IP           0              0
153  * ICE_FLOW_SEG_HDR_GTPU_EH           1              don't care
154  * ICE_FLOW_SEG_HDR_GTPU_DWN          1              0
155  * ICE_FLOW_SEG_HDR_GTPU_UP           1              1
156  */
/* Mask matching any GTP-U segment-header variant */
157 #define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
158 			       ICE_FLOW_SEG_HDR_GTPU_EH | \
159 			       ICE_FLOW_SEG_HDR_GTPU_DWN | \
160 			       ICE_FLOW_SEG_HDR_GTPU_UP)
/* Mask matching either PFCP variant (node or session messages) */
161 #define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \
162 			       ICE_FLOW_SEG_HDR_PFCP_SESSION)
163 
/* Extractable packet fields.  Each enumerator is a bit position: field
 * selections are carried as u64 bitmasks built with BIT_ULL() (see the
 * ICE_FLOW_HASH_* macros and ice_flow_seg_info::match), which is why the
 * count must stay at or below 64.  Ordering is therefore part of the
 * interface; append new fields before ICE_FLOW_FIELD_IDX_MAX only.
 */
164 enum ice_flow_field {
165 	/* L2 */
166 	ICE_FLOW_FIELD_IDX_ETH_DA,
167 	ICE_FLOW_FIELD_IDX_ETH_SA,
168 	ICE_FLOW_FIELD_IDX_S_VLAN,
169 	ICE_FLOW_FIELD_IDX_C_VLAN,
170 	ICE_FLOW_FIELD_IDX_ETH_TYPE,
171 	/* L3 */
172 	ICE_FLOW_FIELD_IDX_IPV4_DSCP,
173 	ICE_FLOW_FIELD_IDX_IPV6_DSCP,
174 	ICE_FLOW_FIELD_IDX_IPV4_TTL,
175 	ICE_FLOW_FIELD_IDX_IPV4_PROT,
176 	ICE_FLOW_FIELD_IDX_IPV6_TTL,
177 	ICE_FLOW_FIELD_IDX_IPV6_PROT,
178 	ICE_FLOW_FIELD_IDX_IPV4_SA,
179 	ICE_FLOW_FIELD_IDX_IPV4_DA,
180 	ICE_FLOW_FIELD_IDX_IPV6_SA,
181 	ICE_FLOW_FIELD_IDX_IPV6_DA,
182 	/* L4 */
183 	ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
184 	ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
185 	ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
186 	ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
187 	ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
188 	ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
189 	ICE_FLOW_FIELD_IDX_TCP_FLAGS,
190 	/* ARP */
191 	ICE_FLOW_FIELD_IDX_ARP_SIP,
192 	ICE_FLOW_FIELD_IDX_ARP_DIP,
193 	ICE_FLOW_FIELD_IDX_ARP_SHA,
194 	ICE_FLOW_FIELD_IDX_ARP_DHA,
195 	ICE_FLOW_FIELD_IDX_ARP_OP,
196 	/* ICMP */
197 	ICE_FLOW_FIELD_IDX_ICMP_TYPE,
198 	ICE_FLOW_FIELD_IDX_ICMP_CODE,
199 	/* GRE */
200 	ICE_FLOW_FIELD_IDX_GRE_KEYID,
201 	/* GTPC_TEID */
202 	ICE_FLOW_FIELD_IDX_GTPC_TEID,
203 	/* GTPU_IP */
204 	ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
205 	/* GTPU_EH */
206 	ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
207 	ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
208 	/* GTPU_UP */
209 	ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
210 	/* GTPU_DWN */
211 	ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
212 	/* PPPoE */
213 	ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
214 	/* PFCP */
215 	ICE_FLOW_FIELD_IDX_PFCP_SEID,
216 	/* L2TPv3 */
217 	ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
218 	/* ESP */
219 	ICE_FLOW_FIELD_IDX_ESP_SPI,
220 	/* AH */
221 	ICE_FLOW_FIELD_IDX_AH_SPI,
222 	/* NAT_T ESP */
223 	ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
224 	 /* The total number of enums must not exceed 64 */
225 	ICE_FLOW_FIELD_IDX_MAX
226 };
227 
228 /* Flow headers and fields for AVF support */
/* Bit positions used when exchanging RSS hash-enable (HENA) bitmaps with
 * AVF virtual functions (see ICE_DEFAULT_RSS_HENA below).  The explicit
 * values (29, 39, 63) and the reserved gaps preserve the wire/ABI numbering
 * shared with the VF driver — presumably fixed by the virtchnl interface;
 * do not renumber (TODO confirm against include/linux/avf/virtchnl.h).
 */
229 enum ice_flow_avf_hdr_field {
230 	/* Values 0 - 28 are reserved for future use */
231 	ICE_AVF_FLOW_FIELD_INVALID		= 0,
232 	ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP	= 29,
233 	ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
234 	ICE_AVF_FLOW_FIELD_IPV4_UDP,
235 	ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
236 	ICE_AVF_FLOW_FIELD_IPV4_TCP,
237 	ICE_AVF_FLOW_FIELD_IPV4_SCTP,
238 	ICE_AVF_FLOW_FIELD_IPV4_OTHER,
239 	ICE_AVF_FLOW_FIELD_FRAG_IPV4,
240 	/* Values 37-38 are reserved */
241 	ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP	= 39,
242 	ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
243 	ICE_AVF_FLOW_FIELD_IPV6_UDP,
244 	ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
245 	ICE_AVF_FLOW_FIELD_IPV6_TCP,
246 	ICE_AVF_FLOW_FIELD_IPV6_SCTP,
247 	ICE_AVF_FLOW_FIELD_IPV6_OTHER,
248 	ICE_AVF_FLOW_FIELD_FRAG_IPV6,
249 	ICE_AVF_FLOW_FIELD_RSVD47,
250 	ICE_AVF_FLOW_FIELD_FCOE_OX,
251 	ICE_AVF_FLOW_FIELD_FCOE_RX,
252 	ICE_AVF_FLOW_FIELD_FCOE_OTHER,
253 	/* Values 51-62 are reserved */
254 	ICE_AVF_FLOW_FIELD_L2_PAYLOAD		= 63,
255 	ICE_AVF_FLOW_FIELD_MAX
256 };
257 
258 /* Supported RSS offloads  This macro is defined to support
259  * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. PF driver sends the RSS hardware
260  * capabilities to the caller of this ops.
261  */
/* u64 bitmask over enum ice_flow_avf_hdr_field: every TCP/UDP/SCTP/other/
 * fragment flow type for both IPv4 and IPv6 is enabled by default.
 */
262 #define ICE_DEFAULT_RSS_HENA ( \
263 	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
264 	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
265 	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
266 	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
267 	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
268 	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
269 	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
270 	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
271 	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
272 	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
273 	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
274 	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
275 	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
276 	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
277 	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
278 	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
279 
/* Flow direction; only receive-side flows are supported here.  The 0x02
 * value looks like a flag encoding shared with hardware/firmware —
 * NOTE(review): confirm before adding a TX direction.
 */
280 enum ice_flow_dir {
281 	ICE_FLOW_RX		= 0x02,
282 };
283 
/* Relative priority of a flow entry within a profile */
284 enum ice_flow_priority {
285 	ICE_FLOW_PRIO_LOW,
286 	ICE_FLOW_PRIO_NORMAL,
287 	ICE_FLOW_PRIO_HIGH
288 };
289 
/* Max packet segments per flow profile (sized for outer + inner headers) */
290 #define ICE_FLOW_SEG_MAX		2
/* Max raw (offset/length) match fields per segment */
291 #define ICE_FLOW_SEG_RAW_FLD_MAX	2
/* Bytes extracted per field-vector entry — assumed HW extraction width;
 * TODO confirm against the extraction-sequence code in ice_flow.c
 */
292 #define ICE_FLOW_FV_EXTRACT_SZ		2
293 
/* OR a set of ICE_FLOW_SEG_HDR_* flags into a segment's header bitmask */
294 #define ICE_FLOW_SET_HDRS(seg, val)	((seg)->hdrs |= (u32)(val))
295 
/* Describes where a protocol field is extracted from: which protocol, the
 * byte offset within that header, and its position within the hardware
 * field vector (FV) entry.
 */
296 struct ice_flow_seg_xtrct {
297 	u8 prot_id;	/* Protocol ID of extracted header field */
298 	u16 off;	/* Starting offset of the field in header in bytes */
299 	u8 idx;		/* Index of FV entry used */
300 	u8 disp;	/* Displacement of field in bits fr. FV entry's start */
301 	u16 mask;	/* Mask for field */
302 };
303 
/* How a field's value in the input buffer is interpreted for matching */
304 enum ice_flow_fld_match_type {
305 	ICE_FLOW_FLD_TYPE_REG,		/* Value, mask */
306 	ICE_FLOW_FLD_TYPE_RANGE,	/* Value, mask, last (upper bound) */
307 	ICE_FLOW_FLD_TYPE_PREFIX,	/* IP address, prefix, size of prefix */
308 	ICE_FLOW_FLD_TYPE_SIZE,		/* Value, mask, size of match */
309 };
310 
311 struct ice_flow_fld_loc {
312 	/* Describe offsets of field information relative to the beginning of
313 	 * input buffer provided when adding flow entries.
314 	 */
315 	u16 val;	/* Offset where the value is located */
316 	u16 mask;	/* Offset where the mask/prefix value is located */
317 	u17 last;	/* Length or offset where the upper value is located */
318 };
319 
/* Per-field match description: match semantics, where to read the value
 * from the caller's input buffer, where it lands in the entry buffer, and
 * the hardware extraction info for the field.
 */
320 struct ice_flow_fld_info {
321 	enum ice_flow_fld_match_type type;
322 	/* Location where to retrieve data from an input buffer */
323 	struct ice_flow_fld_loc src;
324 	/* Location where to put the data into the final entry buffer */
325 	struct ice_flow_fld_loc entry;
326 	struct ice_flow_seg_xtrct xtrct;
327 };
328 
/* A raw match field addressed by byte offset from the segment start rather
 * than by a named protocol field.
 */
329 struct ice_flow_seg_fld_raw {
330 	struct ice_flow_fld_info info;
331 	u16 off;	/* Offset from the start of the segment */
332 };
333 
/* Everything known about one packet segment of a flow profile: which
 * protocol headers it contains, which fields are matched (as u64 bitmasks
 * indexed by enum ice_flow_field), per-field info, and any raw fields.
 */
334 struct ice_flow_seg_info {
335 	u32 hdrs;	/* Bitmask indicating protocol headers present */
336 	u64 match;	/* Bitmask indicating header fields to be matched */
337 	u64 range;	/* Bitmask indicating header fields matched as ranges */
338 
339 	struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
340 
341 	u8 raws_cnt;	/* Number of raw fields to be matched */
342 	struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
343 };
344 
345 /* This structure describes a flow entry, and is tracked only in this file */
346 struct ice_flow_entry {
	/* Node in the owning profile's 'entries' list */
347 	struct list_head l_entry;
348 
	/* Caller-supplied entry ID (see ice_flow_add_entry()) */
349 	u64 id;
	/* Back-pointer to the profile this entry belongs to */
350 	struct ice_flow_prof *prof;
351 	/* Flow entry's content */
352 	void *entry;
353 	enum ice_flow_priority priority;
354 	u16 vsi_handle;
	/* Size in bytes of the buffer pointed to by 'entry' */
355 	u16 entry_sz;
356 };
357 
/* Convert a flow entry pointer to/from the opaque u64 handle returned to
 * callers.  The argument is parenthesized so that an expression argument
 * (e.g. ICE_FLOW_ENTRY_HNDL(p + 1)) expands correctly — the casts would
 * otherwise bind to 'p' alone, since a cast binds tighter than '+'.
 */
358 #define ICE_FLOW_ENTRY_HNDL(e)	((u64)(uintptr_t)(e))
359 #define ICE_FLOW_ENTRY_PTR(h)	((struct ice_flow_entry *)(uintptr_t)(h))
360 
/* A flow profile: a direction plus up to ICE_FLOW_SEG_MAX packet-segment
 * descriptions, the flow entries created against it, and the VSIs that
 * reference it.
 */
361 struct ice_flow_prof {
	/* Node in the per-block profile list */
362 	struct list_head l_entry;
363 
364 	u64 id;
365 	enum ice_flow_dir dir;
	/* Number of valid elements in segs[] */
366 	u8 segs_cnt;
367 
368 	/* Keep track of flow entries associated with this flow profile */
	/* entries_lock protects the 'entries' list below */
369 	struct mutex entries_lock;
370 	struct list_head entries;
371 
372 	struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
373 
374 	/* software VSI handles referenced by this flow profile */
375 	DECLARE_BITMAP(vsis, ICE_MAX_VSI);
376 };
377 
/* One RSS configuration: the hashed-field bitmask and packet-header flags,
 * plus the set of VSIs it has been applied to.
 */
378 struct ice_rss_cfg {
379 	struct list_head l_entry;
380 	/* bitmap of VSIs added to the RSS entry */
381 	DECLARE_BITMAP(vsis, ICE_MAX_VSI);
	/* u64 bitmask of enum ice_flow_field values (ICE_FLOW_HASH_*) */
382 	u64 hashed_flds;
	/* ICE_FLOW_SEG_HDR_* flags describing the packet headers hashed */
383 	u32 packet_hdr;
384 };
385 
/* Flow profile management.  All int-returning functions below follow the
 * kernel convention of 0 on success / negative errno on failure.
 */
386 int
387 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
388 		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
389 		  struct ice_flow_prof **prof);
390 int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
/* Flow entry management; entry_h receives an opaque handle (see
 * ICE_FLOW_ENTRY_HNDL/ICE_FLOW_ENTRY_PTR above)
 */
391 int
392 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
393 		   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
394 		   void *data, u64 *entry_h);
395 int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
/* Configure a named or raw match field within a segment description.
 * val_loc/mask_loc/last_loc are offsets into the caller's input buffer
 * (ICE_FLOW_FLD_OFF_INVAL when unused).
 */
396 void
397 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
398 		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
399 void
400 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
401 		     u16 val_loc, u16 mask_loc);
402 int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
/* RSS configuration: per-VSI add/remove/replay of hashed-field configs */
403 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
404 int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
405 int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
406 int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
407 int
408 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
409 		u32 addl_hdrs);
410 int
411 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
412 		u32 addl_hdrs);
413 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
414 #endif /* _ICE_FLOW_H_ */
415