xref: /linux/drivers/net/ethernet/intel/ice/virt/rss.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "rss.h"
5 #include "ice_vf_lib_private.h"
6 #include "ice.h"
7 
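/* Turn a VIRTCHNL_PROTO_HDR_*_* field enum into the single-bit field
 * selector for that field within its protocol header.
 */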
8 #define FIELD_SELECTOR(proto_hdr_field) \
9 		BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
10 
11 struct ice_vc_hdr_match_type {
12 	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
13 	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
14 };
15 
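/* Table mapping each virtchnl protocol header type to the corresponding
 * ice flow segment header bit(s).
 */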
16 static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
17 	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
18 	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
19 	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
20 	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
21 	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
22 					ICE_FLOW_SEG_HDR_IPV_OTHER},
23 	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
24 					ICE_FLOW_SEG_HDR_IPV_OTHER},
25 	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
26 	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
27 	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
28 	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
29 	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
30 	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
31 	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
32 					ICE_FLOW_SEG_HDR_GTPU_DWN},
33 	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
34 					ICE_FLOW_SEG_HDR_GTPU_UP},
35 	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
36 	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
37 	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
38 	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
39 	{VIRTCHNL_PROTO_HDR_GTPC,	ICE_FLOW_SEG_HDR_GTPC},
40 	{VIRTCHNL_PROTO_HDR_L2TPV2,	ICE_FLOW_SEG_HDR_L2TPV2},
41 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,	ICE_FLOW_SEG_HDR_IPV_FRAG},
42 	{VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,	ICE_FLOW_SEG_HDR_IPV_FRAG},
43 	{VIRTCHNL_PROTO_HDR_GRE,        ICE_FLOW_SEG_HDR_GRE},
44 };
45 
46 struct ice_vc_hash_field_match_type {
47 	u32 vc_hdr;		/* virtchnl headers
48 				 * (VIRTCHNL_PROTO_HDR_XXX)
49 				 */
50 	u32 vc_hash_field;	/* virtchnl hash fields selector
51 				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
52 				 */
53 	u64 ice_hash_field;	/* ice hash fields
54 				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
55 				 */
56 };
57 
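/* Table mapping each supported (virtchnl header, field selector) combination
 * to the matching ice hash field bitmap.
 */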
58 static const struct
59 ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
60 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
61 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
62 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
63 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
64 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
65 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
66 		ICE_FLOW_HASH_ETH},
67 	{VIRTCHNL_PROTO_HDR_ETH,
68 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
69 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
70 	{VIRTCHNL_PROTO_HDR_S_VLAN,
71 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
72 		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
73 	{VIRTCHNL_PROTO_HDR_C_VLAN,
74 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
75 		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
76 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
77 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
78 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
79 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
80 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
81 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
82 		ICE_FLOW_HASH_IPV4},
83 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
84 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
85 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
86 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
87 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
88 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
89 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
90 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
91 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
92 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
93 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
94 		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
95 	{VIRTCHNL_PROTO_HDR_IPV4,
96 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID),
97 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)},
98 	{VIRTCHNL_PROTO_HDR_IPV4,
99 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
100 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
101 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
102 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
103 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
104 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
105 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
106 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
107 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
108 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
109 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
110 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
111 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
112 		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
113 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
114 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
115 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
116 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
117 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
118 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
119 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
120 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
121 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
122 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
123 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
124 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
125 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
126 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
127 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
128 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
129 		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
130 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
131 	{VIRTCHNL_PROTO_HDR_IPV4,
132 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
133 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
134 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
135 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
136 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
137 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
138 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
139 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
140 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
141 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
142 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
143 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
144 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
145 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
146 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
147 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
148 		ICE_FLOW_HASH_IPV4},
149 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
150 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
151 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
152 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
153 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
154 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
155 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
156 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
157 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
158 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
159 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
160 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
161 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
162 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
163 		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
164 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
165 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
166 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
167 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
168 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID),
169 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)},
170 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
171 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
172 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
173 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
174 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
175 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
176 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
177 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
178 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
179 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
180 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
181 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
182 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
183 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
184 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
185 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
186 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
187 		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
188 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
189 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
190 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
191 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
192 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
193 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
194 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
195 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
196 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
197 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
198 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
199 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
200 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
201 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
202 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
203 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
204 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
205 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
206 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
207 		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
208 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
209 	{VIRTCHNL_PROTO_HDR_IPV4_FRAG,
210 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
211 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
212 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
213 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
214 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
215 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
216 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
217 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
218 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
219 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
220 		ICE_FLOW_HASH_IPV6},
221 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
222 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
223 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
224 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
225 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
226 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
227 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
228 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
229 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
230 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
231 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
232 		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
233 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
234 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
235 	{VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
236 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID),
237 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID)},
238 	{VIRTCHNL_PROTO_HDR_IPV6,
239 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
240 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST),
241 		ICE_FLOW_HASH_IPV6_PRE64},
242 	{VIRTCHNL_PROTO_HDR_IPV6,
243 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC),
244 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA)},
245 	{VIRTCHNL_PROTO_HDR_IPV6,
246 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST),
247 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA)},
248 	{VIRTCHNL_PROTO_HDR_IPV6,
249 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
250 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) |
251 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
252 		ICE_FLOW_HASH_IPV6_PRE64 |
253 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
254 	{VIRTCHNL_PROTO_HDR_IPV6,
255 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
256 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
257 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) |
258 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
259 	{VIRTCHNL_PROTO_HDR_IPV6,
260 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) |
261 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
262 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA) |
263 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
264 	{VIRTCHNL_PROTO_HDR_TCP,
265 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
266 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
267 	{VIRTCHNL_PROTO_HDR_TCP,
268 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
269 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
270 	{VIRTCHNL_PROTO_HDR_TCP,
271 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
272 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
273 		ICE_FLOW_HASH_TCP_PORT},
274 	{VIRTCHNL_PROTO_HDR_TCP,
275 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
276 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
277 	{VIRTCHNL_PROTO_HDR_TCP,
278 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
279 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
280 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
281 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
282 	{VIRTCHNL_PROTO_HDR_TCP,
283 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) |
284 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
285 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) |
286 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
287 	{VIRTCHNL_PROTO_HDR_TCP,
288 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
289 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) |
290 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
291 		ICE_FLOW_HASH_TCP_PORT |
292 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
293 	{VIRTCHNL_PROTO_HDR_UDP,
294 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
295 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
296 	{VIRTCHNL_PROTO_HDR_UDP,
297 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
298 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
299 	{VIRTCHNL_PROTO_HDR_UDP,
300 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
301 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
302 		ICE_FLOW_HASH_UDP_PORT},
303 	{VIRTCHNL_PROTO_HDR_UDP,
304 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
305 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
306 	{VIRTCHNL_PROTO_HDR_UDP,
307 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
308 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
309 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) |
310 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
311 	{VIRTCHNL_PROTO_HDR_UDP,
312 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) |
313 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
314 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) |
315 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
316 	{VIRTCHNL_PROTO_HDR_UDP,
317 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
318 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) |
319 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
320 		ICE_FLOW_HASH_UDP_PORT |
321 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
322 	{VIRTCHNL_PROTO_HDR_SCTP,
323 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
324 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
325 	{VIRTCHNL_PROTO_HDR_SCTP,
326 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
327 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
328 	{VIRTCHNL_PROTO_HDR_SCTP,
329 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
330 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
331 		ICE_FLOW_HASH_SCTP_PORT},
332 	{VIRTCHNL_PROTO_HDR_SCTP,
333 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
334 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
335 	{VIRTCHNL_PROTO_HDR_SCTP,
336 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
337 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
338 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) |
339 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
340 	{VIRTCHNL_PROTO_HDR_SCTP,
341 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) |
342 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
343 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) |
344 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
345 	{VIRTCHNL_PROTO_HDR_SCTP,
346 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
347 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) |
348 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
349 		ICE_FLOW_HASH_SCTP_PORT |
350 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
351 	{VIRTCHNL_PROTO_HDR_PPPOE,
352 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
353 		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
354 	{VIRTCHNL_PROTO_HDR_GTPU_IP,
355 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
356 		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
357 	{VIRTCHNL_PROTO_HDR_L2TPV3,
358 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
359 		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
360 	{VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
361 		BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
362 	{VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
363 		BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
364 	{VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
365 		BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
366 	{VIRTCHNL_PROTO_HDR_GTPC,
367 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPC_TEID),
368 		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)},
369 	{VIRTCHNL_PROTO_HDR_L2TPV2,
370 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID),
371 		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)},
372 	{VIRTCHNL_PROTO_HDR_L2TPV2,
373 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID),
374 		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)},
375 };
376 
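/**
 * ice_vc_rss_hash_update - update the RSS hash type of a VSI
 * @hw: pointer to the hardware structure
 * @vsi: pointer to the VSI to update
 * @hash_type: hash type, one of the ICE_AQ_VSI_Q_OPT_RSS_HASH_* values
 *
 * Rewrite the queueing option section of the VSI context so that only the
 * RSS hash field selection changes; the other queueing options are
 * preserved. On success the cached vsi->info.q_opt_rss is updated to match
 * the hardware.
 *
 * Return: 0 on success, or a negative error code on failure.
 */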
377 static int
378 ice_vc_rss_hash_update(struct ice_hw *hw, struct ice_vsi *vsi, u8 hash_type)
379 {
380 	struct ice_vsi_ctx *ctx;
381 	int ret;
382 
383 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
384 	if (!ctx)
385 		return -ENOMEM;
386 
387 	/* clear previous hash_type */
388 	ctx->info.q_opt_rss = vsi->info.q_opt_rss &
389 		~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
390 	/* hash_type is passed in as ICE_AQ_VSI_Q_OPT_RSS_<XOR|TPLZ|SYM_TPLZ> */
391 	ctx->info.q_opt_rss |= FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M,
392 					  hash_type);
393 
394 	/* Preserve existing queueing option setting */
395 	ctx->info.q_opt_tc = vsi->info.q_opt_tc;
396 	ctx->info.q_opt_flags = vsi->info.q_opt_flags;
397 
398 	ctx->info.valid_sections =
399 			cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
400 
401 	ret = ice_update_vsi(hw, vsi->idx, ctx, NULL);
402 	if (ret) {
403 		dev_err(ice_hw_to_dev(hw), "update VSI for RSS failed, err %d aq_err %s\n",
404 			ret, libie_aq_str(hw->adminq.sq_last_status));
405 	} else {
406 		vsi->info.q_opt_rss = ctx->info.q_opt_rss;
407 	}
408 
409 	kfree(ctx);
410 
411 	return ret;
412 }
413 
414 /**
415  * ice_vc_validate_pattern - check if the requested pattern is supported
416  * @vf: pointer to the VF info
417  * @proto: virtchnl protocol headers
418  *
419  * Validate whether the requested protocol header pattern is supported.
420  *
421  * Return: true if the pattern is supported, false otherwise.
422  */
423 bool
424 ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
425 {
426 	bool is_ipv4 = false;
427 	bool is_ipv6 = false;
428 	bool is_udp = false;
429 	u16 ptype = -1;
430 	int i = 0;
431 
432 	while (i < proto->count &&
433 	       proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
434 		switch (proto->proto_hdr[i].type) {
435 		case VIRTCHNL_PROTO_HDR_ETH:
436 			ptype = ICE_PTYPE_MAC_PAY;
437 			break;
438 		case VIRTCHNL_PROTO_HDR_IPV4:
439 			ptype = ICE_PTYPE_IPV4_PAY;
440 			is_ipv4 = true;
441 			break;
442 		case VIRTCHNL_PROTO_HDR_IPV6:
443 			ptype = ICE_PTYPE_IPV6_PAY;
444 			is_ipv6 = true;
445 			break;
446 		case VIRTCHNL_PROTO_HDR_UDP:
447 			if (is_ipv4)
448 				ptype = ICE_PTYPE_IPV4_UDP_PAY;
449 			else if (is_ipv6)
450 				ptype = ICE_PTYPE_IPV6_UDP_PAY;
451 			is_udp = true;
452 			break;
453 		case VIRTCHNL_PROTO_HDR_TCP:
454 			if (is_ipv4)
455 				ptype = ICE_PTYPE_IPV4_TCP_PAY;
456 			else if (is_ipv6)
457 				ptype = ICE_PTYPE_IPV6_TCP_PAY;
458 			break;
459 		case VIRTCHNL_PROTO_HDR_SCTP:
460 			if (is_ipv4)
461 				ptype = ICE_PTYPE_IPV4_SCTP_PAY;
462 			else if (is_ipv6)
463 				ptype = ICE_PTYPE_IPV6_SCTP_PAY;
464 			break;
465 		case VIRTCHNL_PROTO_HDR_GTPU_IP:
466 		case VIRTCHNL_PROTO_HDR_GTPU_EH:
467 			if (is_ipv4)
468 				ptype = ICE_MAC_IPV4_GTPU;
469 			else if (is_ipv6)
470 				ptype = ICE_MAC_IPV6_GTPU;
471 			goto out;
472 		case VIRTCHNL_PROTO_HDR_L2TPV3:
473 			if (is_ipv4)
474 				ptype = ICE_MAC_IPV4_L2TPV3;
475 			else if (is_ipv6)
476 				ptype = ICE_MAC_IPV6_L2TPV3;
477 			goto out;
478 		case VIRTCHNL_PROTO_HDR_ESP:
479 			if (is_ipv4)
480 				ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
481 						ICE_MAC_IPV4_ESP;
482 			else if (is_ipv6)
483 				ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
484 						ICE_MAC_IPV6_ESP;
485 			goto out;
486 		case VIRTCHNL_PROTO_HDR_AH:
487 			if (is_ipv4)
488 				ptype = ICE_MAC_IPV4_AH;
489 			else if (is_ipv6)
490 				ptype = ICE_MAC_IPV6_AH;
491 			goto out;
492 		case VIRTCHNL_PROTO_HDR_PFCP:
493 			if (is_ipv4)
494 				ptype = ICE_MAC_IPV4_PFCP_SESSION;
495 			else if (is_ipv6)
496 				ptype = ICE_MAC_IPV6_PFCP_SESSION;
497 			goto out;
498 		default:
499 			break;
500 		}
501 		i++;
502 	}
503 
504 out:
505 	return ice_hw_ptype_ena(&vf->pf->hw, ptype);
506 }
507 
508 /**
509  * ice_vc_parse_rss_cfg - parses hash fields and headers from
510  * a specific virtchnl RSS cfg
511  * @hw: pointer to the hardware
512  * @rss_cfg: pointer to the virtchnl RSS cfg
513  * @hash_cfg: pointer to the HW hash configuration
514  *
515  * This function parses the virtchnl RSS cfg into the intended hash fields
516  * and headers for the RSS configuration.
517  *
518  * Return: true if all the protocol headers and hash fields in the RSS cfg
519  * could be parsed, false otherwise.
520  */
521 static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
522 				 struct virtchnl_rss_cfg *rss_cfg,
523 				 struct ice_rss_hash_cfg *hash_cfg)
524 {
525 	const struct ice_vc_hash_field_match_type *hf_list;
526 	const struct ice_vc_hdr_match_type *hdr_list;
527 	int i, hf_list_len, hdr_list_len;
528 	bool outer_ipv4 = false;
529 	bool outer_ipv6 = false;
530 	bool inner_hdr = false;
531 	bool has_gre = false;
532 
533 	u32 *addl_hdrs = &hash_cfg->addl_hdrs;
534 	u64 *hash_flds = &hash_cfg->hash_flds;
535 
536 	/* set outer layer RSS as default */
537 	hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
538 
539 	if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
540 		hash_cfg->symm = true;
541 	else
542 		hash_cfg->symm = false;
543 
544 	hf_list = ice_vc_hash_field_list;
545 	hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
546 	hdr_list = ice_vc_hdr_list;
547 	hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
548 
549 	for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
550 		struct virtchnl_proto_hdr *proto_hdr =
551 					&rss_cfg->proto_hdrs.proto_hdr[i];
552 		u32 hdr_found = 0;
553 		int j;
554 
555 		/* Find matched ice headers according to virtchnl headers.
556 		 * Also figure out the outer type of GTPU headers.
557 		 */
558 		for (j = 0; j < hdr_list_len; j++) {
559 			struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
560 
561 			if (proto_hdr->type == hdr_map.vc_hdr)
562 				hdr_found = hdr_map.ice_hdr;
563 		}
564 
565 		if (!hdr_found)
566 			return false;
567 
568 		/* Find matched ice hash fields according to
569 		 * virtchnl hash fields.
570 		 */
571 		for (j = 0; j < hf_list_len; j++) {
572 			struct ice_vc_hash_field_match_type hf_map = hf_list[j];
573 
574 			if (proto_hdr->type == hf_map.vc_hdr &&
575 			    proto_hdr->field_selector == hf_map.vc_hash_field) {
576 				*hash_flds |= hf_map.ice_hash_field;
577 				break;
578 			}
579 		}
580 
581 		if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4 && !inner_hdr)
582 			outer_ipv4 = true;
583 		else if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6 &&
584 			 !inner_hdr)
585 			outer_ipv6 = true;
586 		/* for GRE and L2TPv2, take the inner header as the input set
587 		 * if no field is selected from the outer headers.
588 		 * for GTPU, take the inner header and GTPU TEID as the input set.
589 		 */
590 		else if ((proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_IP ||
591 			  proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH ||
592 			  proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN ||
593 			  proto_hdr->type ==
594 				VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP) ||
595 			 ((proto_hdr->type == VIRTCHNL_PROTO_HDR_L2TPV2 ||
596 			   proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) &&
597 			   *hash_flds == 0)) {
598 			/* set inner_hdr flag, and clean up outer header */
599 			inner_hdr = true;
600 
601 			/* clear outer headers */
602 			*addl_hdrs = 0;
603 
604 			if (outer_ipv4 && outer_ipv6)
605 				return false;
606 
607 			if (outer_ipv4)
608 				hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
609 			else if (outer_ipv6)
610 				hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
611 			else
612 				hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS;
613 
614 			if (has_gre && outer_ipv4)
615 				hash_cfg->hdr_type =
616 					ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE;
617 			if (has_gre && outer_ipv6)
618 				hash_cfg->hdr_type =
619 					ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE;
620 
621 			if (proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE)
622 				has_gre = true;
623 		}
624 
625 		*addl_hdrs |= hdr_found;
626 
627 		/* refine hash hdrs and fields for IP fragment */
628 		if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr,
629 						  VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID) &&
630 		    proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4_FRAG) {
631 			*addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
632 			*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
633 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
634 			VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr,
635 						     VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID);
636 		}
637 		if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr,
638 						  VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID) &&
639 		    proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG) {
640 			*addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
641 			*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
642 			*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
643 			VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr,
644 						     VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID);
645 		}
646 	}
647 
648 	/* refine the GTPU header if we take the outer headers as the input
649 	 * set for a GTPU flow with no inner IP.
650 	 */
651 	if (hash_cfg->hdr_type == ICE_RSS_OUTER_HEADERS &&
652 	    *addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
653 		*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_GTPU_IP);
654 		*addl_hdrs |= ICE_FLOW_SEG_HDR_GTPU_NON_IP;
655 	}
656 
657 	/* refine hash field for ESP and NAT-T ESP. */
658 	if ((*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) &&
659 	    (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP)) {
660 		*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_UDP);
661 		*addl_hdrs |= ICE_FLOW_SEG_HDR_NAT_T_ESP;
662 		*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI));
663 		*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI);
664 	}
665 
666 	/* refine hash hdrs for L4 udp/tcp/sctp. */
667 	if (*addl_hdrs & (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP |
668 			  ICE_FLOW_SEG_HDR_SCTP) &&
669 	    *addl_hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)
670 		*addl_hdrs &= ~ICE_FLOW_SEG_HDR_IPV_OTHER;
671 
672 	return true;
673 }
674 
675 /**
676  * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
677  * RSS offloads
678  * @caps: VF driver negotiated capabilities
679  *
680  * Return: true if the VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
681  * false otherwise.
682  */
683 static bool ice_vf_adv_rss_offload_ena(u32 caps)
684 {
685 	return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
686 }
687 
688 /**
689  * ice_is_hash_cfg_valid - Check whether an RSS hash context is valid
690  * @cfg: RSS hash configuration to test
691  *
692  * Return: true if both @cfg->hash_flds and @cfg->addl_hdrs are non-zero; false otherwise.
693  */
694 static bool ice_is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
695 {
696 	return cfg->hash_flds && cfg->addl_hdrs;
697 }
698 
699 /**
700  * ice_hash_cfg_reset - Reset an RSS hash context
701  * @cfg: RSS hash configuration to reset
702  *
703  * Reset fields of @cfg that store the active rule information.
704  */
705 static void ice_hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
706 {
707 	cfg->hash_flds = 0;
708 	cfg->addl_hdrs = 0;
709 	cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
710 	cfg->symm = 0;
711 }
712 
713 /**
714  * ice_hash_cfg_record - Record an RSS hash context
715  * @ctx: destination (global) RSS hash configuration
716  * @cfg: source RSS hash configuration to record
717  *
718  * Copy the active rule information from @cfg into @ctx.
719  */
720 static void ice_hash_cfg_record(struct ice_rss_hash_cfg *ctx,
721 				struct ice_rss_hash_cfg *cfg)
722 {
723 	ctx->hash_flds = cfg->hash_flds;
724 	ctx->addl_hdrs = cfg->addl_hdrs;
725 	ctx->hdr_type = cfg->hdr_type;
726 	ctx->symm = cfg->symm;
727 }
728 
729 /**
730  * ice_hash_moveout - Delete an RSS configuration (keep context)
731  * @vf: VF pointer
732  * @cfg: RSS hash configuration
733  *
734  * Return: 0 on success (including when already absent); -ENOENT if @cfg is
735  * invalid or VSI is missing; -EBUSY on hardware removal failure.
736  */
737 static int
738 ice_hash_moveout(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
739 {
740 	struct device *dev = ice_pf_to_dev(vf->pf);
741 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
742 	struct ice_hw *hw = &vf->pf->hw;
743 	int ret;
744 
745 	if (!ice_is_hash_cfg_valid(cfg) || !vsi)
746 		return -ENOENT;
747 
748 	ret = ice_rem_rss_cfg(hw, vsi->idx, cfg);
749 	if (ret && ret != -ENOENT) {
750 		dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n",
751 			vf->vf_id, vf->lan_vsi_idx, ret);
752 		return -EBUSY;
753 	}
754 
755 	return 0;
756 }
757 
758 /**
759  * ice_hash_moveback - Add an RSS hash configuration for a VF
760  * @vf: VF pointer
761  * @cfg: RSS hash configuration to apply
762  *
763  * Add @cfg to @vf if the context is valid and VSI exists; programs HW.
764  *
765  * Return:
766  * * 0 on success
767  * * -ENOENT if @cfg is invalid or VSI is missing
768  * * -EBUSY if hardware programming fails
769  */
770 static int
771 ice_hash_moveback(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
772 {
773 	struct device *dev = ice_pf_to_dev(vf->pf);
774 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
775 	struct ice_hw *hw = &vf->pf->hw;
776 	int ret;
777 
778 	if (!ice_is_hash_cfg_valid(cfg) || !vsi)
779 		return -ENOENT;
780 
781 	ret = ice_add_rss_cfg(hw, vsi, cfg);
782 	if (ret) {
783 		dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n",
784 			vf->vf_id, vf->lan_vsi_idx, ret);
785 		return -EBUSY;
786 	}
787 
788 	return 0;
789 }
790 
791 /**
792  * ice_hash_remove - remove an RSS configuration
793  * @vf: pointer to the VF info
794  * @cfg: pointer to the RSS hash configuration
795  *
796  * This function deletes an RSS hash configuration and also deletes the
797  * hash context which stores the rule info.
798  *
799  * Return: 0 on success, or a negative error code on failure.
800  */
801 static int
802 ice_hash_remove(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
803 {
804 	int ret;
805 
806 	ret = ice_hash_moveout(vf, cfg);
807 	if (ret && ret != -ENOENT)
808 		return ret;
809 
810 	ice_hash_cfg_reset(cfg);
811 
812 	return 0;
813 }
814 
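/**
 * struct ice_gtpu_ctx_action - pre-processing actions for a GTPU hash context
 * @ctx_idx: GTPU hash context index this entry applies to
 * @remove_list: hash contexts whose rules and contexts are removed before
 *		 adding the new configuration
 * @remove_count: number of entries in @remove_list
 * @moveout_list: hash contexts whose rules are removed but whose contexts
 *		  are kept so they can be re-added after the new rule
 * @moveout_count: number of entries in @moveout_list
 */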
815 struct ice_gtpu_ctx_action {
816 	u32 ctx_idx;
817 	const u32 *remove_list;
818 	int remove_count;
819 	const u32 *moveout_list;
820 	int moveout_count;
821 };
822 
823 /**
824  * ice_add_rss_cfg_pre_gtpu - Pre-process the GTPU RSS configuration
825  * @vf: pointer to the VF info
826  * @ctx: pointer to the context of the GTPU hash
827  * @ctx_idx: index of the hash context
828  *
829  * Pre-processes the GTPU hash configuration before adding a new
830  * hash context. It removes or reorders existing hash configurations that may
831  * conflict with the new one. For example, if a GTPU_UP or GTPU_DWN rule is
832  * configured after a GTPU_EH rule, the GTPU_EH hash will be matched first due
833  * to TCAM write and match order (top-down). In such cases, the GTPU_EH rule
834  * must be moved after the GTPU_UP/DWN rule. Conversely, if a GTPU_EH rule is
835  * configured after a GTPU_UP/DWN rule, the UP/DWN rules should be removed to
836  * avoid conflict.
837  *
838  * Return: 0 on success or a negative error code on failure
839  */
840 static int ice_add_rss_cfg_pre_gtpu(struct ice_vf *vf,
841 				    struct ice_vf_hash_gtpu_ctx *ctx,
842 				    u32 ctx_idx)
843 {
844 	int ret, i;
845 
846 	static const u32 remove_eh_ip[] = {
847 		ICE_HASH_GTPU_CTX_EH_IP_UDP, ICE_HASH_GTPU_CTX_EH_IP_TCP,
848 		ICE_HASH_GTPU_CTX_UP_IP,     ICE_HASH_GTPU_CTX_UP_IP_UDP,
849 		ICE_HASH_GTPU_CTX_UP_IP_TCP, ICE_HASH_GTPU_CTX_DW_IP,
850 		ICE_HASH_GTPU_CTX_DW_IP_UDP, ICE_HASH_GTPU_CTX_DW_IP_TCP,
851 	};
852 
853 	static const u32 remove_eh_ip_udp[] = {
854 		ICE_HASH_GTPU_CTX_UP_IP_UDP,
855 		ICE_HASH_GTPU_CTX_DW_IP_UDP,
856 	};
857 	static const u32 moveout_eh_ip_udp[] = {
858 		ICE_HASH_GTPU_CTX_UP_IP,
859 		ICE_HASH_GTPU_CTX_UP_IP_TCP,
860 		ICE_HASH_GTPU_CTX_DW_IP,
861 		ICE_HASH_GTPU_CTX_DW_IP_TCP,
862 	};
863 
864 	static const u32 remove_eh_ip_tcp[] = {
865 		ICE_HASH_GTPU_CTX_UP_IP_TCP,
866 		ICE_HASH_GTPU_CTX_DW_IP_TCP,
867 	};
868 	static const u32 moveout_eh_ip_tcp[] = {
869 		ICE_HASH_GTPU_CTX_UP_IP,
870 		ICE_HASH_GTPU_CTX_UP_IP_UDP,
871 		ICE_HASH_GTPU_CTX_DW_IP,
872 		ICE_HASH_GTPU_CTX_DW_IP_UDP,
873 	};
874 
875 	static const u32 remove_up_ip[] = {
876 		ICE_HASH_GTPU_CTX_UP_IP_UDP,
877 		ICE_HASH_GTPU_CTX_UP_IP_TCP,
878 	};
879 	static const u32 moveout_up_ip[] = {
880 		ICE_HASH_GTPU_CTX_EH_IP,
881 		ICE_HASH_GTPU_CTX_EH_IP_UDP,
882 		ICE_HASH_GTPU_CTX_EH_IP_TCP,
883 	};
884 
885 	static const u32 moveout_up_ip_udp_tcp[] = {
886 		ICE_HASH_GTPU_CTX_EH_IP,
887 		ICE_HASH_GTPU_CTX_EH_IP_UDP,
888 		ICE_HASH_GTPU_CTX_EH_IP_TCP,
889 	};
890 
891 	static const u32 remove_dw_ip[] = {
892 		ICE_HASH_GTPU_CTX_DW_IP_UDP,
893 		ICE_HASH_GTPU_CTX_DW_IP_TCP,
894 	};
895 	static const u32 moveout_dw_ip[] = {
896 		ICE_HASH_GTPU_CTX_EH_IP,
897 		ICE_HASH_GTPU_CTX_EH_IP_UDP,
898 		ICE_HASH_GTPU_CTX_EH_IP_TCP,
899 	};
900 
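	/* One remove/moveout action set per GTPU hash context index */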
901 	static const struct ice_gtpu_ctx_action actions[] = {
902 		{ ICE_HASH_GTPU_CTX_EH_IP, remove_eh_ip,
903 		  ARRAY_SIZE(remove_eh_ip), NULL, 0 },
904 		{ ICE_HASH_GTPU_CTX_EH_IP_UDP, remove_eh_ip_udp,
905 		  ARRAY_SIZE(remove_eh_ip_udp), moveout_eh_ip_udp,
906 		  ARRAY_SIZE(moveout_eh_ip_udp) },
907 		{ ICE_HASH_GTPU_CTX_EH_IP_TCP, remove_eh_ip_tcp,
908 		  ARRAY_SIZE(remove_eh_ip_tcp), moveout_eh_ip_tcp,
909 		  ARRAY_SIZE(moveout_eh_ip_tcp) },
910 		{ ICE_HASH_GTPU_CTX_UP_IP, remove_up_ip,
911 		  ARRAY_SIZE(remove_up_ip), moveout_up_ip,
912 		  ARRAY_SIZE(moveout_up_ip) },
913 		{ ICE_HASH_GTPU_CTX_UP_IP_UDP, NULL, 0, moveout_up_ip_udp_tcp,
914 		  ARRAY_SIZE(moveout_up_ip_udp_tcp) },
915 		{ ICE_HASH_GTPU_CTX_UP_IP_TCP, NULL, 0, moveout_up_ip_udp_tcp,
916 		  ARRAY_SIZE(moveout_up_ip_udp_tcp) },
917 		{ ICE_HASH_GTPU_CTX_DW_IP, remove_dw_ip,
918 		  ARRAY_SIZE(remove_dw_ip), moveout_dw_ip,
919 		  ARRAY_SIZE(moveout_dw_ip) },
920 		{ ICE_HASH_GTPU_CTX_DW_IP_UDP, NULL, 0, moveout_dw_ip,
921 		  ARRAY_SIZE(moveout_dw_ip) },
922 		{ ICE_HASH_GTPU_CTX_DW_IP_TCP, NULL, 0, moveout_dw_ip,
923 		  ARRAY_SIZE(moveout_dw_ip) },
924 	};
925 
926 	for (i = 0; i < ARRAY_SIZE(actions); i++) {
927 		if (actions[i].ctx_idx != ctx_idx)
928 			continue;
929 
930 		if (actions[i].remove_list) {
931 			for (int j = 0; j < actions[i].remove_count; j++) {
932 				u16 rm = actions[i].remove_list[j];
933 
934 				ret = ice_hash_remove(vf, &ctx->ctx[rm]);
935 				if (ret && ret != -ENOENT)
936 					return ret;
937 			}
938 		}
939 
940 		if (actions[i].moveout_list) {
941 			for (int j = 0; j < actions[i].moveout_count; j++) {
942 				u16 mv = actions[i].moveout_list[j];
943 
944 				ret = ice_hash_moveout(vf, &ctx->ctx[mv]);
945 				if (ret && ret != -ENOENT)
946 					return ret;
947 			}
948 		}
949 		break;
950 	}
951 
952 	return 0;
953 }
954 
955 /**
956  * ice_add_rss_cfg_pre_ip - Pre-process IP-layer RSS configuration
957  * @vf: VF pointer
958  * @ctx: IP L4 hash context (ESP/UDP-ESP/AH/PFCP and UDP/TCP/SCTP)
959  *
960  * Remove covered/recorded IP RSS configurations prior to adding a new one.
961  *
962  * Return: 0 on success; negative error code on failure.
963  */
964 static int
965 ice_add_rss_cfg_pre_ip(struct ice_vf *vf, struct ice_vf_hash_ip_ctx *ctx)
966 {
967 	int i, ret;
968 
969 	for (i = 1; i < ICE_HASH_IP_CTX_MAX; i++)
970 		if (ice_is_hash_cfg_valid(&ctx->ctx[i])) {
971 			ret = ice_hash_remove(vf, &ctx->ctx[i]);
972 			if (ret)
973 				return ret;
974 		}
975 
976 	return 0;
977 }
978 
979 /**
980  * ice_calc_gtpu_ctx_idx - Calculate GTPU hash context index
981  * @hdrs: Bitmask of protocol headers prefixed with ICE_FLOW_SEG_HDR_*
982  *
983  * Determine the GTPU hash context index based on the combination of
984  * encapsulation headers (GTPU_EH, GTPU_UP, GTPU_DWN) and transport
985  * protocols (UDP, TCP) within IPv4 or IPv6 flows.
986  *
987  * Return: A valid context index (0-8) if the header combination is supported,
988  *         or ICE_HASH_GTPU_CTX_MAX if the combination is invalid.
989  */
990 static enum ice_hash_gtpu_ctx_type ice_calc_gtpu_ctx_idx(u32 hdrs)
991 {
992 	u32 eh_idx, ip_idx;
993 
994 	if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH)
995 		eh_idx = 0;
996 	else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP)
997 		eh_idx = 1;
998 	else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
999 		eh_idx = 2;
1000 	else
1001 		return ICE_HASH_GTPU_CTX_MAX;
1002 
1003 	ip_idx = 0;
1004 	if (hdrs & ICE_FLOW_SEG_HDR_UDP)
1005 		ip_idx = 1;
1006 	else if (hdrs & ICE_FLOW_SEG_HDR_TCP)
1007 		ip_idx = 2;
1008 
1009 	if (hdrs & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
1010 		return eh_idx * 3 + ip_idx;
1011 
1012 	return ICE_HASH_GTPU_CTX_MAX;
1013 }
1014 
1015 /**
1016  * ice_map_ip_ctx_idx - map the index of the IP L4 hash context
1017  * @hdrs: protocol headers prefixed with ICE_FLOW_SEG_HDR_XXX.
1018  *
1019  * The IP L4 hash context uses the index to classify IPv4/IPv6 flows with
1020  * ESP/UDP_ESP/AH/PFCP and non-tunnel UDP/TCP/SCTP headers.
1021  * This function maps the index based on the protocol headers.
1022  *
1023  * Return: The mapped IP context index on success, or ICE_HASH_IP_CTX_MAX
1024  *         if no matching context is found.
1025  */
1026 static u8 ice_map_ip_ctx_idx(u32 hdrs)
1027 {
1028 	u8 i;
1029 
1030 	static struct {
1031 		u32 hdrs;
1032 		u8 ctx_idx;
1033 	} ip_ctx_idx_map[] = {
1034 		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
1035 			ICE_FLOW_SEG_HDR_ESP,
1036 			ICE_HASH_IP_CTX_IP_ESP },
1037 		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
1038 			ICE_FLOW_SEG_HDR_NAT_T_ESP,
1039 			ICE_HASH_IP_CTX_IP_UDP_ESP },
1040 		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
1041 			ICE_FLOW_SEG_HDR_AH,
1042 			ICE_HASH_IP_CTX_IP_AH },
1043 		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
1044 			ICE_FLOW_SEG_HDR_PFCP_SESSION,
1045 			ICE_HASH_IP_CTX_IP_PFCP },
1046 		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
1047 			ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
1048 			ICE_HASH_IP_CTX_IP_UDP },
1049 		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
1050 			ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
1051 			ICE_HASH_IP_CTX_IP_TCP },
1052 		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
1053 			ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP,
1054 			ICE_HASH_IP_CTX_IP_SCTP },
1055 		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
1056 			ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
1057 			ICE_HASH_IP_CTX_IP },
1058 		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
1059 			ICE_FLOW_SEG_HDR_ESP,
1060 			ICE_HASH_IP_CTX_IP_ESP },
1061 		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
1062 			ICE_FLOW_SEG_HDR_NAT_T_ESP,
1063 			ICE_HASH_IP_CTX_IP_UDP_ESP },
1064 		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
1065 			ICE_FLOW_SEG_HDR_AH,
1066 			ICE_HASH_IP_CTX_IP_AH },
1067 		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
1068 			ICE_FLOW_SEG_HDR_PFCP_SESSION,
1069 			ICE_HASH_IP_CTX_IP_PFCP },
1070 		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
1071 			ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP,
1072 			ICE_HASH_IP_CTX_IP_UDP },
1073 		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
1074 			ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP,
1075 			ICE_HASH_IP_CTX_IP_TCP },
1076 		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
1077 			ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP,
1078 			ICE_HASH_IP_CTX_IP_SCTP },
1079 		{ ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
1080 			ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER,
1081 			ICE_HASH_IP_CTX_IP },
1082 		/* the remaining mappings are used for default RSS */
1083 		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
1084 			ICE_HASH_IP_CTX_IP_UDP },
1085 		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
1086 			ICE_HASH_IP_CTX_IP_TCP },
1087 		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP,
1088 			ICE_HASH_IP_CTX_IP_SCTP },
1089 		{ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
1090 			ICE_HASH_IP_CTX_IP },
1091 		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP,
1092 			ICE_HASH_IP_CTX_IP_UDP },
1093 		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP,
1094 			ICE_HASH_IP_CTX_IP_TCP },
1095 		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP,
1096 			ICE_HASH_IP_CTX_IP_SCTP },
1097 		{ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER,
1098 			ICE_HASH_IP_CTX_IP },
1099 	};
1100 
1101 	for (i = 0; i < ARRAY_SIZE(ip_ctx_idx_map); i++) {
1102 		if (hdrs == ip_ctx_idx_map[i].hdrs)
1103 			return ip_ctx_idx_map[i].ctx_idx;
1104 	}
1105 
1106 	return ICE_HASH_IP_CTX_MAX;
1107 }
1108 
1109 /**
1110  * ice_add_rss_cfg_pre - Prepare RSS configuration context for a VF
1111  * @vf: pointer to the VF structure
1112  * @cfg: pointer to the RSS hash configuration
1113  *
1114  * Prepare the RSS hash context for a given VF based on the additional
1115  * protocol headers specified in @cfg. This includes pre-configuration
1116  * for IP and GTPU-based flows.
1117  *
1118  * If the configuration matches a known IP context, the function sets up
1119  * the appropriate IP hash context. If the configuration includes GTPU
1120  * headers, it prepares the GTPU-specific context accordingly.
1121  *
1122  * Return: 0 on success, or a negative error code on failure.
1123  */
1124 static int
1125 ice_add_rss_cfg_pre(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
1126 {
1127 	u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
1128 	u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
1129 
1130 	if (ip_ctx_idx == ICE_HASH_IP_CTX_IP) {
1131 		int ret = 0;
1132 
1133 		if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
1134 			ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v4);
1135 		else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
1136 			ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v6);
1137 
1138 		if (ret)
1139 			return ret;
1140 	}
1141 
1142 	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
1143 		return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv4,
1144 						ice_gtpu_ctx_idx);
1145 	} else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
1146 		return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv6,
1147 						ice_gtpu_ctx_idx);
1148 	}
1149 
1150 	return 0;
1151 }
1152 
1153 /**
1154  * ice_add_rss_cfg_post_gtpu - Post-process GTPU RSS configuration
1155  * @vf: pointer to the VF info
1156  * @ctx: pointer to the context of the GTPU hash
1157  * @cfg: pointer to the RSS hash configuration
1158  * @ctx_idx: index of the hash context
1159  *
1160  * Post-processes the GTPU hash configuration after a new hash
1161  * context has been successfully added. It updates the context with the new
1162  * configuration and restores any previously removed hash contexts that need
1163  * to be re-applied. This ensures proper TCAM rule ordering and avoids
1164  * conflicts between overlapping GTPU rules.
1165  *
1166  * Return: 0 on success or a negative error code on failure
1167  */
1168 static int ice_add_rss_cfg_post_gtpu(struct ice_vf *vf,
1169 				     struct ice_vf_hash_gtpu_ctx *ctx,
1170 				     struct ice_rss_hash_cfg *cfg, u32 ctx_idx)
1171 {
1172 	/* GTPU hash moveback lookup table indexed by context ID.
1173 	 * Each entry is a bitmap indicating which contexts need moveback
1174 	 * operations when the corresponding context index is processed.
1175 	 */
1176 	static const unsigned long
1177 		ice_gtpu_moveback_tbl[ICE_HASH_GTPU_CTX_MAX] = {
1178 			[ICE_HASH_GTPU_CTX_EH_IP] = 0,
1179 			[ICE_HASH_GTPU_CTX_EH_IP_UDP] =
1180 				BIT(ICE_HASH_GTPU_CTX_UP_IP) |
1181 				BIT(ICE_HASH_GTPU_CTX_UP_IP_TCP) |
1182 				BIT(ICE_HASH_GTPU_CTX_DW_IP) |
1183 				BIT(ICE_HASH_GTPU_CTX_DW_IP_TCP),
1184 			[ICE_HASH_GTPU_CTX_EH_IP_TCP] =
1185 				BIT(ICE_HASH_GTPU_CTX_UP_IP) |
1186 				BIT(ICE_HASH_GTPU_CTX_UP_IP_UDP) |
1187 				BIT(ICE_HASH_GTPU_CTX_DW_IP) |
1188 				BIT(ICE_HASH_GTPU_CTX_DW_IP_UDP),
1189 			[ICE_HASH_GTPU_CTX_UP_IP] =
1190 				BIT(ICE_HASH_GTPU_CTX_EH_IP) |
1191 				BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
1192 				BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
1193 			[ICE_HASH_GTPU_CTX_UP_IP_UDP] =
1194 				BIT(ICE_HASH_GTPU_CTX_EH_IP) |
1195 				BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
1196 				BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
1197 			[ICE_HASH_GTPU_CTX_UP_IP_TCP] =
1198 				BIT(ICE_HASH_GTPU_CTX_EH_IP) |
1199 				BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
1200 				BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
1201 			[ICE_HASH_GTPU_CTX_DW_IP] =
1202 				BIT(ICE_HASH_GTPU_CTX_EH_IP) |
1203 				BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
1204 				BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
1205 			[ICE_HASH_GTPU_CTX_DW_IP_UDP] =
1206 				BIT(ICE_HASH_GTPU_CTX_EH_IP) |
1207 				BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
1208 				BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
1209 			[ICE_HASH_GTPU_CTX_DW_IP_TCP] =
1210 				BIT(ICE_HASH_GTPU_CTX_EH_IP) |
1211 				BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
1212 				BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
1213 		};
1214 	unsigned long moveback_mask;
1215 	int ret;
1216 	int i;
1217 
1218 	if (unlikely(ctx_idx >= ICE_HASH_GTPU_CTX_MAX))
1219 		return 0;
1220 
1221 	ctx->ctx[ctx_idx].addl_hdrs = cfg->addl_hdrs;
1222 	ctx->ctx[ctx_idx].hash_flds = cfg->hash_flds;
1223 	ctx->ctx[ctx_idx].hdr_type = cfg->hdr_type;
1224 	ctx->ctx[ctx_idx].symm = cfg->symm;
1225 
1226 	moveback_mask = ice_gtpu_moveback_tbl[ctx_idx];
1227 	for_each_set_bit(i, &moveback_mask, ICE_HASH_GTPU_CTX_MAX) {
1228 		ret = ice_hash_moveback(vf, &ctx->ctx[i]);
1229 		if (ret && ret != -ENOENT)
1230 			return ret;
1231 	}
1232 
1233 	return 0;
1234 }
1235 
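/**
 * ice_add_rss_cfg_post - post-process a newly added RSS configuration
 * @vf: pointer to the VF info
 * @cfg: pointer to the RSS hash configuration that was just added
 *
 * Record the configuration in the matching IP hash context, then run the
 * GTPU post-processing for IPv4/IPv6 flows to restore any hash contexts
 * that were moved out while preparing this configuration.
 *
 * Return: 0 on success, or a negative error code on failure.
 */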
1236 static int
1237 ice_add_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
1238 {
1239 	u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
1240 	u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
1241 
1242 	if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
1243 		if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
1244 			ice_hash_cfg_record(&vf->hash_ctx.v4.ctx[ip_ctx_idx], cfg);
1245 		else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
1246 			ice_hash_cfg_record(&vf->hash_ctx.v6.ctx[ip_ctx_idx], cfg);
1247 	}
1248 
1249 	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
1250 		return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv4,
1251 						 cfg, ice_gtpu_ctx_idx);
1252 	} else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
1253 		return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv6,
1254 						 cfg, ice_gtpu_ctx_idx);
1255 	}
1256 
1257 	return 0;
1258 }
1259 
1260 /**
1261  * ice_rem_rss_cfg_post - post-process the RSS configuration
1262  * @vf: pointer to the VF info
1263  * @cfg: pointer to the RSS hash configuration
1264  *
1265  * Post-process the RSS hash configuration after deleting a hash
1266  * config. For example, it resets the hash context for the GTPU hash.
1267  */
1268 static void
1269 ice_rem_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
1270 {
1271 	u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
1272 	u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
1273 
1274 	if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
1275 		if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
1276 			ice_hash_cfg_reset(&vf->hash_ctx.v4.ctx[ip_ctx_idx]);
1277 		else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
1278 			ice_hash_cfg_reset(&vf->hash_ctx.v6.ctx[ip_ctx_idx]);
1279 	}
1280 
1281 	if (ice_gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
1282 		return;
1283 
1284 	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
1285 		ice_hash_cfg_reset(&vf->hash_ctx.ipv4.ctx[ice_gtpu_ctx_idx]);
1286 	else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
1287 		ice_hash_cfg_reset(&vf->hash_ctx.ipv6.ctx[ice_gtpu_ctx_idx]);
1288 }
1289 
1290 /**
1291  * ice_rem_rss_cfg_wrap - Wrapper for deleting an RSS configuration
1292  * @vf: pointer to the VF info
1293  * @cfg: pointer to the RSS hash configuration
1294  *
1295  * Wrapper function to delete a flow profile based on an RSS configuration,
1296  * and also post-process the hash context based on the rollback mechanism
1297  * which handles rule conflicts introduced by ice_add_rss_cfg_wrap.
1298  *
1299  * Return: 0 on success; negative error code on failure.
1300  */
1301 static int
1302 ice_rem_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
1303 {
1304 	struct device *dev = ice_pf_to_dev(vf->pf);
1305 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1306 	struct ice_hw *hw = &vf->pf->hw;
1307 	int ret;
1308 
1309 	ret = ice_rem_rss_cfg(hw, vsi->idx, cfg);
1310 	/* We just ignore -ENOENT, because if two configurations share the same
1311 	 * profile, removing one of them actually removes both, since the
1312 	 * profile is deleted.
1313 	 */
1314 	if (ret && ret != -ENOENT) {
1315 		dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n",
1316 			vf->vf_id, vf->lan_vsi_idx, ret);
1317 		return ret;
1318 	}
1319 
1320 	ice_rem_rss_cfg_post(vf, cfg);
1321 
1322 	return 0;
1323 }
1324 
1325 /**
1326  * ice_add_rss_cfg_wrap - Wrapper for adding an RSS configuration
1327  * @vf: pointer to the VF info
1328  * @cfg: pointer to the RSS hash configuration
1329  *
1330  * Add a flow profile based on an RSS configuration. Use a rollback
1331  * mechanism to handle rule conflicts caused by the top-down TCAM
1332  * write order.
1333  *
1334  * Return: 0 on success; negative error code on failure.
1335  */
1336 static int
1337 ice_add_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
1338 {
1339 	struct device *dev = ice_pf_to_dev(vf->pf);
1340 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1341 	struct ice_hw *hw = &vf->pf->hw;
1342 	int ret;
1343 
1344 	if (ice_add_rss_cfg_pre(vf, cfg))
1345 		return -EINVAL;
1346 
1347 	ret = ice_add_rss_cfg(hw, vsi, cfg);
1348 	if (ret) {
1349 		dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n",
1350 			vf->vf_id, vf->lan_vsi_idx, ret);
1351 		return ret;
1352 	}
1353 
1354 	if (ice_add_rss_cfg_post(vf, cfg))
1355 		ret = -EINVAL;
1356 
1357 	return ret;
1358 }
1359 
1360 /**
1361  * ice_parse_raw_rss_pattern - Parse raw pattern spec and mask for RSS
1362  * @vf: pointer to the VF info
1363  * @proto: pointer to the virtchnl protocol header
1364  * @raw_cfg: pointer to the RSS raw pattern configuration
1365  *
1366  * Parser function to get the spec and mask from the virtchnl message, and
1367  * parse them to get the corresponding profile and offsets. The profile is
1368  * used to add the RSS configuration.
1369  *
1370  * Return: 0 on success; negative error code on failure.
1371  */
1372 static int
1373 ice_parse_raw_rss_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto,
1374 			  struct ice_rss_raw_cfg *raw_cfg)
1375 {
1376 	struct ice_parser_result pkt_parsed;
1377 	struct ice_hw *hw = &vf->pf->hw;
1378 	struct ice_parser_profile prof;
1379 	struct ice_parser *psr;
1380 	u8 *pkt_buf, *msk_buf;
1381 	u16 pkt_len;
1382 	int ret = 0;
1383 
1384 	pkt_len = proto->raw.pkt_len;
1385 	if (!pkt_len)
1386 		return -EINVAL;
1387 	if (pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
1388 		pkt_len = VIRTCHNL_MAX_SIZE_RAW_PACKET;
1389 
1390 	pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
1391 	msk_buf = kzalloc(pkt_len, GFP_KERNEL);
1392 	if (!pkt_buf || !msk_buf) {
1393 		ret = -ENOMEM;
1394 		goto free_alloc;
1395 	}
1396 
1397 	memcpy(pkt_buf, proto->raw.spec, pkt_len);
1398 	memcpy(msk_buf, proto->raw.mask, pkt_len);
1399 
1400 	psr = ice_parser_create(hw);
1401 	if (IS_ERR(psr)) {
1402 		ret = PTR_ERR(psr);
1403 		goto free_alloc;
1404 	}
1405 
1406 	ret = ice_parser_run(psr, pkt_buf, pkt_len, &pkt_parsed);
1407 	if (ret)
1408 		goto parser_destroy;
1409 
1410 	ret = ice_parser_profile_init(&pkt_parsed, pkt_buf, msk_buf,
1411 				      pkt_len, ICE_BLK_RSS, &prof);
1412 	if (ret)
1413 		goto parser_destroy;
1414 
1415 	memcpy(&raw_cfg->prof, &prof, sizeof(prof));
1416 
1417 parser_destroy:
1418 	ice_parser_destroy(psr);
1419 free_alloc:
1420 	kfree(pkt_buf);
1421 	kfree(msk_buf);
1422 	return ret;
1423 }
1424 
1425 /**
1426  * ice_add_raw_rss_cfg - add RSS configuration for raw pattern
1427  * @vf: pointer to the VF info
1428  * @cfg: pointer to the RSS raw pattern configuration
1429  *
1430  * This function adds the RSS configuration for a raw pattern.
1431  * It checks whether the current profile matches; if not, it removes the
1432  * old one and adds the new profile to HW directly. It also updates the
1433  * symmetric hash configuration.
1434  *
1435  * Return: 0 on success; negative error code on failure.
1436  */
1437 static int
1438 ice_add_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg)
1439 {
1440 	struct ice_parser_profile *prof = &cfg->prof;
1441 	struct device *dev = ice_pf_to_dev(vf->pf);
1442 	struct ice_rss_prof_info *rss_prof;
1443 	struct ice_hw *hw = &vf->pf->hw;
1444 	int i, ptg, ret = 0;
1445 	u16 vsi_handle;
1446 	u64 id;
1447 
1448 	vsi_handle = vf->lan_vsi_idx;
1449 	id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
1450 
1451 	ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id];
1452 	rss_prof = &vf->rss_prof_info[ptg];
1453 
1454 	/* check if ptg already has a profile */
1455 	if (rss_prof->prof.fv_num) {
1456 		for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1457 			if (rss_prof->prof.fv[i].proto_id !=
1458 			    prof->fv[i].proto_id ||
1459 			    rss_prof->prof.fv[i].offset !=
1460 			    prof->fv[i].offset)
1461 				break;
1462 		}
1463 
1464 		/* current profile is matched, check symmetric hash */
1465 		if (i == ICE_MAX_FV_WORDS) {
1466 			if (rss_prof->symm != cfg->symm)
1467 				goto update_symm;
1468 			return ret;
1469 		}
1470 
1471 		/* current profile is not matched, remove it */
1472 		ret =
1473 		ice_rem_prof_id_flow(hw, ICE_BLK_RSS,
1474 				     ice_get_hw_vsi_num(hw, vsi_handle),
1475 				     id);
1476 		if (ret) {
1477 			dev_err(dev, "remove RSS flow failed\n");
1478 			return ret;
1479 		}
1480 
1481 		ret = ice_rem_prof(hw, ICE_BLK_RSS, id);
1482 		if (ret) {
1483 			dev_err(dev, "remove RSS profile failed\n");
1484 			return ret;
1485 		}
1486 	}
1487 
1488 	/* add new profile */
1489 	ret = ice_flow_set_parser_prof(hw, vsi_handle, 0, prof, ICE_BLK_RSS);
1490 	if (ret) {
1491 		dev_err(dev, "HW profile add failed\n");
1492 		return ret;
1493 	}
1494 
1495 	memcpy(&rss_prof->prof, prof, sizeof(struct ice_parser_profile));
1496 
1497 update_symm:
1498 	rss_prof->symm = cfg->symm;
1499 	ice_rss_update_raw_symm(hw, cfg, id);
1500 	return ret;
1501 }
1502 
1503 /**
1504  * ice_rem_raw_rss_cfg - remove RSS configuration for raw pattern
1505  * @vf: pointer to the VF info
1506  * @cfg: pointer to the RSS raw pattern configuration
1507  *
1508  * This function removes the RSS configuration for a raw pattern.
1509  * It first checks whether the VSI group has already been removed; if not,
1510  * it removes the profile.
1511  *
1512  * Return: 0 on success; negative error code on failure.
1513  */
1514 static int
1515 ice_rem_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg)
1516 {
1517 	struct ice_parser_profile *prof = &cfg->prof;
1518 	struct device *dev = ice_pf_to_dev(vf->pf);
1519 	struct ice_hw *hw = &vf->pf->hw;
1520 	int ptg, ret = 0;
1521 	u16 vsig, vsi;
1522 	u64 id;
1523 
1524 	id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
1525 
1526 	ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id];
1527 
1528 	memset(&vf->rss_prof_info[ptg], 0,
1529 	       sizeof(struct ice_rss_prof_info));
1530 
1531 	/* check if vsig is already removed */
1532 	vsi = ice_get_hw_vsi_num(hw, vf->lan_vsi_idx);
1533 	if (vsi >= ICE_MAX_VSI) {
1534 		ret = -EINVAL;
1535 		goto err;
1536 	}
1537 
1538 	vsig = hw->blk[ICE_BLK_RSS].xlt2.vsis[vsi].vsig;
1539 	if (vsig) {
1540 		ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, id);
1541 		if (ret)
1542 			goto err;
1543 
1544 		ret = ice_rem_prof(hw, ICE_BLK_RSS, id);
1545 		if (ret)
1546 			goto err;
1547 	}
1548 
1549 	return ret;
1550 
1551 err:
1552 	dev_err(dev, "HW profile remove failed\n");
1553 	return ret;
1554 }
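
/* For reference (descriptive only): in the raw add/remove paths above the
 * first packet type set in prof->ptypes indexes the RSS block's XLT1 table
 * to find the packet type group (ptg), which selects the per-VF
 * rss_prof_info[] cache slot, while the HW VSI number indexes XLT2 to find
 * the VSI group (vsig); a vsig of 0 means nothing is programmed, so there
 * is nothing left to remove.
 */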
1555 
1556 /**
1557  * ice_vc_handle_rss_cfg - add or delete an RSS configuration for a VF
1558  * @vf: pointer to the VF info
1559  * @msg: pointer to the message buffer
1560  * @add: add an RSS config if true, otherwise delete an RSS config
1561  *
1562  * This function adds or deletes an RSS configuration.
1563  */
1564 int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
1565 {
1566 	u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
1567 	struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
1568 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1569 	struct device *dev = ice_pf_to_dev(vf->pf);
1570 	struct ice_hw *hw = &vf->pf->hw;
1571 	struct ice_vsi *vsi;
1572 	u8 hash_type;
1573 	bool symm;
1574 	int ret;
1575 
1576 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1577 		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
1578 			vf->vf_id);
1579 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1580 		goto error_param;
1581 	}
1582 
1583 	if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
1584 		dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
1585 			vf->vf_id);
1586 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1587 		goto error_param;
1588 	}
1589 
1590 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1591 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1592 		goto error_param;
1593 	}
1594 
1595 	if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
1596 	    rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
1597 	    rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
1598 		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
1599 			vf->vf_id);
1600 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1601 		goto error_param;
1602 	}
1603 
1604 	vsi = ice_get_vf_vsi(vf);
1605 	if (!vsi) {
1606 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1607 		goto error_param;
1608 	}
1609 
1610 	if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
1611 		hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
1612 				  ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
1613 
1614 		ret = ice_vc_rss_hash_update(hw, vsi, hash_type);
1615 		if (ret)
1616 			v_ret = ice_err_to_virt_err(ret);
1617 		goto error_param;
1618 	}
1619 
1620 	hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ :
1621 			  ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
1622 	ret = ice_vc_rss_hash_update(hw, vsi, hash_type);
1623 	if (ret) {
1624 		v_ret = ice_err_to_virt_err(ret);
1625 		goto error_param;
1626 	}
1627 
1628 	symm = rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
1629 	/* Configure RSS hash for raw pattern */
1630 	if (rss_cfg->proto_hdrs.tunnel_level == 0 &&
1631 	    rss_cfg->proto_hdrs.count == 0) {
1632 		struct ice_rss_raw_cfg raw_cfg;
1633 
1634 		if (ice_parse_raw_rss_pattern(vf, &rss_cfg->proto_hdrs,
1635 					      &raw_cfg)) {
1636 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1637 			goto error_param;
1638 		}
1639 
1640 		if (add) {
1641 			raw_cfg.symm = symm;
1642 			if (ice_add_raw_rss_cfg(vf, &raw_cfg))
1643 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1644 		} else {
1645 			if (ice_rem_raw_rss_cfg(vf, &raw_cfg))
1646 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1647 		}
1648 	} else {
1649 		struct ice_rss_hash_cfg cfg;
1650 
1651 		/* Only validate the pattern in the non-raw case */
1652 		if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
1653 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1654 			goto error_param;
1655 		}
1656 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
1657 		cfg.hash_flds = ICE_HASH_INVALID;
1658 		cfg.hdr_type = ICE_RSS_ANY_HEADERS;
1659 
1660 		if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
1661 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1662 			goto error_param;
1663 		}
1664 
1665 		if (add) {
1666 			cfg.symm = symm;
1667 			if (ice_add_rss_cfg_wrap(vf, &cfg))
1668 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1669 		} else {
1670 			if (ice_rem_rss_cfg_wrap(vf, &cfg))
1671 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1672 		}
1673 	}
1674 
1675 error_param:
1676 	return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
1677 }
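
/* A hedged VF-side sketch of the request this handler consumes, using only
 * the virtchnl_rss_cfg fields referenced above (how the individual protocol
 * header descriptors are filled in is omitted):
 *
 *	struct virtchnl_rss_cfg cfg = {};
 *
 *	cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
 *	cfg.proto_hdrs.tunnel_level = 0;
 *	cfg.proto_hdrs.count = 2;
 *
 * A request with both tunnel_level and count equal to 0 is treated as a
 * raw-pattern request instead, and VIRTCHNL_RSS_ALG_R_ASYMMETRIC only
 * toggles the XOR hash on the VSI without programming a flow profile.
 */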
1678 
1679 /**
1680  * ice_vc_config_rss_key - configure the VF's RSS key
1681  * @vf: pointer to the VF info
1682  * @msg: pointer to the msg buffer
1683  *
1684  * Configure the VF's RSS key
1685  */
1686 int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
1687 {
1688 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1689 	struct virtchnl_rss_key *vrk =
1690 		(struct virtchnl_rss_key *)msg;
1691 	struct ice_vsi *vsi;
1692 
1693 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1694 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1695 		goto error_param;
1696 	}
1697 
1698 	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
1699 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1700 		goto error_param;
1701 	}
1702 
1703 	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
1704 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1705 		goto error_param;
1706 	}
1707 
1708 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1709 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1710 		goto error_param;
1711 	}
1712 
1713 	vsi = ice_get_vf_vsi(vf);
1714 	if (!vsi) {
1715 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1716 		goto error_param;
1717 	}
1718 
1719 	if (ice_set_rss_key(vsi, vrk->key))
1720 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1721 error_param:
1722 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1723 				     NULL, 0);
1724 }
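
/* A hedged VF-side sketch of the matching message (virtchnl_rss_key fields
 * as used above; allocation of the variable-length key area and the VF's
 * vsi_id value are assumed):
 *
 *	vrk->vsi_id = vsi_id;
 *	vrk->key_len = ICE_VSIQF_HKEY_ARRAY_SIZE;
 *	get_random_bytes(vrk->key, vrk->key_len);
 *
 * Any other key_len is rejected with VIRTCHNL_STATUS_ERR_PARAM before
 * ice_set_rss_key() is called.
 */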
1725 
1726 /**
1727  * ice_vc_config_rss_lut - configure the VF's RSS LUT
1728  * @vf: pointer to the VF info
1729  * @msg: pointer to the msg buffer
1730  *
1731  * Configure the VF's RSS LUT
1732  */
1733 int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
1734 {
1735 	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
1736 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1737 	struct ice_vsi *vsi;
1738 
1739 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1740 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1741 		goto error_param;
1742 	}
1743 
1744 	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
1745 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1746 		goto error_param;
1747 	}
1748 
1749 	if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
1750 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1751 		goto error_param;
1752 	}
1753 
1754 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1755 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1756 		goto error_param;
1757 	}
1758 
1759 	vsi = ice_get_vf_vsi(vf);
1760 	if (!vsi) {
1761 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1762 		goto error_param;
1763 	}
1764 
1765 	if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
1766 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1767 error_param:
1768 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1769 				     NULL, 0);
1770 }
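
/* Note on the LUT message handled above: exactly ICE_LUT_VSI_SIZE entries
 * are accepted, and each entry selects which of the VF's Rx queues receives
 * packets whose RSS hash indexes that LUT slot.
 */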
1771 
1772 /**
1773  * ice_vc_config_rss_hfunc - configure the VF's RSS hash function
1774  * @vf: pointer to the VF info
1775  * @msg: pointer to the msg buffer
1776  *
1777  * Configure the VF's RSS Hash function
1778  */
1779 int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
1780 {
1781 	struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
1782 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1783 	u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
1784 	struct ice_vsi *vsi;
1785 
1786 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1787 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1788 		goto error_param;
1789 	}
1790 
1791 	if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
1792 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1793 		goto error_param;
1794 	}
1795 
1796 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1797 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1798 		goto error_param;
1799 	}
1800 
1801 	vsi = ice_get_vf_vsi(vf);
1802 	if (!vsi) {
1803 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1804 		goto error_param;
1805 	}
1806 
1807 	if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
1808 		hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
1809 
1810 	if (ice_set_rss_hfunc(vsi, hfunc))
1811 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1812 error_param:
1813 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
1814 				     NULL, 0);
1815 }
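
/* Mapping performed above: VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC selects
 * ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ; any other requested algorithm falls
 * back to the plain Toeplitz hash (ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ).
 */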
1816 
1817 /**
1818  * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
1819  * @vf: pointer to the VF info
1820  */
1821 int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
1822 {
1823 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1824 	struct virtchnl_rss_hashcfg *vrh = NULL;
1825 	int len = 0, ret;
1826 
1827 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1828 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1829 		goto err;
1830 	}
1831 
1832 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1833 		dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
1834 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1835 		goto err;
1836 	}
1837 
1838 	len = sizeof(struct virtchnl_rss_hashcfg);
1839 	vrh = kzalloc(len, GFP_KERNEL);
1840 	if (!vrh) {
1841 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1842 		len = 0;
1843 		goto err;
1844 	}
1845 
1846 	vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
1847 err:
1848 	/* send the response back to the VF */
1849 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
1850 				    (u8 *)vrh, len);
1851 	kfree(vrh);
1852 	return ret;
1853 }
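
/* The reply above always advertises ICE_DEFAULT_RSS_HASHCFG, i.e. the
 * default hash configuration supported for VFs; the VF can then narrow or
 * replace it with VIRTCHNL_OP_SET_RSS_HASHCFG, handled below.
 */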
1854 
1855 /**
1856  * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
1857  * @vf: pointer to the VF info
1858  * @msg: pointer to the msg buffer
1859  */
1860 int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
1861 {
1862 	struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
1863 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1864 	struct ice_pf *pf = vf->pf;
1865 	struct ice_vsi *vsi;
1866 	struct device *dev;
1867 	int status;
1868 
1869 	dev = ice_pf_to_dev(pf);
1870 
1871 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1872 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1873 		goto err;
1874 	}
1875 
1876 	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
1877 		dev_err(dev, "RSS not supported by PF\n");
1878 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1879 		goto err;
1880 	}
1881 
1882 	vsi = ice_get_vf_vsi(vf);
1883 	if (!vsi) {
1884 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1885 		goto err;
1886 	}
1887 
1888 	/* clear all previously programmed RSS configuration to allow VF
1889 	 * drivers to customize the RSS configuration and/or completely
1890 	 * disable RSS
1891 	 */
1892 	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
1893 	if (status && !vrh->hashcfg) {
1894 		/* only report failure to clear the current RSS configuration if
1895 		 * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
1896 		 */
1897 		v_ret = ice_err_to_virt_err(status);
1898 		goto err;
1899 	} else if (status) {
1900 		/* allow the VF to update the RSS configuration even on failure
1901 	 * to clear the current RSS configuration in an attempt to keep
1902 		 * RSS in a working state
1903 		 */
1904 		dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
1905 			 vf->vf_id);
1906 	}
1907 
1908 	if (vrh->hashcfg) {
1909 		status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
1910 		v_ret = ice_err_to_virt_err(status);
1911 	}
1912 
1913 	/* save the requested VF configuration */
1914 	if (!v_ret)
1915 		vf->rss_hashcfg = vrh->hashcfg;
1916 
1917 	/* send the response to the VF */
1918 err:
1919 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
1920 				     NULL, 0);
1921 }
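
/* The handler above distinguishes two intents: a hashcfg of 0 asks to
 * disable RSS, so a failure to clear the existing configuration is
 * reported back to the VF; a non-zero hashcfg asks to reprogram, so a
 * failed clear is only warned about and the requested bits are still
 * applied via ice_add_avf_rss_cfg(). On success the result is cached in
 * vf->rss_hashcfg.
 */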
1922 
1923