/* xref: /linux/drivers/net/ethernet/intel/ice/ice_switch.c (revision 23b1b44e6c61295084284aa7d87db863a7802b92) */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
3 
#include "ice_lib.h"
#include "ice_switch.h"
6 
/* Byte offsets into the dummy Ethernet header below, plus related limits */
#define ICE_ETH_DA_OFFSET		0	/* destination MAC */
#define ICE_ETH_ETHTYPE_OFFSET		12	/* EtherType (no VLAN tag) */
#define ICE_ETH_VLAN_TCI_OFFSET		14	/* VLAN TCI when tag present */
#define ICE_MAX_VLAN_ID			0xFFF	/* VLAN ID is a 12-bit field */
#define ICE_IPV6_ETHER_ID		0x86DD	/* EtherType for IPv6 */
12 
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14  * struct to configure any switch filter rules.
15  * {DA (6 bytes), SA(6 bytes),
16  * Ether type (2 bytes for header without VLAN tag) OR
17  * VLAN tag (4 bytes for header with VLAN tag) }
18  *
19  * Word on Hardcoded values
20  * byte 0 = 0x2: to identify it as locally administered DA MAC
21  * byte 6 = 0x2: to identify it as locally administered SA MAC
22  * byte 12 = 0x81 & byte 13 = 0x00:
23  *      In case of VLAN filter first two bytes defines ether type (0x8100)
24  *      and remaining two bytes are placeholder for programming a given VLAN ID
25  *      In case of Ether type filter it is treated as header without VLAN tag
26  *      and byte 12 and 13 is used to program a given Ether type instead
27  */
28 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
29 							0x2, 0, 0, 0, 0, 0,
30 							0x81, 0, 0, 0};
31 
32 enum {
33 	ICE_PKT_OUTER_IPV6	= BIT(0),
34 	ICE_PKT_TUN_GTPC	= BIT(1),
35 	ICE_PKT_TUN_GTPU	= BIT(2),
36 	ICE_PKT_TUN_NVGRE	= BIT(3),
37 	ICE_PKT_TUN_UDP		= BIT(4),
38 	ICE_PKT_INNER_IPV6	= BIT(5),
39 	ICE_PKT_INNER_TCP	= BIT(6),
40 	ICE_PKT_INNER_UDP	= BIT(7),
41 	ICE_PKT_GTP_NOPAY	= BIT(8),
42 	ICE_PKT_KMALLOC		= BIT(9),
43 	ICE_PKT_PPPOE		= BIT(10),
44 	ICE_PKT_L2TPV3		= BIT(11),
45 	ICE_PKT_PFCP		= BIT(12),
46 };
47 
48 struct ice_dummy_pkt_offsets {
49 	enum ice_protocol_type type;
50 	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
51 };
52 
53 struct ice_dummy_pkt_profile {
54 	const struct ice_dummy_pkt_offsets *offsets;
55 	const u8 *pkt;
56 	u32 match;
57 	u16 pkt_len;
58 	u16 offsets_len;
59 };
60 
/* Declares the offset table for dummy packet "type" */
#define ICE_DECLARE_PKT_OFFSETS(type)					\
	static const struct ice_dummy_pkt_offsets			\
	ice_dummy_##type##_packet_offsets[]

/* Declares the raw byte template for dummy packet "type" */
#define ICE_DECLARE_PKT_TEMPLATE(type)					\
	static const u8 ice_dummy_##type##_packet[]

/* Builds an ice_dummy_pkt_profile initializer tying template, offsets
 * and match flags (m) together; sizeof works because both arrays are
 * visible at the point of use.
 */
#define ICE_PKT_PROFILE(type, m) {					\
	.match		= (m),						\
	.pkt		= ice_dummy_##type##_packet,			\
	.pkt_len	= sizeof(ice_dummy_##type##_packet),		\
	.offsets	= ice_dummy_##type##_packet_offsets,		\
	.offsets_len	= sizeof(ice_dummy_##type##_packet_offsets),	\
}
75 
76 ICE_DECLARE_PKT_OFFSETS(vlan) = {
77 	{ ICE_VLAN_OFOS,        12 },
78 };
79 
80 ICE_DECLARE_PKT_TEMPLATE(vlan) = {
81 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
82 };
83 
84 ICE_DECLARE_PKT_OFFSETS(qinq) = {
85 	{ ICE_VLAN_EX,          12 },
86 	{ ICE_VLAN_IN,          16 },
87 };
88 
89 ICE_DECLARE_PKT_TEMPLATE(qinq) = {
90 	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
91 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
92 };
93 
94 ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
95 	{ ICE_MAC_OFOS,		0 },
96 	{ ICE_ETYPE_OL,		12 },
97 	{ ICE_IPV4_OFOS,	14 },
98 	{ ICE_NVGRE,		34 },
99 	{ ICE_MAC_IL,		42 },
100 	{ ICE_ETYPE_IL,		54 },
101 	{ ICE_IPV4_IL,		56 },
102 	{ ICE_TCP_IL,		76 },
103 	{ ICE_PROTOCOL_LAST,	0 },
104 };
105 
106 ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
107 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
108 	0x00, 0x00, 0x00, 0x00,
109 	0x00, 0x00, 0x00, 0x00,
110 
111 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
112 
113 	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
114 	0x00, 0x00, 0x00, 0x00,
115 	0x00, 0x2F, 0x00, 0x00,
116 	0x00, 0x00, 0x00, 0x00,
117 	0x00, 0x00, 0x00, 0x00,
118 
119 	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
120 	0x00, 0x00, 0x00, 0x00,
121 
122 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
123 	0x00, 0x00, 0x00, 0x00,
124 	0x00, 0x00, 0x00, 0x00,
125 
126 	0x08, 0x00,		/* ICE_ETYPE_IL 54 */
127 
128 	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
129 	0x00, 0x00, 0x00, 0x00,
130 	0x00, 0x06, 0x00, 0x00,
131 	0x00, 0x00, 0x00, 0x00,
132 	0x00, 0x00, 0x00, 0x00,
133 
134 	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
135 	0x00, 0x00, 0x00, 0x00,
136 	0x00, 0x00, 0x00, 0x00,
137 	0x50, 0x02, 0x20, 0x00,
138 	0x00, 0x00, 0x00, 0x00
139 };
140 
141 ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
142 	{ ICE_MAC_OFOS,		0 },
143 	{ ICE_ETYPE_OL,		12 },
144 	{ ICE_IPV4_OFOS,	14 },
145 	{ ICE_NVGRE,		34 },
146 	{ ICE_MAC_IL,		42 },
147 	{ ICE_ETYPE_IL,		54 },
148 	{ ICE_IPV4_IL,		56 },
149 	{ ICE_UDP_ILOS,		76 },
150 	{ ICE_PROTOCOL_LAST,	0 },
151 };
152 
153 ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
154 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
155 	0x00, 0x00, 0x00, 0x00,
156 	0x00, 0x00, 0x00, 0x00,
157 
158 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
159 
160 	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
161 	0x00, 0x00, 0x00, 0x00,
162 	0x00, 0x2F, 0x00, 0x00,
163 	0x00, 0x00, 0x00, 0x00,
164 	0x00, 0x00, 0x00, 0x00,
165 
166 	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
167 	0x00, 0x00, 0x00, 0x00,
168 
169 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
170 	0x00, 0x00, 0x00, 0x00,
171 	0x00, 0x00, 0x00, 0x00,
172 
173 	0x08, 0x00,		/* ICE_ETYPE_IL 54 */
174 
175 	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
176 	0x00, 0x00, 0x00, 0x00,
177 	0x00, 0x11, 0x00, 0x00,
178 	0x00, 0x00, 0x00, 0x00,
179 	0x00, 0x00, 0x00, 0x00,
180 
181 	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
182 	0x00, 0x08, 0x00, 0x00,
183 };
184 
185 ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
186 	{ ICE_MAC_OFOS,		0 },
187 	{ ICE_ETYPE_OL,		12 },
188 	{ ICE_IPV4_OFOS,	14 },
189 	{ ICE_UDP_OF,		34 },
190 	{ ICE_VXLAN,		42 },
191 	{ ICE_GENEVE,		42 },
192 	{ ICE_VXLAN_GPE,	42 },
193 	{ ICE_MAC_IL,		50 },
194 	{ ICE_ETYPE_IL,		62 },
195 	{ ICE_IPV4_IL,		64 },
196 	{ ICE_TCP_IL,		84 },
197 	{ ICE_PROTOCOL_LAST,	0 },
198 };
199 
200 ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
201 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
202 	0x00, 0x00, 0x00, 0x00,
203 	0x00, 0x00, 0x00, 0x00,
204 
205 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
206 
207 	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
208 	0x00, 0x01, 0x00, 0x00,
209 	0x40, 0x11, 0x00, 0x00,
210 	0x00, 0x00, 0x00, 0x00,
211 	0x00, 0x00, 0x00, 0x00,
212 
213 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
214 	0x00, 0x46, 0x00, 0x00,
215 
216 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
217 	0x00, 0x00, 0x00, 0x00,
218 
219 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
220 	0x00, 0x00, 0x00, 0x00,
221 	0x00, 0x00, 0x00, 0x00,
222 
223 	0x08, 0x00,		/* ICE_ETYPE_IL 62 */
224 
225 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
226 	0x00, 0x01, 0x00, 0x00,
227 	0x40, 0x06, 0x00, 0x00,
228 	0x00, 0x00, 0x00, 0x00,
229 	0x00, 0x00, 0x00, 0x00,
230 
231 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
232 	0x00, 0x00, 0x00, 0x00,
233 	0x00, 0x00, 0x00, 0x00,
234 	0x50, 0x02, 0x20, 0x00,
235 	0x00, 0x00, 0x00, 0x00
236 };
237 
238 ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
239 	{ ICE_MAC_OFOS,		0 },
240 	{ ICE_ETYPE_OL,		12 },
241 	{ ICE_IPV4_OFOS,	14 },
242 	{ ICE_UDP_OF,		34 },
243 	{ ICE_VXLAN,		42 },
244 	{ ICE_GENEVE,		42 },
245 	{ ICE_VXLAN_GPE,	42 },
246 	{ ICE_MAC_IL,		50 },
247 	{ ICE_ETYPE_IL,		62 },
248 	{ ICE_IPV4_IL,		64 },
249 	{ ICE_UDP_ILOS,		84 },
250 	{ ICE_PROTOCOL_LAST,	0 },
251 };
252 
253 ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
254 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
255 	0x00, 0x00, 0x00, 0x00,
256 	0x00, 0x00, 0x00, 0x00,
257 
258 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
259 
260 	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
261 	0x00, 0x01, 0x00, 0x00,
262 	0x00, 0x11, 0x00, 0x00,
263 	0x00, 0x00, 0x00, 0x00,
264 	0x00, 0x00, 0x00, 0x00,
265 
266 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
267 	0x00, 0x3a, 0x00, 0x00,
268 
269 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
270 	0x00, 0x00, 0x00, 0x00,
271 
272 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
273 	0x00, 0x00, 0x00, 0x00,
274 	0x00, 0x00, 0x00, 0x00,
275 
276 	0x08, 0x00,		/* ICE_ETYPE_IL 62 */
277 
278 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
279 	0x00, 0x01, 0x00, 0x00,
280 	0x00, 0x11, 0x00, 0x00,
281 	0x00, 0x00, 0x00, 0x00,
282 	0x00, 0x00, 0x00, 0x00,
283 
284 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
285 	0x00, 0x08, 0x00, 0x00,
286 };
287 
288 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
289 	{ ICE_MAC_OFOS,		0 },
290 	{ ICE_ETYPE_OL,		12 },
291 	{ ICE_IPV4_OFOS,	14 },
292 	{ ICE_NVGRE,		34 },
293 	{ ICE_MAC_IL,		42 },
294 	{ ICE_ETYPE_IL,		54 },
295 	{ ICE_IPV6_IL,		56 },
296 	{ ICE_TCP_IL,		96 },
297 	{ ICE_PROTOCOL_LAST,	0 },
298 };
299 
300 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
301 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
302 	0x00, 0x00, 0x00, 0x00,
303 	0x00, 0x00, 0x00, 0x00,
304 
305 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
306 
307 	0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
308 	0x00, 0x00, 0x00, 0x00,
309 	0x00, 0x2F, 0x00, 0x00,
310 	0x00, 0x00, 0x00, 0x00,
311 	0x00, 0x00, 0x00, 0x00,
312 
313 	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
314 	0x00, 0x00, 0x00, 0x00,
315 
316 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
317 	0x00, 0x00, 0x00, 0x00,
318 	0x00, 0x00, 0x00, 0x00,
319 
320 	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */
321 
322 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
323 	0x00, 0x08, 0x06, 0x40,
324 	0x00, 0x00, 0x00, 0x00,
325 	0x00, 0x00, 0x00, 0x00,
326 	0x00, 0x00, 0x00, 0x00,
327 	0x00, 0x00, 0x00, 0x00,
328 	0x00, 0x00, 0x00, 0x00,
329 	0x00, 0x00, 0x00, 0x00,
330 	0x00, 0x00, 0x00, 0x00,
331 	0x00, 0x00, 0x00, 0x00,
332 
333 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
334 	0x00, 0x00, 0x00, 0x00,
335 	0x00, 0x00, 0x00, 0x00,
336 	0x50, 0x02, 0x20, 0x00,
337 	0x00, 0x00, 0x00, 0x00
338 };
339 
340 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
341 	{ ICE_MAC_OFOS,		0 },
342 	{ ICE_ETYPE_OL,		12 },
343 	{ ICE_IPV4_OFOS,	14 },
344 	{ ICE_NVGRE,		34 },
345 	{ ICE_MAC_IL,		42 },
346 	{ ICE_ETYPE_IL,		54 },
347 	{ ICE_IPV6_IL,		56 },
348 	{ ICE_UDP_ILOS,		96 },
349 	{ ICE_PROTOCOL_LAST,	0 },
350 };
351 
352 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
353 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
354 	0x00, 0x00, 0x00, 0x00,
355 	0x00, 0x00, 0x00, 0x00,
356 
357 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
358 
359 	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
360 	0x00, 0x00, 0x00, 0x00,
361 	0x00, 0x2F, 0x00, 0x00,
362 	0x00, 0x00, 0x00, 0x00,
363 	0x00, 0x00, 0x00, 0x00,
364 
365 	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
366 	0x00, 0x00, 0x00, 0x00,
367 
368 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
369 	0x00, 0x00, 0x00, 0x00,
370 	0x00, 0x00, 0x00, 0x00,
371 
372 	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */
373 
374 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
375 	0x00, 0x08, 0x11, 0x40,
376 	0x00, 0x00, 0x00, 0x00,
377 	0x00, 0x00, 0x00, 0x00,
378 	0x00, 0x00, 0x00, 0x00,
379 	0x00, 0x00, 0x00, 0x00,
380 	0x00, 0x00, 0x00, 0x00,
381 	0x00, 0x00, 0x00, 0x00,
382 	0x00, 0x00, 0x00, 0x00,
383 	0x00, 0x00, 0x00, 0x00,
384 
385 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
386 	0x00, 0x08, 0x00, 0x00,
387 };
388 
389 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
390 	{ ICE_MAC_OFOS,		0 },
391 	{ ICE_ETYPE_OL,		12 },
392 	{ ICE_IPV4_OFOS,	14 },
393 	{ ICE_UDP_OF,		34 },
394 	{ ICE_VXLAN,		42 },
395 	{ ICE_GENEVE,		42 },
396 	{ ICE_VXLAN_GPE,	42 },
397 	{ ICE_MAC_IL,		50 },
398 	{ ICE_ETYPE_IL,		62 },
399 	{ ICE_IPV6_IL,		64 },
400 	{ ICE_TCP_IL,		104 },
401 	{ ICE_PROTOCOL_LAST,	0 },
402 };
403 
404 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
405 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
406 	0x00, 0x00, 0x00, 0x00,
407 	0x00, 0x00, 0x00, 0x00,
408 
409 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
410 
411 	0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
412 	0x00, 0x01, 0x00, 0x00,
413 	0x40, 0x11, 0x00, 0x00,
414 	0x00, 0x00, 0x00, 0x00,
415 	0x00, 0x00, 0x00, 0x00,
416 
417 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
418 	0x00, 0x5a, 0x00, 0x00,
419 
420 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
421 	0x00, 0x00, 0x00, 0x00,
422 
423 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
424 	0x00, 0x00, 0x00, 0x00,
425 	0x00, 0x00, 0x00, 0x00,
426 
427 	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */
428 
429 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
430 	0x00, 0x08, 0x06, 0x40,
431 	0x00, 0x00, 0x00, 0x00,
432 	0x00, 0x00, 0x00, 0x00,
433 	0x00, 0x00, 0x00, 0x00,
434 	0x00, 0x00, 0x00, 0x00,
435 	0x00, 0x00, 0x00, 0x00,
436 	0x00, 0x00, 0x00, 0x00,
437 	0x00, 0x00, 0x00, 0x00,
438 	0x00, 0x00, 0x00, 0x00,
439 
440 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
441 	0x00, 0x00, 0x00, 0x00,
442 	0x00, 0x00, 0x00, 0x00,
443 	0x50, 0x02, 0x20, 0x00,
444 	0x00, 0x00, 0x00, 0x00
445 };
446 
447 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
448 	{ ICE_MAC_OFOS,		0 },
449 	{ ICE_ETYPE_OL,		12 },
450 	{ ICE_IPV4_OFOS,	14 },
451 	{ ICE_UDP_OF,		34 },
452 	{ ICE_VXLAN,		42 },
453 	{ ICE_GENEVE,		42 },
454 	{ ICE_VXLAN_GPE,	42 },
455 	{ ICE_MAC_IL,		50 },
456 	{ ICE_ETYPE_IL,		62 },
457 	{ ICE_IPV6_IL,		64 },
458 	{ ICE_UDP_ILOS,		104 },
459 	{ ICE_PROTOCOL_LAST,	0 },
460 };
461 
462 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
463 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
464 	0x00, 0x00, 0x00, 0x00,
465 	0x00, 0x00, 0x00, 0x00,
466 
467 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
468 
469 	0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
470 	0x00, 0x01, 0x00, 0x00,
471 	0x00, 0x11, 0x00, 0x00,
472 	0x00, 0x00, 0x00, 0x00,
473 	0x00, 0x00, 0x00, 0x00,
474 
475 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
476 	0x00, 0x4e, 0x00, 0x00,
477 
478 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
479 	0x00, 0x00, 0x00, 0x00,
480 
481 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
482 	0x00, 0x00, 0x00, 0x00,
483 	0x00, 0x00, 0x00, 0x00,
484 
485 	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */
486 
487 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
488 	0x00, 0x08, 0x11, 0x40,
489 	0x00, 0x00, 0x00, 0x00,
490 	0x00, 0x00, 0x00, 0x00,
491 	0x00, 0x00, 0x00, 0x00,
492 	0x00, 0x00, 0x00, 0x00,
493 	0x00, 0x00, 0x00, 0x00,
494 	0x00, 0x00, 0x00, 0x00,
495 	0x00, 0x00, 0x00, 0x00,
496 	0x00, 0x00, 0x00, 0x00,
497 
498 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
499 	0x00, 0x08, 0x00, 0x00,
500 };
501 
502 /* offset info for MAC + IPv4 + UDP dummy packet */
503 ICE_DECLARE_PKT_OFFSETS(udp) = {
504 	{ ICE_MAC_OFOS,		0 },
505 	{ ICE_ETYPE_OL,		12 },
506 	{ ICE_IPV4_OFOS,	14 },
507 	{ ICE_UDP_ILOS,		34 },
508 	{ ICE_PROTOCOL_LAST,	0 },
509 };
510 
511 /* Dummy packet for MAC + IPv4 + UDP */
512 ICE_DECLARE_PKT_TEMPLATE(udp) = {
513 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
514 	0x00, 0x00, 0x00, 0x00,
515 	0x00, 0x00, 0x00, 0x00,
516 
517 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
518 
519 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
520 	0x00, 0x01, 0x00, 0x00,
521 	0x00, 0x11, 0x00, 0x00,
522 	0x00, 0x00, 0x00, 0x00,
523 	0x00, 0x00, 0x00, 0x00,
524 
525 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
526 	0x00, 0x08, 0x00, 0x00,
527 
528 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
529 };
530 
531 /* offset info for MAC + IPv4 + TCP dummy packet */
532 ICE_DECLARE_PKT_OFFSETS(tcp) = {
533 	{ ICE_MAC_OFOS,		0 },
534 	{ ICE_ETYPE_OL,		12 },
535 	{ ICE_IPV4_OFOS,	14 },
536 	{ ICE_TCP_IL,		34 },
537 	{ ICE_PROTOCOL_LAST,	0 },
538 };
539 
540 /* Dummy packet for MAC + IPv4 + TCP */
541 ICE_DECLARE_PKT_TEMPLATE(tcp) = {
542 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
543 	0x00, 0x00, 0x00, 0x00,
544 	0x00, 0x00, 0x00, 0x00,
545 
546 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
547 
548 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
549 	0x00, 0x01, 0x00, 0x00,
550 	0x00, 0x06, 0x00, 0x00,
551 	0x00, 0x00, 0x00, 0x00,
552 	0x00, 0x00, 0x00, 0x00,
553 
554 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
555 	0x00, 0x00, 0x00, 0x00,
556 	0x00, 0x00, 0x00, 0x00,
557 	0x50, 0x00, 0x00, 0x00,
558 	0x00, 0x00, 0x00, 0x00,
559 
560 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
561 };
562 
563 ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
564 	{ ICE_MAC_OFOS,		0 },
565 	{ ICE_ETYPE_OL,		12 },
566 	{ ICE_IPV6_OFOS,	14 },
567 	{ ICE_TCP_IL,		54 },
568 	{ ICE_PROTOCOL_LAST,	0 },
569 };
570 
571 ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
572 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
573 	0x00, 0x00, 0x00, 0x00,
574 	0x00, 0x00, 0x00, 0x00,
575 
576 	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
577 
578 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
579 	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
580 	0x00, 0x00, 0x00, 0x00,
581 	0x00, 0x00, 0x00, 0x00,
582 	0x00, 0x00, 0x00, 0x00,
583 	0x00, 0x00, 0x00, 0x00,
584 	0x00, 0x00, 0x00, 0x00,
585 	0x00, 0x00, 0x00, 0x00,
586 	0x00, 0x00, 0x00, 0x00,
587 	0x00, 0x00, 0x00, 0x00,
588 
589 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
590 	0x00, 0x00, 0x00, 0x00,
591 	0x00, 0x00, 0x00, 0x00,
592 	0x50, 0x00, 0x00, 0x00,
593 	0x00, 0x00, 0x00, 0x00,
594 
595 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
596 };
597 
598 /* IPv6 + UDP */
599 ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
600 	{ ICE_MAC_OFOS,		0 },
601 	{ ICE_ETYPE_OL,		12 },
602 	{ ICE_IPV6_OFOS,	14 },
603 	{ ICE_UDP_ILOS,		54 },
604 	{ ICE_PROTOCOL_LAST,	0 },
605 };
606 
607 /* IPv6 + UDP dummy packet */
608 ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
609 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
610 	0x00, 0x00, 0x00, 0x00,
611 	0x00, 0x00, 0x00, 0x00,
612 
613 	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
614 
615 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
616 	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
617 	0x00, 0x00, 0x00, 0x00,
618 	0x00, 0x00, 0x00, 0x00,
619 	0x00, 0x00, 0x00, 0x00,
620 	0x00, 0x00, 0x00, 0x00,
621 	0x00, 0x00, 0x00, 0x00,
622 	0x00, 0x00, 0x00, 0x00,
623 	0x00, 0x00, 0x00, 0x00,
624 	0x00, 0x00, 0x00, 0x00,
625 
626 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
627 	0x00, 0x10, 0x00, 0x00,
628 
629 	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
630 	0x00, 0x00, 0x00, 0x00,
631 
632 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
633 };
634 
635 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
636 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
637 	{ ICE_MAC_OFOS,		0 },
638 	{ ICE_IPV4_OFOS,	14 },
639 	{ ICE_UDP_OF,		34 },
640 	{ ICE_GTP,		42 },
641 	{ ICE_IPV4_IL,		62 },
642 	{ ICE_TCP_IL,		82 },
643 	{ ICE_PROTOCOL_LAST,	0 },
644 };
645 
646 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
647 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
648 	0x00, 0x00, 0x00, 0x00,
649 	0x00, 0x00, 0x00, 0x00,
650 	0x08, 0x00,
651 
652 	0x45, 0x00, 0x00, 0x58, /* IP 14 */
653 	0x00, 0x00, 0x00, 0x00,
654 	0x00, 0x11, 0x00, 0x00,
655 	0x00, 0x00, 0x00, 0x00,
656 	0x00, 0x00, 0x00, 0x00,
657 
658 	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
659 	0x00, 0x44, 0x00, 0x00,
660 
661 	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
662 	0x00, 0x00, 0x00, 0x00,
663 	0x00, 0x00, 0x00, 0x85,
664 
665 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
666 	0x00, 0x00, 0x00, 0x00,
667 
668 	0x45, 0x00, 0x00, 0x28, /* IP 62 */
669 	0x00, 0x00, 0x00, 0x00,
670 	0x00, 0x06, 0x00, 0x00,
671 	0x00, 0x00, 0x00, 0x00,
672 	0x00, 0x00, 0x00, 0x00,
673 
674 	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
675 	0x00, 0x00, 0x00, 0x00,
676 	0x00, 0x00, 0x00, 0x00,
677 	0x50, 0x00, 0x00, 0x00,
678 	0x00, 0x00, 0x00, 0x00,
679 
680 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
681 };
682 
683 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
684 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
685 	{ ICE_MAC_OFOS,		0 },
686 	{ ICE_IPV4_OFOS,	14 },
687 	{ ICE_UDP_OF,		34 },
688 	{ ICE_GTP,		42 },
689 	{ ICE_IPV4_IL,		62 },
690 	{ ICE_UDP_ILOS,		82 },
691 	{ ICE_PROTOCOL_LAST,	0 },
692 };
693 
694 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
695 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
696 	0x00, 0x00, 0x00, 0x00,
697 	0x00, 0x00, 0x00, 0x00,
698 	0x08, 0x00,
699 
700 	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
701 	0x00, 0x00, 0x00, 0x00,
702 	0x00, 0x11, 0x00, 0x00,
703 	0x00, 0x00, 0x00, 0x00,
704 	0x00, 0x00, 0x00, 0x00,
705 
706 	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
707 	0x00, 0x38, 0x00, 0x00,
708 
709 	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
710 	0x00, 0x00, 0x00, 0x00,
711 	0x00, 0x00, 0x00, 0x85,
712 
713 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
714 	0x00, 0x00, 0x00, 0x00,
715 
716 	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
717 	0x00, 0x00, 0x00, 0x00,
718 	0x00, 0x11, 0x00, 0x00,
719 	0x00, 0x00, 0x00, 0x00,
720 	0x00, 0x00, 0x00, 0x00,
721 
722 	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
723 	0x00, 0x08, 0x00, 0x00,
724 
725 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
726 };
727 
728 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
729 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
730 	{ ICE_MAC_OFOS,		0 },
731 	{ ICE_IPV4_OFOS,	14 },
732 	{ ICE_UDP_OF,		34 },
733 	{ ICE_GTP,		42 },
734 	{ ICE_IPV6_IL,		62 },
735 	{ ICE_TCP_IL,		102 },
736 	{ ICE_PROTOCOL_LAST,	0 },
737 };
738 
739 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
740 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
741 	0x00, 0x00, 0x00, 0x00,
742 	0x00, 0x00, 0x00, 0x00,
743 	0x08, 0x00,
744 
745 	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
746 	0x00, 0x00, 0x00, 0x00,
747 	0x00, 0x11, 0x00, 0x00,
748 	0x00, 0x00, 0x00, 0x00,
749 	0x00, 0x00, 0x00, 0x00,
750 
751 	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
752 	0x00, 0x58, 0x00, 0x00,
753 
754 	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
755 	0x00, 0x00, 0x00, 0x00,
756 	0x00, 0x00, 0x00, 0x85,
757 
758 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
759 	0x00, 0x00, 0x00, 0x00,
760 
761 	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
762 	0x00, 0x14, 0x06, 0x00,
763 	0x00, 0x00, 0x00, 0x00,
764 	0x00, 0x00, 0x00, 0x00,
765 	0x00, 0x00, 0x00, 0x00,
766 	0x00, 0x00, 0x00, 0x00,
767 	0x00, 0x00, 0x00, 0x00,
768 	0x00, 0x00, 0x00, 0x00,
769 	0x00, 0x00, 0x00, 0x00,
770 	0x00, 0x00, 0x00, 0x00,
771 
772 	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
773 	0x00, 0x00, 0x00, 0x00,
774 	0x00, 0x00, 0x00, 0x00,
775 	0x50, 0x00, 0x00, 0x00,
776 	0x00, 0x00, 0x00, 0x00,
777 
778 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
779 };
780 
781 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
782 	{ ICE_MAC_OFOS,		0 },
783 	{ ICE_IPV4_OFOS,	14 },
784 	{ ICE_UDP_OF,		34 },
785 	{ ICE_GTP,		42 },
786 	{ ICE_IPV6_IL,		62 },
787 	{ ICE_UDP_ILOS,		102 },
788 	{ ICE_PROTOCOL_LAST,	0 },
789 };
790 
791 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
792 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
793 	0x00, 0x00, 0x00, 0x00,
794 	0x00, 0x00, 0x00, 0x00,
795 	0x08, 0x00,
796 
797 	0x45, 0x00, 0x00, 0x60, /* IP 14 */
798 	0x00, 0x00, 0x00, 0x00,
799 	0x00, 0x11, 0x00, 0x00,
800 	0x00, 0x00, 0x00, 0x00,
801 	0x00, 0x00, 0x00, 0x00,
802 
803 	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
804 	0x00, 0x4c, 0x00, 0x00,
805 
806 	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
807 	0x00, 0x00, 0x00, 0x00,
808 	0x00, 0x00, 0x00, 0x85,
809 
810 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
811 	0x00, 0x00, 0x00, 0x00,
812 
813 	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
814 	0x00, 0x08, 0x11, 0x00,
815 	0x00, 0x00, 0x00, 0x00,
816 	0x00, 0x00, 0x00, 0x00,
817 	0x00, 0x00, 0x00, 0x00,
818 	0x00, 0x00, 0x00, 0x00,
819 	0x00, 0x00, 0x00, 0x00,
820 	0x00, 0x00, 0x00, 0x00,
821 	0x00, 0x00, 0x00, 0x00,
822 	0x00, 0x00, 0x00, 0x00,
823 
824 	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
825 	0x00, 0x08, 0x00, 0x00,
826 
827 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
828 };
829 
830 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
831 	{ ICE_MAC_OFOS,		0 },
832 	{ ICE_IPV6_OFOS,	14 },
833 	{ ICE_UDP_OF,		54 },
834 	{ ICE_GTP,		62 },
835 	{ ICE_IPV4_IL,		82 },
836 	{ ICE_TCP_IL,		102 },
837 	{ ICE_PROTOCOL_LAST,	0 },
838 };
839 
840 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
841 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
842 	0x00, 0x00, 0x00, 0x00,
843 	0x00, 0x00, 0x00, 0x00,
844 	0x86, 0xdd,
845 
846 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
847 	0x00, 0x44, 0x11, 0x00,
848 	0x00, 0x00, 0x00, 0x00,
849 	0x00, 0x00, 0x00, 0x00,
850 	0x00, 0x00, 0x00, 0x00,
851 	0x00, 0x00, 0x00, 0x00,
852 	0x00, 0x00, 0x00, 0x00,
853 	0x00, 0x00, 0x00, 0x00,
854 	0x00, 0x00, 0x00, 0x00,
855 	0x00, 0x00, 0x00, 0x00,
856 
857 	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
858 	0x00, 0x44, 0x00, 0x00,
859 
860 	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
861 	0x00, 0x00, 0x00, 0x00,
862 	0x00, 0x00, 0x00, 0x85,
863 
864 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
865 	0x00, 0x00, 0x00, 0x00,
866 
867 	0x45, 0x00, 0x00, 0x28, /* IP 82 */
868 	0x00, 0x00, 0x00, 0x00,
869 	0x00, 0x06, 0x00, 0x00,
870 	0x00, 0x00, 0x00, 0x00,
871 	0x00, 0x00, 0x00, 0x00,
872 
873 	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
874 	0x00, 0x00, 0x00, 0x00,
875 	0x00, 0x00, 0x00, 0x00,
876 	0x50, 0x00, 0x00, 0x00,
877 	0x00, 0x00, 0x00, 0x00,
878 
879 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
880 };
881 
882 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
883 	{ ICE_MAC_OFOS,		0 },
884 	{ ICE_IPV6_OFOS,	14 },
885 	{ ICE_UDP_OF,		54 },
886 	{ ICE_GTP,		62 },
887 	{ ICE_IPV4_IL,		82 },
888 	{ ICE_UDP_ILOS,		102 },
889 	{ ICE_PROTOCOL_LAST,	0 },
890 };
891 
892 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
893 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
894 	0x00, 0x00, 0x00, 0x00,
895 	0x00, 0x00, 0x00, 0x00,
896 	0x86, 0xdd,
897 
898 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
899 	0x00, 0x38, 0x11, 0x00,
900 	0x00, 0x00, 0x00, 0x00,
901 	0x00, 0x00, 0x00, 0x00,
902 	0x00, 0x00, 0x00, 0x00,
903 	0x00, 0x00, 0x00, 0x00,
904 	0x00, 0x00, 0x00, 0x00,
905 	0x00, 0x00, 0x00, 0x00,
906 	0x00, 0x00, 0x00, 0x00,
907 	0x00, 0x00, 0x00, 0x00,
908 
909 	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
910 	0x00, 0x38, 0x00, 0x00,
911 
912 	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
913 	0x00, 0x00, 0x00, 0x00,
914 	0x00, 0x00, 0x00, 0x85,
915 
916 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
917 	0x00, 0x00, 0x00, 0x00,
918 
919 	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
920 	0x00, 0x00, 0x00, 0x00,
921 	0x00, 0x11, 0x00, 0x00,
922 	0x00, 0x00, 0x00, 0x00,
923 	0x00, 0x00, 0x00, 0x00,
924 
925 	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
926 	0x00, 0x08, 0x00, 0x00,
927 
928 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
929 };
930 
931 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
932 	{ ICE_MAC_OFOS,		0 },
933 	{ ICE_IPV6_OFOS,	14 },
934 	{ ICE_UDP_OF,		54 },
935 	{ ICE_GTP,		62 },
936 	{ ICE_IPV6_IL,		82 },
937 	{ ICE_TCP_IL,		122 },
938 	{ ICE_PROTOCOL_LAST,	0 },
939 };
940 
941 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
942 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
943 	0x00, 0x00, 0x00, 0x00,
944 	0x00, 0x00, 0x00, 0x00,
945 	0x86, 0xdd,
946 
947 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
948 	0x00, 0x58, 0x11, 0x00,
949 	0x00, 0x00, 0x00, 0x00,
950 	0x00, 0x00, 0x00, 0x00,
951 	0x00, 0x00, 0x00, 0x00,
952 	0x00, 0x00, 0x00, 0x00,
953 	0x00, 0x00, 0x00, 0x00,
954 	0x00, 0x00, 0x00, 0x00,
955 	0x00, 0x00, 0x00, 0x00,
956 	0x00, 0x00, 0x00, 0x00,
957 
958 	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
959 	0x00, 0x58, 0x00, 0x00,
960 
961 	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
962 	0x00, 0x00, 0x00, 0x00,
963 	0x00, 0x00, 0x00, 0x85,
964 
965 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
966 	0x00, 0x00, 0x00, 0x00,
967 
968 	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
969 	0x00, 0x14, 0x06, 0x00,
970 	0x00, 0x00, 0x00, 0x00,
971 	0x00, 0x00, 0x00, 0x00,
972 	0x00, 0x00, 0x00, 0x00,
973 	0x00, 0x00, 0x00, 0x00,
974 	0x00, 0x00, 0x00, 0x00,
975 	0x00, 0x00, 0x00, 0x00,
976 	0x00, 0x00, 0x00, 0x00,
977 	0x00, 0x00, 0x00, 0x00,
978 
979 	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
980 	0x00, 0x00, 0x00, 0x00,
981 	0x00, 0x00, 0x00, 0x00,
982 	0x50, 0x00, 0x00, 0x00,
983 	0x00, 0x00, 0x00, 0x00,
984 
985 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
986 };
987 
988 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
989 	{ ICE_MAC_OFOS,		0 },
990 	{ ICE_IPV6_OFOS,	14 },
991 	{ ICE_UDP_OF,		54 },
992 	{ ICE_GTP,		62 },
993 	{ ICE_IPV6_IL,		82 },
994 	{ ICE_UDP_ILOS,		122 },
995 	{ ICE_PROTOCOL_LAST,	0 },
996 };
997 
998 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
999 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
1000 	0x00, 0x00, 0x00, 0x00,
1001 	0x00, 0x00, 0x00, 0x00,
1002 	0x86, 0xdd,
1003 
1004 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
1005 	0x00, 0x4c, 0x11, 0x00,
1006 	0x00, 0x00, 0x00, 0x00,
1007 	0x00, 0x00, 0x00, 0x00,
1008 	0x00, 0x00, 0x00, 0x00,
1009 	0x00, 0x00, 0x00, 0x00,
1010 	0x00, 0x00, 0x00, 0x00,
1011 	0x00, 0x00, 0x00, 0x00,
1012 	0x00, 0x00, 0x00, 0x00,
1013 	0x00, 0x00, 0x00, 0x00,
1014 
1015 	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
1016 	0x00, 0x4c, 0x00, 0x00,
1017 
1018 	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
1019 	0x00, 0x00, 0x00, 0x00,
1020 	0x00, 0x00, 0x00, 0x85,
1021 
1022 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
1023 	0x00, 0x00, 0x00, 0x00,
1024 
1025 	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
1026 	0x00, 0x08, 0x11, 0x00,
1027 	0x00, 0x00, 0x00, 0x00,
1028 	0x00, 0x00, 0x00, 0x00,
1029 	0x00, 0x00, 0x00, 0x00,
1030 	0x00, 0x00, 0x00, 0x00,
1031 	0x00, 0x00, 0x00, 0x00,
1032 	0x00, 0x00, 0x00, 0x00,
1033 	0x00, 0x00, 0x00, 0x00,
1034 	0x00, 0x00, 0x00, 0x00,
1035 
1036 	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
1037 	0x00, 0x08, 0x00, 0x00,
1038 
1039 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
1040 };
1041 
1042 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
1043 	{ ICE_MAC_OFOS,		0 },
1044 	{ ICE_IPV4_OFOS,	14 },
1045 	{ ICE_UDP_OF,		34 },
1046 	{ ICE_GTP_NO_PAY,	42 },
1047 	{ ICE_PROTOCOL_LAST,	0 },
1048 };
1049 
1050 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
1051 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1052 	0x00, 0x00, 0x00, 0x00,
1053 	0x00, 0x00, 0x00, 0x00,
1054 	0x08, 0x00,
1055 
1056 	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
1057 	0x00, 0x00, 0x40, 0x00,
1058 	0x40, 0x11, 0x00, 0x00,
1059 	0x00, 0x00, 0x00, 0x00,
1060 	0x00, 0x00, 0x00, 0x00,
1061 
1062 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
1063 	0x00, 0x00, 0x00, 0x00,
1064 
1065 	0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1066 	0x00, 0x00, 0x00, 0x00,
1067 	0x00, 0x00, 0x00, 0x85,
1068 
1069 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1070 	0x00, 0x00, 0x00, 0x00,
1071 
1072 	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
1073 	0x00, 0x00, 0x40, 0x00,
1074 	0x40, 0x00, 0x00, 0x00,
1075 	0x00, 0x00, 0x00, 0x00,
1076 	0x00, 0x00, 0x00, 0x00,
1077 	0x00, 0x00,
1078 };
1079 
1080 ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
1081 	{ ICE_MAC_OFOS,		0 },
1082 	{ ICE_IPV6_OFOS,	14 },
1083 	{ ICE_UDP_OF,		54 },
1084 	{ ICE_GTP_NO_PAY,	62 },
1085 	{ ICE_PROTOCOL_LAST,	0 },
1086 };
1087 
1088 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
1089 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1090 	0x00, 0x00, 0x00, 0x00,
1091 	0x00, 0x00, 0x00, 0x00,
1092 	0x86, 0xdd,
1093 
1094 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1095 	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1096 	0x00, 0x00, 0x00, 0x00,
1097 	0x00, 0x00, 0x00, 0x00,
1098 	0x00, 0x00, 0x00, 0x00,
1099 	0x00, 0x00, 0x00, 0x00,
1100 	0x00, 0x00, 0x00, 0x00,
1101 	0x00, 0x00, 0x00, 0x00,
1102 	0x00, 0x00, 0x00, 0x00,
1103 	0x00, 0x00, 0x00, 0x00,
1104 
1105 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1106 	0x00, 0x00, 0x00, 0x00,
1107 
1108 	0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
1109 	0x00, 0x00, 0x00, 0x00,
1110 
1111 	0x00, 0x00,
1112 };
1113 
/* Header offsets for the IPv4 PFCP session dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv4) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PFCP,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / IPv4 / UDP (dst port 8805) / PFCP session header */
ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv4) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x2c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol 17 (UDP) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 34, dst port 8805 (PFCP) */
	0x00, 0x18, 0x00, 0x00,

	0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 42, version 1, S flag set */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1146 
/* Header offsets for the IPv6 PFCP session dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PFCP,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / IPv6 / UDP (dst port 8805) / PFCP session header */
ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 54, dst port 8805 (PFCP) */
	0x00, 0x18, 0x00, 0x00,

	0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 62, version 1, S flag set */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1184 
/* Header offsets for the PPPoE / IPv4 / TCP dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_TCP_IL,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / PPPoE session (0x8864) / PPP IPv4 / TCP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12, PPPoE session stage */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 20, protocol 0x0021 = IPv4 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00, /* protocol 6 (TCP) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1220 
/* Header offsets for the PPPoE / IPv4 / UDP dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_UDP_ILOS,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / PPPoE session (0x8864) / PPP IPv4 / UDP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12, PPPoE session stage */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 20, protocol 0x0021 = IPv4 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol 17 (UDP) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1253 
/* Header offsets for the PPPoE / IPv6 / TCP dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_TCP_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / PPPoE session (0x8864) / PPP IPv6 / TCP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12, PPPoE session stage */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20, protocol 0x0057 = IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1294 
/* Header offsets for the PPPoE / IPv6 / UDP dummy packet below */
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_UDP_ILOS,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / PPPoE session (0x8864) / PPP IPv6 / UDP */
ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12, PPPoE session stage */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20, protocol 0x0057 = IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1332 
/* Header offsets for the IPv4 L2TPv3-over-IP dummy packet below */
ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_L2TPV3,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / IPv4 (protocol 115) / L2TPv3 session header */
ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00, /* protocol 0x73 = 115 (L2TPv3) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1359 
/* Header offsets for the IPv6 L2TPv3-over-IP dummy packet below */
ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_L2TPV3,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet / IPv6 (next header 115) / L2TPv3 session header */
ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x73, 0x40, /* next header 0x73 = 115 (L2TPv3) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1391 
/* Table of dummy packet profiles keyed by ICE_PKT_* match flags.
 * NOTE(review): entries appear to be ordered from most-specific flag
 * combination to least-specific, suggesting the lookup takes the first
 * match - confirm against the caller before reordering entries.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
				  ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	/* ipv6_gtp is reused for GTP-C; only the flags differ */
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(pfcp_session_ipv6, ICE_PKT_PFCP | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(pfcp_session_ipv4, ICE_PKT_PFCP),
	ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
					ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
				      ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
					  ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	/* catch-all: plain IPv4/TCP, matches when no flags are set */
	ICE_PKT_PROFILE(tcp, 0),
};
1444 
/* recipe to profile association bitmap: one bit per package profile for
 * each of the ICE_MAX_NUM_RECIPES recipes
 */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* profile to recipe association bitmap: one bit per recipe for each of
 * the ICE_MAX_NUM_PROFILES package profiles
 */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
1452 
1453 /**
1454  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1455  * @hw: pointer to the HW struct
1456  *
1457  * Allocate memory for the entire recipe table and initialize the structures/
1458  * entries corresponding to basic recipes.
1459  */
1460 int ice_init_def_sw_recp(struct ice_hw *hw)
1461 {
1462 	struct ice_sw_recipe *recps;
1463 	u8 i;
1464 
1465 	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1466 			     sizeof(*recps), GFP_KERNEL);
1467 	if (!recps)
1468 		return -ENOMEM;
1469 
1470 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1471 		recps[i].root_rid = i;
1472 		INIT_LIST_HEAD(&recps[i].filt_rules);
1473 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1474 		INIT_LIST_HEAD(&recps[i].rg_list);
1475 		mutex_init(&recps[i].filt_rule_lock);
1476 	}
1477 
1478 	hw->switch_info->recp_list = recps;
1479 
1480 	return 0;
1481 }
1482 
1483 /**
1484  * ice_aq_get_sw_cfg - get switch configuration
1485  * @hw: pointer to the hardware structure
1486  * @buf: pointer to the result buffer
1487  * @buf_size: length of the buffer available for response
1488  * @req_desc: pointer to requested descriptor
1489  * @num_elems: pointer to number of elements
1490  * @cd: pointer to command details structure or NULL
1491  *
1492  * Get switch configuration (0x0200) to be placed in buf.
1493  * This admin command returns information such as initial VSI/port number
1494  * and switch ID it belongs to.
1495  *
1496  * NOTE: *req_desc is both an input/output parameter.
1497  * The caller of this function first calls this function with *request_desc set
1498  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1499  * configuration information has been returned; if non-zero (meaning not all
1500  * the information was returned), the caller should call this function again
1501  * with *req_desc set to the previous value returned by f/w to get the
1502  * next block of switch configuration information.
1503  *
1504  * *num_elems is output only parameter. This reflects the number of elements
1505  * in response buffer. The caller of this function to use *num_elems while
1506  * parsing the response buffer.
1507  */
1508 static int
1509 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1510 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
1511 		  struct ice_sq_cd *cd)
1512 {
1513 	struct ice_aqc_get_sw_cfg *cmd;
1514 	struct ice_aq_desc desc;
1515 	int status;
1516 
1517 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1518 	cmd = &desc.params.get_sw_conf;
1519 	cmd->element = cpu_to_le16(*req_desc);
1520 
1521 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1522 	if (!status) {
1523 		*req_desc = le16_to_cpu(cmd->element);
1524 		*num_elems = le16_to_cpu(cmd->num_elems);
1525 	}
1526 
1527 	return status;
1528 }
1529 
1530 /**
1531  * ice_aq_add_vsi
1532  * @hw: pointer to the HW struct
1533  * @vsi_ctx: pointer to a VSI context struct
1534  * @cd: pointer to command details structure or NULL
1535  *
1536  * Add a VSI context to the hardware (0x0210)
1537  */
1538 static int
1539 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1540 	       struct ice_sq_cd *cd)
1541 {
1542 	struct ice_aqc_add_update_free_vsi_resp *res;
1543 	struct ice_aqc_add_get_update_free_vsi *cmd;
1544 	struct ice_aq_desc desc;
1545 	int status;
1546 
1547 	cmd = &desc.params.vsi_cmd;
1548 	res = &desc.params.add_update_free_vsi_res;
1549 
1550 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1551 
1552 	if (!vsi_ctx->alloc_from_pool)
1553 		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
1554 					   ICE_AQ_VSI_IS_VALID);
1555 	cmd->vf_id = vsi_ctx->vf_num;
1556 
1557 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1558 
1559 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1560 
1561 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1562 				 sizeof(vsi_ctx->info), cd);
1563 
1564 	if (!status) {
1565 		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1566 		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
1567 		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
1568 	}
1569 
1570 	return status;
1571 }
1572 
1573 /**
1574  * ice_aq_free_vsi
1575  * @hw: pointer to the HW struct
1576  * @vsi_ctx: pointer to a VSI context struct
1577  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1578  * @cd: pointer to command details structure or NULL
1579  *
1580  * Free VSI context info from hardware (0x0213)
1581  */
1582 static int
1583 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1584 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
1585 {
1586 	struct ice_aqc_add_update_free_vsi_resp *resp;
1587 	struct ice_aqc_add_get_update_free_vsi *cmd;
1588 	struct ice_aq_desc desc;
1589 	int status;
1590 
1591 	cmd = &desc.params.vsi_cmd;
1592 	resp = &desc.params.add_update_free_vsi_res;
1593 
1594 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1595 
1596 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1597 	if (keep_vsi_alloc)
1598 		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
1599 
1600 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1601 	if (!status) {
1602 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1603 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1604 	}
1605 
1606 	return status;
1607 }
1608 
1609 /**
1610  * ice_aq_update_vsi
1611  * @hw: pointer to the HW struct
1612  * @vsi_ctx: pointer to a VSI context struct
1613  * @cd: pointer to command details structure or NULL
1614  *
1615  * Update VSI context in the hardware (0x0211)
1616  */
1617 static int
1618 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1619 		  struct ice_sq_cd *cd)
1620 {
1621 	struct ice_aqc_add_update_free_vsi_resp *resp;
1622 	struct ice_aqc_add_get_update_free_vsi *cmd;
1623 	struct ice_aq_desc desc;
1624 	int status;
1625 
1626 	cmd = &desc.params.vsi_cmd;
1627 	resp = &desc.params.add_update_free_vsi_res;
1628 
1629 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1630 
1631 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1632 
1633 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1634 
1635 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1636 				 sizeof(vsi_ctx->info), cd);
1637 
1638 	if (!status) {
1639 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1640 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1641 	}
1642 
1643 	return status;
1644 }
1645 
1646 /**
1647  * ice_is_vsi_valid - check whether the VSI is valid or not
1648  * @hw: pointer to the HW struct
1649  * @vsi_handle: VSI handle
1650  *
1651  * check whether the VSI is valid or not
1652  */
1653 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1654 {
1655 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1656 }
1657 
1658 /**
1659  * ice_get_hw_vsi_num - return the HW VSI number
1660  * @hw: pointer to the HW struct
1661  * @vsi_handle: VSI handle
1662  *
1663  * return the HW VSI number
1664  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1665  */
1666 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1667 {
1668 	return hw->vsi_ctx[vsi_handle]->vsi_num;
1669 }
1670 
1671 /**
1672  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1673  * @hw: pointer to the HW struct
1674  * @vsi_handle: VSI handle
1675  *
1676  * return the VSI context entry for a given VSI handle
1677  */
1678 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1679 {
1680 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1681 }
1682 
1683 /**
1684  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1685  * @hw: pointer to the HW struct
1686  * @vsi_handle: VSI handle
1687  * @vsi: VSI context pointer
1688  *
1689  * save the VSI context entry for a given VSI handle
1690  */
1691 static void
1692 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1693 {
1694 	hw->vsi_ctx[vsi_handle] = vsi;
1695 }
1696 
1697 /**
1698  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1699  * @hw: pointer to the HW struct
1700  * @vsi_handle: VSI handle
1701  */
1702 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1703 {
1704 	struct ice_vsi_ctx *vsi = ice_get_vsi_ctx(hw, vsi_handle);
1705 	u8 i;
1706 
1707 	if (!vsi)
1708 		return;
1709 	ice_for_each_traffic_class(i) {
1710 		devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1711 		vsi->lan_q_ctx[i] = NULL;
1712 		devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1713 		vsi->rdma_q_ctx[i] = NULL;
1714 	}
1715 }
1716 
1717 /**
1718  * ice_clear_vsi_ctx - clear the VSI context entry
1719  * @hw: pointer to the HW struct
1720  * @vsi_handle: VSI handle
1721  *
1722  * clear the VSI context entry
1723  */
1724 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1725 {
1726 	struct ice_vsi_ctx *vsi;
1727 
1728 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1729 	if (vsi) {
1730 		ice_clear_vsi_q_ctx(hw, vsi_handle);
1731 		devm_kfree(ice_hw_to_dev(hw), vsi);
1732 		hw->vsi_ctx[vsi_handle] = NULL;
1733 	}
1734 }
1735 
1736 /**
1737  * ice_clear_all_vsi_ctx - clear all the VSI context entries
1738  * @hw: pointer to the HW struct
1739  */
1740 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1741 {
1742 	u16 i;
1743 
1744 	for (i = 0; i < ICE_MAX_VSI; i++)
1745 		ice_clear_vsi_ctx(hw, i);
1746 }
1747 
1748 /**
1749  * ice_add_vsi - add VSI context to the hardware and VSI handle list
1750  * @hw: pointer to the HW struct
1751  * @vsi_handle: unique VSI handle provided by drivers
1752  * @vsi_ctx: pointer to a VSI context struct
1753  * @cd: pointer to command details structure or NULL
1754  *
1755  * Add a VSI context to the hardware also add it into the VSI handle list.
1756  * If this function gets called after reset for existing VSIs then update
1757  * with the new HW VSI number in the corresponding VSI handle list entry.
1758  */
1759 int
1760 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1761 	    struct ice_sq_cd *cd)
1762 {
1763 	struct ice_vsi_ctx *tmp_vsi_ctx;
1764 	int status;
1765 
1766 	if (vsi_handle >= ICE_MAX_VSI)
1767 		return -EINVAL;
1768 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1769 	if (status)
1770 		return status;
1771 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1772 	if (!tmp_vsi_ctx) {
1773 		/* Create a new VSI context */
1774 		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
1775 					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
1776 		if (!tmp_vsi_ctx) {
1777 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1778 			return -ENOMEM;
1779 		}
1780 		*tmp_vsi_ctx = *vsi_ctx;
1781 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1782 	} else {
1783 		/* update with new HW VSI num */
1784 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1785 	}
1786 
1787 	return 0;
1788 }
1789 
1790 /**
1791  * ice_free_vsi- free VSI context from hardware and VSI handle list
1792  * @hw: pointer to the HW struct
1793  * @vsi_handle: unique VSI handle
1794  * @vsi_ctx: pointer to a VSI context struct
1795  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1796  * @cd: pointer to command details structure or NULL
1797  *
1798  * Free VSI context info from hardware as well as from VSI handle list
1799  */
1800 int
1801 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1802 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
1803 {
1804 	int status;
1805 
1806 	if (!ice_is_vsi_valid(hw, vsi_handle))
1807 		return -EINVAL;
1808 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1809 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1810 	if (!status)
1811 		ice_clear_vsi_ctx(hw, vsi_handle);
1812 	return status;
1813 }
1814 
1815 /**
1816  * ice_update_vsi
1817  * @hw: pointer to the HW struct
1818  * @vsi_handle: unique VSI handle
1819  * @vsi_ctx: pointer to a VSI context struct
1820  * @cd: pointer to command details structure or NULL
1821  *
1822  * Update VSI context in the hardware
1823  */
1824 int
1825 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1826 	       struct ice_sq_cd *cd)
1827 {
1828 	if (!ice_is_vsi_valid(hw, vsi_handle))
1829 		return -EINVAL;
1830 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1831 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
1832 }
1833 
1834 /**
1835  * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1836  * @hw: pointer to HW struct
1837  * @vsi_handle: VSI SW index
1838  * @enable: boolean for enable/disable
1839  */
1840 int
1841 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1842 {
1843 	struct ice_vsi_ctx *ctx, *cached_ctx;
1844 	int status;
1845 
1846 	cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1847 	if (!cached_ctx)
1848 		return -ENOENT;
1849 
1850 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1851 	if (!ctx)
1852 		return -ENOMEM;
1853 
1854 	ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
1855 	ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
1856 	ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
1857 
1858 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
1859 
1860 	if (enable)
1861 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1862 	else
1863 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1864 
1865 	status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
1866 	if (!status) {
1867 		cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
1868 		cached_ctx->info.valid_sections |= ctx->info.valid_sections;
1869 	}
1870 
1871 	kfree(ctx);
1872 	return status;
1873 }
1874 
1875 /**
1876  * ice_aq_alloc_free_vsi_list
1877  * @hw: pointer to the HW struct
1878  * @vsi_list_id: VSI list ID returned or used for lookup
1879  * @lkup_type: switch rule filter lookup type
1880  * @opc: switch rules population command type - pass in the command opcode
1881  *
1882  * allocates or free a VSI list resource
1883  */
1884 static int
1885 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1886 			   enum ice_sw_lkup_type lkup_type,
1887 			   enum ice_adminq_opc opc)
1888 {
1889 	DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
1890 	u16 buf_len = __struct_size(sw_buf);
1891 	struct ice_aqc_res_elem *vsi_ele;
1892 	int status;
1893 
1894 	sw_buf->num_elems = cpu_to_le16(1);
1895 
1896 	if (lkup_type == ICE_SW_LKUP_MAC ||
1897 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1898 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1899 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1900 	    lkup_type == ICE_SW_LKUP_PROMISC ||
1901 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1902 	    lkup_type == ICE_SW_LKUP_DFLT ||
1903 	    lkup_type == ICE_SW_LKUP_LAST) {
1904 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1905 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
1906 		if (opc == ice_aqc_opc_alloc_res)
1907 			sw_buf->res_type =
1908 				cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE |
1909 					    ICE_AQC_RES_TYPE_FLAG_SHARED);
1910 		else
1911 			sw_buf->res_type =
1912 				cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1913 	} else {
1914 		return -EINVAL;
1915 	}
1916 
1917 	if (opc == ice_aqc_opc_free_res)
1918 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1919 
1920 	status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc);
1921 	if (status)
1922 		return status;
1923 
1924 	if (opc == ice_aqc_opc_alloc_res) {
1925 		vsi_ele = &sw_buf->elem[0];
1926 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1927 	}
1928 
1929 	return 0;
1930 }
1931 
1932 /**
1933  * ice_aq_sw_rules - add/update/remove switch rules
1934  * @hw: pointer to the HW struct
1935  * @rule_list: pointer to switch rule population list
1936  * @rule_list_sz: total size of the rule list in bytes
1937  * @num_rules: number of switch rules in the rule_list
1938  * @opc: switch rules population command type - pass in the command opcode
1939  * @cd: pointer to command details structure or NULL
1940  *
1941  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1942  */
1943 int
1944 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1945 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1946 {
1947 	struct ice_aq_desc desc;
1948 	int status;
1949 
1950 	if (opc != ice_aqc_opc_add_sw_rules &&
1951 	    opc != ice_aqc_opc_update_sw_rules &&
1952 	    opc != ice_aqc_opc_remove_sw_rules)
1953 		return -EINVAL;
1954 
1955 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1956 
1957 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1958 	desc.params.sw_rules.num_rules_fltr_entry_index =
1959 		cpu_to_le16(num_rules);
1960 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1961 	if (opc != ice_aqc_opc_add_sw_rules &&
1962 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1963 		status = -ENOENT;
1964 
1965 	return status;
1966 }
1967 
1968 /**
1969  * ice_aq_add_recipe - add switch recipe
1970  * @hw: pointer to the HW struct
1971  * @s_recipe_list: pointer to switch rule population list
1972  * @num_recipes: number of switch recipes in the list
1973  * @cd: pointer to command details structure or NULL
1974  *
1975  * Add(0x0290)
1976  */
1977 int
1978 ice_aq_add_recipe(struct ice_hw *hw,
1979 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1980 		  u16 num_recipes, struct ice_sq_cd *cd)
1981 {
1982 	struct ice_aqc_add_get_recipe *cmd;
1983 	struct ice_aq_desc desc;
1984 	u16 buf_size;
1985 
1986 	cmd = &desc.params.add_get_recipe;
1987 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1988 
1989 	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1990 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1991 
1992 	buf_size = num_recipes * sizeof(*s_recipe_list);
1993 
1994 	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1995 }
1996 
1997 /**
1998  * ice_aq_get_recipe - get switch recipe
1999  * @hw: pointer to the HW struct
2000  * @s_recipe_list: pointer to switch rule population list
2001  * @num_recipes: pointer to the number of recipes (input and output)
2002  * @recipe_root: root recipe number of recipe(s) to retrieve
2003  * @cd: pointer to command details structure or NULL
2004  *
2005  * Get(0x0292)
2006  *
2007  * On input, *num_recipes should equal the number of entries in s_recipe_list.
2008  * On output, *num_recipes will equal the number of entries returned in
2009  * s_recipe_list.
2010  *
2011  * The caller must supply enough space in s_recipe_list to hold all possible
2012  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2013  */
2014 int
2015 ice_aq_get_recipe(struct ice_hw *hw,
2016 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
2017 		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2018 {
2019 	struct ice_aqc_add_get_recipe *cmd;
2020 	struct ice_aq_desc desc;
2021 	u16 buf_size;
2022 	int status;
2023 
2024 	if (*num_recipes != ICE_MAX_NUM_RECIPES)
2025 		return -EINVAL;
2026 
2027 	cmd = &desc.params.add_get_recipe;
2028 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2029 
2030 	cmd->return_index = cpu_to_le16(recipe_root);
2031 	cmd->num_sub_recipes = 0;
2032 
2033 	buf_size = *num_recipes * sizeof(*s_recipe_list);
2034 
2035 	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2036 	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);
2037 
2038 	return status;
2039 }
2040 
2041 /**
2042  * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
2043  * @hw: pointer to the HW struct
2044  * @params: parameters used to update the default recipe
2045  *
2046  * This function only supports updating default recipes and it only supports
2047  * updating a single recipe based on the lkup_idx at a time.
2048  *
2049  * This is done as a read-modify-write operation. First, get the current recipe
2050  * contents based on the recipe's ID. Then modify the field vector index and
2051  * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
2052  * the pre-existing recipe with the modifications.
2053  */
2054 int
2055 ice_update_recipe_lkup_idx(struct ice_hw *hw,
2056 			   struct ice_update_recipe_lkup_idx_params *params)
2057 {
2058 	struct ice_aqc_recipe_data_elem *rcp_list;
2059 	u16 num_recps = ICE_MAX_NUM_RECIPES;
2060 	int status;
2061 
2062 	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
2063 	if (!rcp_list)
2064 		return -ENOMEM;
2065 
2066 	/* read current recipe list from firmware */
2067 	rcp_list->recipe_indx = params->rid;
2068 	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
2069 	if (status) {
2070 		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
2071 			  params->rid, status);
2072 		goto error_out;
2073 	}
2074 
2075 	/* only modify existing recipe's lkup_idx and mask if valid, while
2076 	 * leaving all other fields the same, then update the recipe firmware
2077 	 */
2078 	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
2079 	if (params->mask_valid)
2080 		rcp_list->content.mask[params->lkup_idx] =
2081 			cpu_to_le16(params->mask);
2082 
2083 	if (params->ignore_valid)
2084 		rcp_list->content.lkup_indx[params->lkup_idx] |=
2085 			ICE_AQ_RECIPE_LKUP_IGNORE;
2086 
2087 	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
2088 	if (status)
2089 		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
2090 			  params->rid, params->lkup_idx, params->fv_idx,
2091 			  params->mask, params->mask_valid ? "true" : "false",
2092 			  status);
2093 
2094 error_out:
2095 	kfree(rcp_list);
2096 	return status;
2097 }
2098 
2099 /**
2100  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2101  * @hw: pointer to the HW struct
2102  * @profile_id: package profile ID to associate the recipe with
2103  * @r_assoc: Recipe bitmap filled in and need to be returned as response
2104  * @cd: pointer to command details structure or NULL
2105  * Recipe to profile association (0x0291)
2106  */
2107 int
2108 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
2109 			     struct ice_sq_cd *cd)
2110 {
2111 	struct ice_aqc_recipe_to_profile *cmd;
2112 	struct ice_aq_desc desc;
2113 
2114 	cmd = &desc.params.recipe_to_profile;
2115 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2116 	cmd->profile_id = cpu_to_le16(profile_id);
2117 	/* Set the recipe ID bit in the bitmask to let the device know which
2118 	 * profile we are associating the recipe to
2119 	 */
2120 	cmd->recipe_assoc = cpu_to_le64(r_assoc);
2121 
2122 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2123 }
2124 
2125 /**
2126  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2127  * @hw: pointer to the HW struct
2128  * @profile_id: package profile ID to associate the recipe with
2129  * @r_assoc: Recipe bitmap filled in and need to be returned as response
2130  * @cd: pointer to command details structure or NULL
2131  * Associate profile ID with given recipe (0x0293)
2132  */
2133 int
2134 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
2135 			     struct ice_sq_cd *cd)
2136 {
2137 	struct ice_aqc_recipe_to_profile *cmd;
2138 	struct ice_aq_desc desc;
2139 	int status;
2140 
2141 	cmd = &desc.params.recipe_to_profile;
2142 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2143 	cmd->profile_id = cpu_to_le16(profile_id);
2144 
2145 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2146 	if (!status)
2147 		*r_assoc = le64_to_cpu(cmd->recipe_assoc);
2148 
2149 	return status;
2150 }
2151 
2152 /**
2153  * ice_init_chk_recipe_reuse_support - check if recipe reuse is supported
2154  * @hw: pointer to the hardware structure
2155  */
2156 void ice_init_chk_recipe_reuse_support(struct ice_hw *hw)
2157 {
2158 	struct ice_nvm_info *nvm = &hw->flash.nvm;
2159 
2160 	hw->recp_reuse = (nvm->major == 0x4 && nvm->minor >= 0x30) ||
2161 			 nvm->major > 0x4;
2162 }
2163 
2164 /**
2165  * ice_alloc_recipe - add recipe resource
2166  * @hw: pointer to the hardware structure
2167  * @rid: recipe ID returned as response to AQ call
2168  */
2169 int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2170 {
2171 	DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
2172 	u16 buf_len = __struct_size(sw_buf);
2173 	u16 res_type;
2174 	int status;
2175 
2176 	sw_buf->num_elems = cpu_to_le16(1);
2177 	res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE);
2178 	if (hw->recp_reuse)
2179 		res_type |= ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED;
2180 	else
2181 		res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED;
2182 	sw_buf->res_type = cpu_to_le16(res_type);
2183 	status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
2184 				       ice_aqc_opc_alloc_res);
2185 	if (!status)
2186 		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
2187 
2188 	return status;
2189 }
2190 
2191 /**
2192  * ice_free_recipe_res - free recipe resource
2193  * @hw: pointer to the hardware structure
2194  * @rid: recipe ID to free
2195  *
2196  * Return: 0 on success, and others on error
2197  */
2198 static int ice_free_recipe_res(struct ice_hw *hw, u16 rid)
2199 {
2200 	return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid);
2201 }
2202 
2203 /**
2204  * ice_release_recipe_res - disassociate and free recipe resource
2205  * @hw: pointer to the hardware structure
2206  * @recp: the recipe struct resource to unassociate and free
2207  *
2208  * Return: 0 on success, and others on error
2209  */
2210 static int ice_release_recipe_res(struct ice_hw *hw,
2211 				  struct ice_sw_recipe *recp)
2212 {
2213 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
2214 	struct ice_switch_info *sw = hw->switch_info;
2215 	u64 recp_assoc;
2216 	u32 rid, prof;
2217 	int status;
2218 
2219 	for_each_set_bit(rid, recp->r_bitmap, ICE_MAX_NUM_RECIPES) {
2220 		for_each_set_bit(prof, recipe_to_profile[rid],
2221 				 ICE_MAX_NUM_PROFILES) {
2222 			status = ice_aq_get_recipe_to_profile(hw, prof,
2223 							      &recp_assoc,
2224 							      NULL);
2225 			if (status)
2226 				return status;
2227 
2228 			bitmap_from_arr64(r_bitmap, &recp_assoc,
2229 					  ICE_MAX_NUM_RECIPES);
2230 			bitmap_andnot(r_bitmap, r_bitmap, recp->r_bitmap,
2231 				      ICE_MAX_NUM_RECIPES);
2232 			bitmap_to_arr64(&recp_assoc, r_bitmap,
2233 					ICE_MAX_NUM_RECIPES);
2234 			ice_aq_map_recipe_to_profile(hw, prof,
2235 						     recp_assoc, NULL);
2236 
2237 			clear_bit(rid, profile_to_recipe[prof]);
2238 			clear_bit(prof, recipe_to_profile[rid]);
2239 		}
2240 
2241 		status = ice_free_recipe_res(hw, rid);
2242 		if (status)
2243 			return status;
2244 
2245 		sw->recp_list[rid].recp_created = false;
2246 		sw->recp_list[rid].adv_rule = false;
2247 		memset(&sw->recp_list[rid].lkup_exts, 0,
2248 		       sizeof(sw->recp_list[rid].lkup_exts));
2249 		clear_bit(rid, recp->r_bitmap);
2250 	}
2251 
2252 	return 0;
2253 }
2254 
2255 /**
2256  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2257  * @hw: pointer to hardware structure
2258  *
2259  * This function is used to populate recipe_to_profile matrix where index to
2260  * this array is the recipe ID and the element is the mapping of which profiles
2261  * is this recipe mapped to.
2262  */
2263 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2264 {
2265 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
2266 	u64 recp_assoc;
2267 	u16 i;
2268 
2269 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2270 		u16 j;
2271 
2272 		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2273 		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
2274 		if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL))
2275 			continue;
2276 		bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
2277 		bitmap_copy(profile_to_recipe[i], r_bitmap,
2278 			    ICE_MAX_NUM_RECIPES);
2279 		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2280 			set_bit(i, recipe_to_profile[j]);
2281 	}
2282 }
2283 
2284 /**
2285  * ice_collect_result_idx - copy result index values
2286  * @buf: buffer that contains the result index
2287  * @recp: the recipe struct to copy data into
2288  */
2289 static void
2290 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2291 		       struct ice_sw_recipe *recp)
2292 {
2293 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2294 		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2295 			recp->res_idxs);
2296 }
2297 
2298 /**
2299  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2300  * @hw: pointer to hardware structure
2301  * @recps: struct that we need to populate
2302  * @rid: recipe ID that we are populating
2303  * @refresh_required: true if we should get recipe to profile mapping from FW
2304  * @is_add: flag of adding recipe
2305  *
2306  * This function is used to populate all the necessary entries into our
2307  * bookkeeping so that we have a current list of all the recipes that are
2308  * programmed in the firmware.
2309  */
2310 static int
2311 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2312 		    bool *refresh_required, bool is_add)
2313 {
2314 	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
2315 	struct ice_aqc_recipe_data_elem *tmp;
2316 	u16 num_recps = ICE_MAX_NUM_RECIPES;
2317 	struct ice_prot_lkup_ext *lkup_exts;
2318 	u8 fv_word_idx = 0;
2319 	u16 sub_recps;
2320 	int status;
2321 
2322 	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
2323 
2324 	/* we need a buffer big enough to accommodate all the recipes */
2325 	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
2326 	if (!tmp)
2327 		return -ENOMEM;
2328 
2329 	tmp[0].recipe_indx = rid;
2330 	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2331 	/* non-zero status meaning recipe doesn't exist */
2332 	if (status)
2333 		goto err_unroll;
2334 
2335 	/* Get recipe to profile map so that we can get the fv from lkups that
2336 	 * we read for a recipe from FW. Since we want to minimize the number of
2337 	 * times we make this FW call, just make one call and cache the copy
2338 	 * until a new recipe is added. This operation is only required the
2339 	 * first time to get the changes from FW. Then to search existing
2340 	 * entries we don't need to update the cache again until another recipe
2341 	 * gets added.
2342 	 */
2343 	if (*refresh_required) {
2344 		ice_get_recp_to_prof_map(hw);
2345 		*refresh_required = false;
2346 	}
2347 
2348 	/* Start populating all the entries for recps[rid] based on lkups from
2349 	 * firmware. Note that we are only creating the root recipe in our
2350 	 * database.
2351 	 */
2352 	lkup_exts = &recps[rid].lkup_exts;
2353 
2354 	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2355 		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2356 		struct ice_recp_grp_entry *rg_entry;
2357 		u8 i, prof, idx, prot = 0;
2358 		bool is_root;
2359 		u16 off = 0;
2360 
2361 		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
2362 					GFP_KERNEL);
2363 		if (!rg_entry) {
2364 			status = -ENOMEM;
2365 			goto err_unroll;
2366 		}
2367 
2368 		idx = root_bufs.recipe_indx;
2369 		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2370 
2371 		/* Mark all result indices in this chain */
2372 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2373 			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2374 				result_bm);
2375 
2376 		/* get the first profile that is associated with rid */
2377 		prof = find_first_bit(recipe_to_profile[idx],
2378 				      ICE_MAX_NUM_PROFILES);
2379 		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2380 			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2381 
2382 			rg_entry->fv_idx[i] = lkup_indx;
2383 			rg_entry->fv_mask[i] =
2384 				le16_to_cpu(root_bufs.content.mask[i + 1]);
2385 
2386 			/* If the recipe is a chained recipe then all its
2387 			 * child recipe's result will have a result index.
2388 			 * To fill fv_words we should not use those result
2389 			 * index, we only need the protocol ids and offsets.
2390 			 * We will skip all the fv_idx which stores result
2391 			 * index in them. We also need to skip any fv_idx which
2392 			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2393 			 * valid offset value.
2394 			 */
2395 			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
2396 			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2397 			    rg_entry->fv_idx[i] == 0)
2398 				continue;
2399 
2400 			ice_find_prot_off(hw, ICE_BLK_SW, prof,
2401 					  rg_entry->fv_idx[i], &prot, &off);
2402 			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2403 			lkup_exts->fv_words[fv_word_idx].off = off;
2404 			lkup_exts->field_mask[fv_word_idx] =
2405 				rg_entry->fv_mask[i];
2406 			fv_word_idx++;
2407 		}
2408 		/* populate rg_list with the data from the child entry of this
2409 		 * recipe
2410 		 */
2411 		list_add(&rg_entry->l_entry, &recps[rid].rg_list);
2412 
2413 		/* Propagate some data to the recipe database */
2414 		recps[idx].is_root = !!is_root;
2415 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2416 		recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
2417 					  ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
2418 		recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl &
2419 					   ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
2420 		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2421 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2422 			recps[idx].chain_idx = root_bufs.content.result_indx &
2423 				~ICE_AQ_RECIPE_RESULT_EN;
2424 			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2425 		} else {
2426 			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2427 		}
2428 
2429 		if (!is_root) {
2430 			if (hw->recp_reuse && is_add)
2431 				recps[idx].recp_created = true;
2432 
2433 			continue;
2434 		}
2435 
2436 		/* Only do the following for root recipes entries */
2437 		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2438 		       sizeof(recps[idx].r_bitmap));
2439 		recps[idx].root_rid = root_bufs.content.rid &
2440 			~ICE_AQ_RECIPE_ID_IS_ROOT;
2441 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2442 	}
2443 
2444 	/* Complete initialization of the root recipe entry */
2445 	lkup_exts->n_val_words = fv_word_idx;
2446 	recps[rid].big_recp = (num_recps > 1);
2447 	recps[rid].n_grp_count = (u8)num_recps;
2448 	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
2449 					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
2450 					   GFP_KERNEL);
2451 	if (!recps[rid].root_buf) {
2452 		status = -ENOMEM;
2453 		goto err_unroll;
2454 	}
2455 
2456 	/* Copy result indexes */
2457 	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2458 	if (is_add)
2459 		recps[rid].recp_created = true;
2460 
2461 err_unroll:
2462 	kfree(tmp);
2463 	return status;
2464 }
2465 
2466 /* ice_init_port_info - Initialize port_info with switch configuration data
2467  * @pi: pointer to port_info
2468  * @vsi_port_num: VSI number or port number
2469  * @type: Type of switch element (port or VSI)
2470  * @swid: switch ID of the switch the element is attached to
2471  * @pf_vf_num: PF or VF number
2472  * @is_vf: true if the element is a VF, false otherwise
2473  */
2474 static void
2475 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2476 		   u16 swid, u16 pf_vf_num, bool is_vf)
2477 {
2478 	switch (type) {
2479 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2480 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2481 		pi->sw_id = swid;
2482 		pi->pf_vf_num = pf_vf_num;
2483 		pi->is_vf = is_vf;
2484 		break;
2485 	default:
2486 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2487 		break;
2488 	}
2489 }
2490 
2491 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2492  * @hw: pointer to the hardware structure
2493  */
2494 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2495 {
2496 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2497 	u16 req_desc = 0;
2498 	u16 num_elems;
2499 	int status;
2500 	u16 i;
2501 
2502 	rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
2503 	if (!rbuf)
2504 		return -ENOMEM;
2505 
2506 	/* Multiple calls to ice_aq_get_sw_cfg may be required
2507 	 * to get all the switch configuration information. The need
2508 	 * for additional calls is indicated by ice_aq_get_sw_cfg
2509 	 * writing a non-zero value in req_desc
2510 	 */
2511 	do {
2512 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
2513 
2514 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2515 					   &req_desc, &num_elems, NULL);
2516 
2517 		if (status)
2518 			break;
2519 
2520 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2521 			u16 pf_vf_num, swid, vsi_port_num;
2522 			bool is_vf = false;
2523 			u8 res_type;
2524 
2525 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2526 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2527 
2528 			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2529 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2530 
2531 			swid = le16_to_cpu(ele->swid);
2532 
2533 			if (le16_to_cpu(ele->pf_vf_num) &
2534 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2535 				is_vf = true;
2536 
2537 			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2538 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2539 
2540 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2541 				/* FW VSI is not needed. Just continue. */
2542 				continue;
2543 			}
2544 
2545 			ice_init_port_info(hw->port_info, vsi_port_num,
2546 					   res_type, swid, pf_vf_num, is_vf);
2547 		}
2548 	} while (req_desc && !status);
2549 
2550 	kfree(rbuf);
2551 	return status;
2552 }
2553 
2554 /**
2555  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2556  * @hw: pointer to the hardware structure
2557  * @fi: filter info structure to fill/update
2558  *
2559  * This helper function populates the lb_en and lan_en elements of the provided
2560  * ice_fltr_info struct using the switch's type and characteristics of the
2561  * switch rule being configured.
2562  */
2563 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2564 {
2565 	fi->lb_en = false;
2566 	fi->lan_en = false;
2567 	if ((fi->flag & ICE_FLTR_TX) &&
2568 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
2569 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2570 	     fi->fltr_act == ICE_FWD_TO_Q ||
2571 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2572 		/* Setting LB for prune actions will result in replicated
2573 		 * packets to the internal switch that will be dropped.
2574 		 */
2575 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2576 			fi->lb_en = true;
2577 
2578 		/* Set lan_en to TRUE if
2579 		 * 1. The switch is a VEB AND
2580 		 * 2
2581 		 * 2.1 The lookup is a directional lookup like ethertype,
2582 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
2583 		 * and default-port OR
2584 		 * 2.2 The lookup is VLAN, OR
2585 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2586 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2587 		 *
2588 		 * OR
2589 		 *
2590 		 * The switch is a VEPA.
2591 		 *
2592 		 * In all other cases, the LAN enable has to be set to false.
2593 		 */
2594 		if (hw->evb_veb) {
2595 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2596 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2597 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2598 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2599 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
2600 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
2601 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
2602 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2603 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2604 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2605 				fi->lan_en = true;
2606 		} else {
2607 			fi->lan_en = true;
2608 		}
2609 	}
2610 
2611 	if (fi->flag & ICE_FLTR_TX_ONLY)
2612 		fi->lan_en = false;
2613 }
2614 
2615 /**
2616  * ice_fill_eth_hdr - helper to copy dummy_eth_hdr into supplied buffer
2617  * @eth_hdr: pointer to buffer to populate
2618  */
2619 void ice_fill_eth_hdr(u8 *eth_hdr)
2620 {
2621 	memcpy(eth_hdr, dummy_eth_header, DUMMY_ETH_HDR_LEN);
2622 }
2623 
2624 /**
2625  * ice_fill_sw_rule - Helper function to fill switch rule structure
2626  * @hw: pointer to the hardware structure
2627  * @f_info: entry containing packet forwarding information
2628  * @s_rule: switch rule structure to be filled in based on mac_entry
2629  * @opc: switch rules population command type - pass in the command opcode
2630  */
2631 static void
2632 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2633 		 struct ice_sw_rule_lkup_rx_tx *s_rule,
2634 		 enum ice_adminq_opc opc)
2635 {
2636 	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2637 	u16 vlan_tpid = ETH_P_8021Q;
2638 	void *daddr = NULL;
2639 	u16 eth_hdr_sz;
2640 	u8 *eth_hdr;
2641 	u32 act = 0;
2642 	__be16 *off;
2643 	u8 q_rgn;
2644 
2645 	if (opc == ice_aqc_opc_remove_sw_rules) {
2646 		s_rule->act = 0;
2647 		s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2648 		s_rule->hdr_len = 0;
2649 		return;
2650 	}
2651 
2652 	eth_hdr_sz = sizeof(dummy_eth_header);
2653 	eth_hdr = s_rule->hdr_data;
2654 
2655 	/* initialize the ether header with a dummy header */
2656 	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
2657 	ice_fill_sw_info(hw, f_info);
2658 
2659 	switch (f_info->fltr_act) {
2660 	case ICE_FWD_TO_VSI:
2661 		act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
2662 				  f_info->fwd_id.hw_vsi_id);
2663 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2664 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2665 				ICE_SINGLE_ACT_VALID_BIT;
2666 		break;
2667 	case ICE_FWD_TO_VSI_LIST:
2668 		act |= ICE_SINGLE_ACT_VSI_LIST;
2669 		act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_LIST_ID_M,
2670 				  f_info->fwd_id.vsi_list_id);
2671 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2672 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2673 				ICE_SINGLE_ACT_VALID_BIT;
2674 		break;
2675 	case ICE_FWD_TO_Q:
2676 		act |= ICE_SINGLE_ACT_TO_Q;
2677 		act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
2678 				  f_info->fwd_id.q_id);
2679 		break;
2680 	case ICE_DROP_PACKET:
2681 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2682 			ICE_SINGLE_ACT_VALID_BIT;
2683 		break;
2684 	case ICE_FWD_TO_QGRP:
2685 		q_rgn = f_info->qgrp_size > 0 ?
2686 			(u8)ilog2(f_info->qgrp_size) : 0;
2687 		act |= ICE_SINGLE_ACT_TO_Q;
2688 		act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
2689 				  f_info->fwd_id.q_id);
2690 		act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn);
2691 		break;
2692 	default:
2693 		return;
2694 	}
2695 
2696 	if (f_info->lb_en)
2697 		act |= ICE_SINGLE_ACT_LB_ENABLE;
2698 	if (f_info->lan_en)
2699 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
2700 
2701 	switch (f_info->lkup_type) {
2702 	case ICE_SW_LKUP_MAC:
2703 		daddr = f_info->l_data.mac.mac_addr;
2704 		break;
2705 	case ICE_SW_LKUP_VLAN:
2706 		vlan_id = f_info->l_data.vlan.vlan_id;
2707 		if (f_info->l_data.vlan.tpid_valid)
2708 			vlan_tpid = f_info->l_data.vlan.tpid;
2709 		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2710 		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2711 			act |= ICE_SINGLE_ACT_PRUNE;
2712 			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2713 		}
2714 		break;
2715 	case ICE_SW_LKUP_ETHERTYPE_MAC:
2716 		daddr = f_info->l_data.ethertype_mac.mac_addr;
2717 		fallthrough;
2718 	case ICE_SW_LKUP_ETHERTYPE:
2719 		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2720 		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
2721 		break;
2722 	case ICE_SW_LKUP_MAC_VLAN:
2723 		daddr = f_info->l_data.mac_vlan.mac_addr;
2724 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
2725 		break;
2726 	case ICE_SW_LKUP_PROMISC_VLAN:
2727 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
2728 		fallthrough;
2729 	case ICE_SW_LKUP_PROMISC:
2730 		daddr = f_info->l_data.mac_vlan.mac_addr;
2731 		break;
2732 	default:
2733 		break;
2734 	}
2735 
2736 	s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
2737 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2738 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
2739 
2740 	/* Recipe set depending on lookup type */
2741 	s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
2742 	s_rule->src = cpu_to_le16(f_info->src);
2743 	s_rule->act = cpu_to_le32(act);
2744 
2745 	if (daddr)
2746 		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
2747 
2748 	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2749 		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2750 		*off = cpu_to_be16(vlan_id);
2751 		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2752 		*off = cpu_to_be16(vlan_tpid);
2753 	}
2754 
2755 	/* Create the switch rule with the final dummy Ethernet header */
2756 	if (opc != ice_aqc_opc_update_sw_rules)
2757 		s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
2758 }
2759 
2760 /**
2761  * ice_add_marker_act
2762  * @hw: pointer to the hardware structure
2763  * @m_ent: the management entry for which sw marker needs to be added
2764  * @sw_marker: sw marker to tag the Rx descriptor with
2765  * @l_id: large action resource ID
2766  *
2767  * Create a large action to hold software marker and update the switch rule
2768  * entry pointed by m_ent with newly created large action
2769  */
2770 static int
2771 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2772 		   u16 sw_marker, u16 l_id)
2773 {
2774 	struct ice_sw_rule_lkup_rx_tx *rx_tx;
2775 	struct ice_sw_rule_lg_act *lg_act;
2776 	/* For software marker we need 3 large actions
2777 	 * 1. FWD action: FWD TO VSI or VSI LIST
2778 	 * 2. GENERIC VALUE action to hold the profile ID
2779 	 * 3. GENERIC VALUE action to hold the software marker ID
2780 	 */
2781 	const u16 num_lg_acts = 3;
2782 	u16 lg_act_size;
2783 	u16 rules_size;
2784 	int status;
2785 	u32 act;
2786 	u16 id;
2787 
2788 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2789 		return -EINVAL;
2790 
2791 	/* Create two back-to-back switch rules and submit them to the HW using
2792 	 * one memory buffer:
2793 	 *    1. Large Action
2794 	 *    2. Look up Tx Rx
2795 	 */
2796 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
2797 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
2798 	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
2799 	if (!lg_act)
2800 		return -ENOMEM;
2801 
2802 	rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
2803 
2804 	/* Fill in the first switch rule i.e. large action */
2805 	lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
2806 	lg_act->index = cpu_to_le16(l_id);
2807 	lg_act->size = cpu_to_le16(num_lg_acts);
2808 
2809 	/* First action VSI forwarding or VSI list forwarding depending on how
2810 	 * many VSIs
2811 	 */
2812 	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2813 		m_ent->fltr_info.fwd_id.hw_vsi_id;
2814 
2815 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2816 	act |= FIELD_PREP(ICE_LG_ACT_VSI_LIST_ID_M, id);
2817 	if (m_ent->vsi_count > 1)
2818 		act |= ICE_LG_ACT_VSI_LIST;
2819 	lg_act->act[0] = cpu_to_le32(act);
2820 
2821 	/* Second action descriptor type */
2822 	act = ICE_LG_ACT_GENERIC;
2823 
2824 	act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, 1);
2825 	lg_act->act[1] = cpu_to_le32(act);
2826 
2827 	act = FIELD_PREP(ICE_LG_ACT_GENERIC_OFFSET_M,
2828 			 ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX);
2829 
2830 	/* Third action Marker value */
2831 	act |= ICE_LG_ACT_GENERIC;
2832 	act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, sw_marker);
2833 
2834 	lg_act->act[2] = cpu_to_le32(act);
2835 
2836 	/* call the fill switch rule to fill the lookup Tx Rx structure */
2837 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2838 			 ice_aqc_opc_update_sw_rules);
2839 
2840 	/* Update the action to point to the large action ID */
2841 	act = ICE_SINGLE_ACT_PTR;
2842 	act |= FIELD_PREP(ICE_SINGLE_ACT_PTR_VAL_M, l_id);
2843 	rx_tx->act = cpu_to_le32(act);
2844 
2845 	/* Use the filter rule ID of the previously created rule with single
2846 	 * act. Once the update happens, hardware will treat this as large
2847 	 * action
2848 	 */
2849 	rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
2850 
2851 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2852 				 ice_aqc_opc_update_sw_rules, NULL);
2853 	if (!status) {
2854 		m_ent->lg_act_idx = l_id;
2855 		m_ent->sw_marker_id = sw_marker;
2856 	}
2857 
2858 	devm_kfree(ice_hw_to_dev(hw), lg_act);
2859 	return status;
2860 }
2861 
2862 /**
2863  * ice_create_vsi_list_map
2864  * @hw: pointer to the hardware structure
2865  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2866  * @num_vsi: number of VSI handles in the array
2867  * @vsi_list_id: VSI list ID generated as part of allocate resource
2868  *
2869  * Helper function to create a new entry of VSI list ID to VSI mapping
2870  * using the given VSI list ID
2871  */
2872 static struct ice_vsi_list_map_info *
2873 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2874 			u16 vsi_list_id)
2875 {
2876 	struct ice_switch_info *sw = hw->switch_info;
2877 	struct ice_vsi_list_map_info *v_map;
2878 	int i;
2879 
2880 	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2881 	if (!v_map)
2882 		return NULL;
2883 
2884 	v_map->vsi_list_id = vsi_list_id;
2885 	v_map->ref_cnt = 1;
2886 	for (i = 0; i < num_vsi; i++)
2887 		set_bit(vsi_handle_arr[i], v_map->vsi_map);
2888 
2889 	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2890 	return v_map;
2891 }
2892 
2893 /**
2894  * ice_update_vsi_list_rule
2895  * @hw: pointer to the hardware structure
2896  * @vsi_handle_arr: array of VSI handles to form a VSI list
2897  * @num_vsi: number of VSI handles in the array
2898  * @vsi_list_id: VSI list ID generated as part of allocate resource
2899  * @remove: Boolean value to indicate if this is a remove action
2900  * @opc: switch rules population command type - pass in the command opcode
2901  * @lkup_type: lookup type of the filter
2902  *
2903  * Call AQ command to add a new switch rule or update existing switch rule
2904  * using the given VSI list ID
2905  */
2906 static int
2907 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2908 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2909 			 enum ice_sw_lkup_type lkup_type)
2910 {
2911 	struct ice_sw_rule_vsi_list *s_rule;
2912 	u16 s_rule_size;
2913 	u16 rule_type;
2914 	int status;
2915 	int i;
2916 
2917 	if (!num_vsi)
2918 		return -EINVAL;
2919 
2920 	if (lkup_type == ICE_SW_LKUP_MAC ||
2921 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2922 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2923 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2924 	    lkup_type == ICE_SW_LKUP_PROMISC ||
2925 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2926 	    lkup_type == ICE_SW_LKUP_DFLT ||
2927 	    lkup_type == ICE_SW_LKUP_LAST)
2928 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2929 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2930 	else if (lkup_type == ICE_SW_LKUP_VLAN)
2931 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2932 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2933 	else
2934 		return -EINVAL;
2935 
2936 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2937 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2938 	if (!s_rule)
2939 		return -ENOMEM;
2940 	for (i = 0; i < num_vsi; i++) {
2941 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2942 			status = -EINVAL;
2943 			goto exit;
2944 		}
2945 		/* AQ call requires hw_vsi_id(s) */
2946 		s_rule->vsi[i] =
2947 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2948 	}
2949 
2950 	s_rule->hdr.type = cpu_to_le16(rule_type);
2951 	s_rule->number_vsi = cpu_to_le16(num_vsi);
2952 	s_rule->index = cpu_to_le16(vsi_list_id);
2953 
2954 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2955 
2956 exit:
2957 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2958 	return status;
2959 }
2960 
2961 /**
2962  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2963  * @hw: pointer to the HW struct
2964  * @vsi_handle_arr: array of VSI handles to form a VSI list
2965  * @num_vsi: number of VSI handles in the array
2966  * @vsi_list_id: stores the ID of the VSI list to be created
2967  * @lkup_type: switch rule filter's lookup type
2968  */
2969 static int
2970 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2971 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2972 {
2973 	int status;
2974 
2975 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2976 					    ice_aqc_opc_alloc_res);
2977 	if (status)
2978 		return status;
2979 
2980 	/* Update the newly created VSI list to include the specified VSIs */
2981 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2982 					*vsi_list_id, false,
2983 					ice_aqc_opc_add_sw_rules, lkup_type);
2984 }
2985 
2986 /**
2987  * ice_create_pkt_fwd_rule
2988  * @hw: pointer to the hardware structure
2989  * @f_entry: entry containing packet forwarding information
2990  *
2991  * Create switch rule with given filter information and add an entry
2992  * to the corresponding filter management list to track this switch rule
2993  * and VSI mapping
2994  */
2995 static int
2996 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2997 			struct ice_fltr_list_entry *f_entry)
2998 {
2999 	struct ice_fltr_mgmt_list_entry *fm_entry;
3000 	struct ice_sw_rule_lkup_rx_tx *s_rule;
3001 	enum ice_sw_lkup_type l_type;
3002 	struct ice_sw_recipe *recp;
3003 	int status;
3004 
3005 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
3006 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
3007 			      GFP_KERNEL);
3008 	if (!s_rule)
3009 		return -ENOMEM;
3010 	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
3011 				GFP_KERNEL);
3012 	if (!fm_entry) {
3013 		status = -ENOMEM;
3014 		goto ice_create_pkt_fwd_rule_exit;
3015 	}
3016 
3017 	fm_entry->fltr_info = f_entry->fltr_info;
3018 
3019 	/* Initialize all the fields for the management entry */
3020 	fm_entry->vsi_count = 1;
3021 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3022 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3023 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3024 
3025 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3026 			 ice_aqc_opc_add_sw_rules);
3027 
3028 	status = ice_aq_sw_rules(hw, s_rule,
3029 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
3030 				 ice_aqc_opc_add_sw_rules, NULL);
3031 	if (status) {
3032 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
3033 		goto ice_create_pkt_fwd_rule_exit;
3034 	}
3035 
3036 	f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
3037 	fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
3038 
3039 	/* The book keeping entries will get removed when base driver
3040 	 * calls remove filter AQ command
3041 	 */
3042 	l_type = fm_entry->fltr_info.lkup_type;
3043 	recp = &hw->switch_info->recp_list[l_type];
3044 	list_add(&fm_entry->list_entry, &recp->filt_rules);
3045 
3046 ice_create_pkt_fwd_rule_exit:
3047 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3048 	return status;
3049 }
3050 
3051 /**
3052  * ice_update_pkt_fwd_rule
3053  * @hw: pointer to the hardware structure
3054  * @f_info: filter information for switch rule
3055  *
3056  * Call AQ command to update a previously created switch rule with a
3057  * VSI list ID
3058  */
3059 static int
3060 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3061 {
3062 	struct ice_sw_rule_lkup_rx_tx *s_rule;
3063 	int status;
3064 
3065 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
3066 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
3067 			      GFP_KERNEL);
3068 	if (!s_rule)
3069 		return -ENOMEM;
3070 
3071 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3072 
3073 	s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
3074 
3075 	/* Update switch rule with new rule set to forward VSI list */
3076 	status = ice_aq_sw_rules(hw, s_rule,
3077 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
3078 				 ice_aqc_opc_update_sw_rules, NULL);
3079 
3080 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3081 	return status;
3082 }
3083 
3084 /**
3085  * ice_update_sw_rule_bridge_mode
3086  * @hw: pointer to the HW struct
3087  *
3088  * Updates unicast switch filter rules based on VEB/VEPA mode
3089  */
3090 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3091 {
3092 	struct ice_switch_info *sw = hw->switch_info;
3093 	struct ice_fltr_mgmt_list_entry *fm_entry;
3094 	struct list_head *rule_head;
3095 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3096 	int status = 0;
3097 
3098 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3099 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3100 
3101 	mutex_lock(rule_lock);
3102 	list_for_each_entry(fm_entry, rule_head, list_entry) {
3103 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
3104 		u8 *addr = fi->l_data.mac.mac_addr;
3105 
3106 		/* Update unicast Tx rules to reflect the selected
3107 		 * VEB/VEPA mode
3108 		 */
3109 		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
3110 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
3111 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3112 		     fi->fltr_act == ICE_FWD_TO_Q ||
3113 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
3114 			status = ice_update_pkt_fwd_rule(hw, fi);
3115 			if (status)
3116 				break;
3117 		}
3118 	}
3119 
3120 	mutex_unlock(rule_lock);
3121 
3122 	return status;
3123 }
3124 
3125 /**
3126  * ice_add_update_vsi_list
3127  * @hw: pointer to the hardware structure
3128  * @m_entry: pointer to current filter management list entry
3129  * @cur_fltr: filter information from the book keeping entry
3130  * @new_fltr: filter information with the new VSI to be added
3131  *
3132  * Call AQ command to add or update previously created VSI list with new VSI.
3133  *
3134  * Helper function to do book keeping associated with adding filter information
3135  * The algorithm to do the book keeping is described below :
3136  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3137  *	if only one VSI has been added till now
3138  *		Allocate a new VSI list and add two VSIs
3139  *		to this list using switch rule command
3140  *		Update the previously created switch rule with the
3141  *		newly created VSI list ID
3142  *	if a VSI list was previously created
3143  *		Add the new VSI to the previously created VSI list set
3144  *		using the update switch rule command
3145  */
3146 static int
3147 ice_add_update_vsi_list(struct ice_hw *hw,
3148 			struct ice_fltr_mgmt_list_entry *m_entry,
3149 			struct ice_fltr_info *cur_fltr,
3150 			struct ice_fltr_info *new_fltr)
3151 {
3152 	u16 vsi_list_id = 0;
3153 	int status = 0;
3154 
3155 	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3156 	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3157 		return -EOPNOTSUPP;
3158 
3159 	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3160 	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3161 	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3162 	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3163 		return -EOPNOTSUPP;
3164 
3165 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3166 		/* Only one entry existed in the mapping and it was not already
3167 		 * a part of a VSI list. So, create a VSI list with the old and
3168 		 * new VSIs.
3169 		 */
3170 		struct ice_fltr_info tmp_fltr;
3171 		u16 vsi_handle_arr[2];
3172 
3173 		/* A rule already exists with the new VSI being added */
3174 		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3175 			return -EEXIST;
3176 
3177 		vsi_handle_arr[0] = cur_fltr->vsi_handle;
3178 		vsi_handle_arr[1] = new_fltr->vsi_handle;
3179 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3180 						  &vsi_list_id,
3181 						  new_fltr->lkup_type);
3182 		if (status)
3183 			return status;
3184 
3185 		tmp_fltr = *new_fltr;
3186 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3187 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3188 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3189 		/* Update the previous switch rule of "MAC forward to VSI" to
3190 		 * "MAC fwd to VSI list"
3191 		 */
3192 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3193 		if (status)
3194 			return status;
3195 
3196 		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3197 		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3198 		m_entry->vsi_list_info =
3199 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3200 						vsi_list_id);
3201 
3202 		if (!m_entry->vsi_list_info)
3203 			return -ENOMEM;
3204 
3205 		/* If this entry was large action then the large action needs
3206 		 * to be updated to point to FWD to VSI list
3207 		 */
3208 		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3209 			status =
3210 			    ice_add_marker_act(hw, m_entry,
3211 					       m_entry->sw_marker_id,
3212 					       m_entry->lg_act_idx);
3213 	} else {
3214 		u16 vsi_handle = new_fltr->vsi_handle;
3215 		enum ice_adminq_opc opcode;
3216 
3217 		if (!m_entry->vsi_list_info)
3218 			return -EIO;
3219 
3220 		/* A rule already exists with the new VSI being added */
3221 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
3222 			return 0;
3223 
3224 		/* Update the previously created VSI list set with
3225 		 * the new VSI ID passed in
3226 		 */
3227 		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3228 		opcode = ice_aqc_opc_update_sw_rules;
3229 
3230 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3231 						  vsi_list_id, false, opcode,
3232 						  new_fltr->lkup_type);
3233 		/* update VSI list mapping info with new VSI ID */
3234 		if (!status)
3235 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
3236 	}
3237 	if (!status)
3238 		m_entry->vsi_count++;
3239 	return status;
3240 }
3241 
3242 /**
3243  * ice_find_rule_entry - Search a rule entry
3244  * @hw: pointer to the hardware structure
3245  * @recp_id: lookup type for which the specified rule needs to be searched
3246  * @f_info: rule information
3247  *
3248  * Helper function to search for a given rule entry
3249  * Returns pointer to entry storing the rule if found
3250  */
3251 static struct ice_fltr_mgmt_list_entry *
3252 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
3253 {
3254 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3255 	struct ice_switch_info *sw = hw->switch_info;
3256 	struct list_head *list_head;
3257 
3258 	list_head = &sw->recp_list[recp_id].filt_rules;
3259 	list_for_each_entry(list_itr, list_head, list_entry) {
3260 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3261 			    sizeof(f_info->l_data)) &&
3262 		    f_info->flag == list_itr->fltr_info.flag) {
3263 			ret = list_itr;
3264 			break;
3265 		}
3266 	}
3267 	return ret;
3268 }
3269 
3270 /**
3271  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3272  * @hw: pointer to the hardware structure
3273  * @recp_id: lookup type for which VSI lists needs to be searched
3274  * @vsi_handle: VSI handle to be found in VSI list
3275  * @vsi_list_id: VSI list ID found containing vsi_handle
3276  *
3277  * Helper function to search a VSI list with single entry containing given VSI
3278  * handle element. This can be extended further to search VSI list with more
3279  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3280  */
3281 struct ice_vsi_list_map_info *
3282 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3283 			u16 *vsi_list_id)
3284 {
3285 	struct ice_vsi_list_map_info *map_info = NULL;
3286 	struct ice_switch_info *sw = hw->switch_info;
3287 	struct ice_fltr_mgmt_list_entry *list_itr;
3288 	struct list_head *list_head;
3289 
3290 	list_head = &sw->recp_list[recp_id].filt_rules;
3291 	list_for_each_entry(list_itr, list_head, list_entry) {
3292 		if (list_itr->vsi_list_info) {
3293 			map_info = list_itr->vsi_list_info;
3294 			if (test_bit(vsi_handle, map_info->vsi_map)) {
3295 				*vsi_list_id = map_info->vsi_list_id;
3296 				return map_info;
3297 			}
3298 		}
3299 	}
3300 	return NULL;
3301 }
3302 
3303 /**
3304  * ice_add_rule_internal - add rule for a given lookup type
3305  * @hw: pointer to the hardware structure
3306  * @recp_id: lookup type (recipe ID) for which rule has to be added
3307  * @f_entry: structure containing MAC forwarding information
3308  *
3309  * Adds or updates the rule lists for a given recipe
3310  */
3311 static int
3312 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3313 		      struct ice_fltr_list_entry *f_entry)
3314 {
3315 	struct ice_switch_info *sw = hw->switch_info;
3316 	struct ice_fltr_info *new_fltr, *cur_fltr;
3317 	struct ice_fltr_mgmt_list_entry *m_entry;
3318 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3319 	int status = 0;
3320 
3321 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3322 		return -EINVAL;
3323 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3324 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3325 
3326 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3327 
3328 	mutex_lock(rule_lock);
3329 	new_fltr = &f_entry->fltr_info;
3330 	if (new_fltr->flag & ICE_FLTR_RX)
3331 		new_fltr->src = hw->port_info->lport;
3332 	else if (new_fltr->flag & ICE_FLTR_TX)
3333 		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
3334 
3335 	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
3336 	if (!m_entry) {
3337 		mutex_unlock(rule_lock);
3338 		return ice_create_pkt_fwd_rule(hw, f_entry);
3339 	}
3340 
3341 	cur_fltr = &m_entry->fltr_info;
3342 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3343 	mutex_unlock(rule_lock);
3344 
3345 	return status;
3346 }
3347 
3348 /**
3349  * ice_remove_vsi_list_rule
3350  * @hw: pointer to the hardware structure
3351  * @vsi_list_id: VSI list ID generated as part of allocate resource
3352  * @lkup_type: switch rule filter lookup type
3353  *
3354  * The VSI list should be emptied before this function is called to remove the
3355  * VSI list.
3356  */
3357 static int
3358 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3359 			 enum ice_sw_lkup_type lkup_type)
3360 {
3361 	struct ice_sw_rule_vsi_list *s_rule;
3362 	u16 s_rule_size;
3363 	int status;
3364 
3365 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
3366 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3367 	if (!s_rule)
3368 		return -ENOMEM;
3369 
3370 	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3371 	s_rule->index = cpu_to_le16(vsi_list_id);
3372 
3373 	/* Free the vsi_list resource that we allocated. It is assumed that the
3374 	 * list is empty at this point.
3375 	 */
3376 	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3377 					    ice_aqc_opc_free_res);
3378 
3379 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3380 	return status;
3381 }
3382 
3383 /**
3384  * ice_rem_update_vsi_list
3385  * @hw: pointer to the hardware structure
3386  * @vsi_handle: VSI handle of the VSI to remove
3387  * @fm_list: filter management entry for which the VSI list management needs to
3388  *           be done
3389  */
3390 static int
3391 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3392 			struct ice_fltr_mgmt_list_entry *fm_list)
3393 {
3394 	enum ice_sw_lkup_type lkup_type;
3395 	u16 vsi_list_id;
3396 	int status = 0;
3397 
3398 	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3399 	    fm_list->vsi_count == 0)
3400 		return -EINVAL;
3401 
3402 	/* A rule with the VSI being removed does not exist */
3403 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
3404 		return -ENOENT;
3405 
3406 	lkup_type = fm_list->fltr_info.lkup_type;
3407 	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3408 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3409 					  ice_aqc_opc_update_sw_rules,
3410 					  lkup_type);
3411 	if (status)
3412 		return status;
3413 
3414 	fm_list->vsi_count--;
3415 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3416 
3417 	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3418 		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3419 		struct ice_vsi_list_map_info *vsi_list_info =
3420 			fm_list->vsi_list_info;
3421 		u16 rem_vsi_handle;
3422 
3423 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
3424 						ICE_MAX_VSI);
3425 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3426 			return -EIO;
3427 
3428 		/* Make sure VSI list is empty before removing it below */
3429 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3430 						  vsi_list_id, true,
3431 						  ice_aqc_opc_update_sw_rules,
3432 						  lkup_type);
3433 		if (status)
3434 			return status;
3435 
3436 		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3437 		tmp_fltr_info.fwd_id.hw_vsi_id =
3438 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
3439 		tmp_fltr_info.vsi_handle = rem_vsi_handle;
3440 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3441 		if (status) {
3442 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3443 				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
3444 			return status;
3445 		}
3446 
3447 		fm_list->fltr_info = tmp_fltr_info;
3448 	}
3449 
3450 	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3451 	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3452 		struct ice_vsi_list_map_info *vsi_list_info =
3453 			fm_list->vsi_list_info;
3454 
3455 		/* Remove the VSI list since it is no longer used */
3456 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3457 		if (status) {
3458 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3459 				  vsi_list_id, status);
3460 			return status;
3461 		}
3462 
3463 		list_del(&vsi_list_info->list_entry);
3464 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
3465 		fm_list->vsi_list_info = NULL;
3466 	}
3467 
3468 	return status;
3469 }
3470 
3471 /**
3472  * ice_remove_rule_internal - Remove a filter rule of a given type
3473  * @hw: pointer to the hardware structure
3474  * @recp_id: recipe ID for which the rule needs to removed
3475  * @f_entry: rule entry containing filter information
3476  */
3477 static int
3478 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
3479 			 struct ice_fltr_list_entry *f_entry)
3480 {
3481 	struct ice_switch_info *sw = hw->switch_info;
3482 	struct ice_fltr_mgmt_list_entry *list_elem;
3483 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3484 	bool remove_rule = false;
3485 	u16 vsi_handle;
3486 	int status = 0;
3487 
3488 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3489 		return -EINVAL;
3490 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3491 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3492 
3493 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3494 	mutex_lock(rule_lock);
3495 	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
3496 	if (!list_elem) {
3497 		status = -ENOENT;
3498 		goto exit;
3499 	}
3500 
3501 	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3502 		remove_rule = true;
3503 	} else if (!list_elem->vsi_list_info) {
3504 		status = -ENOENT;
3505 		goto exit;
3506 	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
3507 		/* a ref_cnt > 1 indicates that the vsi_list is being
3508 		 * shared by multiple rules. Decrement the ref_cnt and
3509 		 * remove this rule, but do not modify the list, as it
3510 		 * is in-use by other rules.
3511 		 */
3512 		list_elem->vsi_list_info->ref_cnt--;
3513 		remove_rule = true;
3514 	} else {
3515 		/* a ref_cnt of 1 indicates the vsi_list is only used
3516 		 * by one rule. However, the original removal request is only
3517 		 * for a single VSI. Update the vsi_list first, and only
3518 		 * remove the rule if there are no further VSIs in this list.
3519 		 */
3520 		vsi_handle = f_entry->fltr_info.vsi_handle;
3521 		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3522 		if (status)
3523 			goto exit;
3524 		/* if VSI count goes to zero after updating the VSI list */
3525 		if (list_elem->vsi_count == 0)
3526 			remove_rule = true;
3527 	}
3528 
3529 	if (remove_rule) {
3530 		/* Remove the lookup rule */
3531 		struct ice_sw_rule_lkup_rx_tx *s_rule;
3532 
3533 		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
3534 				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3535 				      GFP_KERNEL);
3536 		if (!s_rule) {
3537 			status = -ENOMEM;
3538 			goto exit;
3539 		}
3540 
3541 		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3542 				 ice_aqc_opc_remove_sw_rules);
3543 
3544 		status = ice_aq_sw_rules(hw, s_rule,
3545 					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3546 					 1, ice_aqc_opc_remove_sw_rules, NULL);
3547 
3548 		/* Remove a book keeping from the list */
3549 		devm_kfree(ice_hw_to_dev(hw), s_rule);
3550 
3551 		if (status)
3552 			goto exit;
3553 
3554 		list_del(&list_elem->list_entry);
3555 		devm_kfree(ice_hw_to_dev(hw), list_elem);
3556 	}
3557 exit:
3558 	mutex_unlock(rule_lock);
3559 	return status;
3560 }
3561 
3562 /**
3563  * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
3564  * @hw: pointer to the hardware structure
3565  * @vlan_id: VLAN ID
3566  * @vsi_handle: check MAC filter for this VSI
3567  */
3568 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
3569 {
3570 	struct ice_fltr_mgmt_list_entry *entry;
3571 	struct list_head *rule_head;
3572 	struct ice_switch_info *sw;
3573 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3574 	u16 hw_vsi_id;
3575 
3576 	if (vlan_id > ICE_MAX_VLAN_ID)
3577 		return false;
3578 
3579 	if (!ice_is_vsi_valid(hw, vsi_handle))
3580 		return false;
3581 
3582 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3583 	sw = hw->switch_info;
3584 	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3585 	if (!rule_head)
3586 		return false;
3587 
3588 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3589 	mutex_lock(rule_lock);
3590 	list_for_each_entry(entry, rule_head, list_entry) {
3591 		struct ice_fltr_info *f_info = &entry->fltr_info;
3592 		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
3593 		struct ice_vsi_list_map_info *map_info;
3594 
3595 		if (entry_vlan_id > ICE_MAX_VLAN_ID)
3596 			continue;
3597 
3598 		if (f_info->flag != ICE_FLTR_TX ||
3599 		    f_info->src_id != ICE_SRC_ID_VSI ||
3600 		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
3601 			continue;
3602 
3603 		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
3604 		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
3605 		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
3606 			continue;
3607 
3608 		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
3609 			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3610 				continue;
3611 		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3612 			/* If filter_action is FWD_TO_VSI_LIST, make sure
3613 			 * that VSI being checked is part of VSI list
3614 			 */
3615 			if (entry->vsi_count == 1 &&
3616 			    entry->vsi_list_info) {
3617 				map_info = entry->vsi_list_info;
3618 				if (!test_bit(vsi_handle, map_info->vsi_map))
3619 					continue;
3620 			}
3621 		}
3622 
3623 		if (vlan_id == entry_vlan_id) {
3624 			mutex_unlock(rule_lock);
3625 			return true;
3626 		}
3627 	}
3628 	mutex_unlock(rule_lock);
3629 
3630 	return false;
3631 }
3632 
3633 /**
3634  * ice_add_mac - Add a MAC address based filter rule
3635  * @hw: pointer to the hardware structure
3636  * @m_list: list of MAC addresses and forwarding information
3637  */
3638 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3639 {
3640 	struct ice_fltr_list_entry *m_list_itr;
3641 	int status = 0;
3642 
3643 	if (!m_list || !hw)
3644 		return -EINVAL;
3645 
3646 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3647 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3648 		u16 vsi_handle;
3649 		u16 hw_vsi_id;
3650 
3651 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3652 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
3653 		if (!ice_is_vsi_valid(hw, vsi_handle))
3654 			return -EINVAL;
3655 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3656 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3657 		/* update the src in case it is VSI num */
3658 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3659 			return -EINVAL;
3660 		m_list_itr->fltr_info.src = hw_vsi_id;
3661 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3662 		    is_zero_ether_addr(add))
3663 			return -EINVAL;
3664 
3665 		m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3666 							   m_list_itr);
3667 		if (m_list_itr->status)
3668 			return m_list_itr->status;
3669 	}
3670 
3671 	return status;
3672 }
3673 
3674 /**
3675  * ice_add_vlan_internal - Add one VLAN based filter rule
3676  * @hw: pointer to the hardware structure
3677  * @f_entry: filter entry containing one VLAN information
3678  */
3679 static int
3680 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3681 {
3682 	struct ice_switch_info *sw = hw->switch_info;
3683 	struct ice_fltr_mgmt_list_entry *v_list_itr;
3684 	struct ice_fltr_info *new_fltr, *cur_fltr;
3685 	enum ice_sw_lkup_type lkup_type;
3686 	u16 vsi_list_id = 0, vsi_handle;
3687 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3688 	int status = 0;
3689 
3690 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3691 		return -EINVAL;
3692 
3693 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3694 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3695 	new_fltr = &f_entry->fltr_info;
3696 
3697 	/* VLAN ID should only be 12 bits */
3698 	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3699 		return -EINVAL;
3700 
3701 	if (new_fltr->src_id != ICE_SRC_ID_VSI)
3702 		return -EINVAL;
3703 
3704 	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3705 	lkup_type = new_fltr->lkup_type;
3706 	vsi_handle = new_fltr->vsi_handle;
3707 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3708 	mutex_lock(rule_lock);
3709 	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3710 	if (!v_list_itr) {
3711 		struct ice_vsi_list_map_info *map_info = NULL;
3712 
3713 		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3714 			/* All VLAN pruning rules use a VSI list. Check if
3715 			 * there is already a VSI list containing VSI that we
3716 			 * want to add. If found, use the same vsi_list_id for
3717 			 * this new VLAN rule or else create a new list.
3718 			 */
3719 			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3720 							   vsi_handle,
3721 							   &vsi_list_id);
3722 			if (!map_info) {
3723 				status = ice_create_vsi_list_rule(hw,
3724 								  &vsi_handle,
3725 								  1,
3726 								  &vsi_list_id,
3727 								  lkup_type);
3728 				if (status)
3729 					goto exit;
3730 			}
3731 			/* Convert the action to forwarding to a VSI list. */
3732 			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3733 			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3734 		}
3735 
3736 		status = ice_create_pkt_fwd_rule(hw, f_entry);
3737 		if (!status) {
3738 			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3739 							 new_fltr);
3740 			if (!v_list_itr) {
3741 				status = -ENOENT;
3742 				goto exit;
3743 			}
3744 			/* reuse VSI list for new rule and increment ref_cnt */
3745 			if (map_info) {
3746 				v_list_itr->vsi_list_info = map_info;
3747 				map_info->ref_cnt++;
3748 			} else {
3749 				v_list_itr->vsi_list_info =
3750 					ice_create_vsi_list_map(hw, &vsi_handle,
3751 								1, vsi_list_id);
3752 			}
3753 		}
3754 	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3755 		/* Update existing VSI list to add new VSI ID only if it used
3756 		 * by one VLAN rule.
3757 		 */
3758 		cur_fltr = &v_list_itr->fltr_info;
3759 		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3760 						 new_fltr);
3761 	} else {
3762 		/* If VLAN rule exists and VSI list being used by this rule is
3763 		 * referenced by more than 1 VLAN rule. Then create a new VSI
3764 		 * list appending previous VSI with new VSI and update existing
3765 		 * VLAN rule to point to new VSI list ID
3766 		 */
3767 		struct ice_fltr_info tmp_fltr;
3768 		u16 vsi_handle_arr[2];
3769 		u16 cur_handle;
3770 
3771 		/* Current implementation only supports reusing VSI list with
3772 		 * one VSI count. We should never hit below condition
3773 		 */
3774 		if (v_list_itr->vsi_count > 1 &&
3775 		    v_list_itr->vsi_list_info->ref_cnt > 1) {
3776 			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3777 			status = -EIO;
3778 			goto exit;
3779 		}
3780 
3781 		cur_handle =
3782 			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3783 				       ICE_MAX_VSI);
3784 
3785 		/* A rule already exists with the new VSI being added */
3786 		if (cur_handle == vsi_handle) {
3787 			status = -EEXIST;
3788 			goto exit;
3789 		}
3790 
3791 		vsi_handle_arr[0] = cur_handle;
3792 		vsi_handle_arr[1] = vsi_handle;
3793 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3794 						  &vsi_list_id, lkup_type);
3795 		if (status)
3796 			goto exit;
3797 
3798 		tmp_fltr = v_list_itr->fltr_info;
3799 		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3800 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3801 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3802 		/* Update the previous switch rule to a new VSI list which
3803 		 * includes current VSI that is requested
3804 		 */
3805 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3806 		if (status)
3807 			goto exit;
3808 
3809 		/* before overriding VSI list map info. decrement ref_cnt of
3810 		 * previous VSI list
3811 		 */
3812 		v_list_itr->vsi_list_info->ref_cnt--;
3813 
3814 		/* now update to newly created list */
3815 		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3816 		v_list_itr->vsi_list_info =
3817 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3818 						vsi_list_id);
3819 		v_list_itr->vsi_count++;
3820 	}
3821 
3822 exit:
3823 	mutex_unlock(rule_lock);
3824 	return status;
3825 }
3826 
3827 /**
3828  * ice_add_vlan - Add VLAN based filter rule
3829  * @hw: pointer to the hardware structure
3830  * @v_list: list of VLAN entries and forwarding information
3831  */
3832 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3833 {
3834 	struct ice_fltr_list_entry *v_list_itr;
3835 
3836 	if (!v_list || !hw)
3837 		return -EINVAL;
3838 
3839 	list_for_each_entry(v_list_itr, v_list, list_entry) {
3840 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3841 			return -EINVAL;
3842 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3843 		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3844 		if (v_list_itr->status)
3845 			return v_list_itr->status;
3846 	}
3847 	return 0;
3848 }
3849 
3850 /**
3851  * ice_add_eth_mac - Add ethertype and MAC based filter rule
3852  * @hw: pointer to the hardware structure
3853  * @em_list: list of ether type MAC filter, MAC is optional
3854  *
3855  * This function requires the caller to populate the entries in
3856  * the filter list with the necessary fields (including flags to
3857  * indicate Tx or Rx rules).
3858  */
3859 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3860 {
3861 	struct ice_fltr_list_entry *em_list_itr;
3862 
3863 	if (!em_list || !hw)
3864 		return -EINVAL;
3865 
3866 	list_for_each_entry(em_list_itr, em_list, list_entry) {
3867 		enum ice_sw_lkup_type l_type =
3868 			em_list_itr->fltr_info.lkup_type;
3869 
3870 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3871 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3872 			return -EINVAL;
3873 
3874 		em_list_itr->status = ice_add_rule_internal(hw, l_type,
3875 							    em_list_itr);
3876 		if (em_list_itr->status)
3877 			return em_list_itr->status;
3878 	}
3879 	return 0;
3880 }
3881 
3882 /**
3883  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3884  * @hw: pointer to the hardware structure
3885  * @em_list: list of ethertype or ethertype MAC entries
3886  */
3887 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3888 {
3889 	struct ice_fltr_list_entry *em_list_itr, *tmp;
3890 
3891 	if (!em_list || !hw)
3892 		return -EINVAL;
3893 
3894 	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3895 		enum ice_sw_lkup_type l_type =
3896 			em_list_itr->fltr_info.lkup_type;
3897 
3898 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3899 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3900 			return -EINVAL;
3901 
3902 		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3903 							       em_list_itr);
3904 		if (em_list_itr->status)
3905 			return em_list_itr->status;
3906 	}
3907 	return 0;
3908 }
3909 
3910 /**
3911  * ice_rem_sw_rule_info
3912  * @hw: pointer to the hardware structure
3913  * @rule_head: pointer to the switch list structure that we want to delete
3914  */
3915 static void
3916 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3917 {
3918 	if (!list_empty(rule_head)) {
3919 		struct ice_fltr_mgmt_list_entry *entry;
3920 		struct ice_fltr_mgmt_list_entry *tmp;
3921 
3922 		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3923 			list_del(&entry->list_entry);
3924 			devm_kfree(ice_hw_to_dev(hw), entry);
3925 		}
3926 	}
3927 }
3928 
3929 /**
3930  * ice_rem_adv_rule_info
3931  * @hw: pointer to the hardware structure
3932  * @rule_head: pointer to the switch list structure that we want to delete
3933  */
3934 static void
3935 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3936 {
3937 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3938 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3939 
3940 	if (list_empty(rule_head))
3941 		return;
3942 
3943 	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3944 		list_del(&lst_itr->list_entry);
3945 		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3946 		devm_kfree(ice_hw_to_dev(hw), lst_itr);
3947 	}
3948 }
3949 
3950 /**
3951  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3952  * @pi: pointer to the port_info structure
3953  * @vsi_handle: VSI handle to set as default
3954  * @set: true to add the above mentioned switch rule, false to remove it
3955  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3956  *
3957  * add filter rule to set/unset given VSI as default VSI for the switch
3958  * (represented by swid)
3959  */
3960 int
3961 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3962 		 u8 direction)
3963 {
3964 	struct ice_fltr_list_entry f_list_entry;
3965 	struct ice_fltr_info f_info;
3966 	struct ice_hw *hw = pi->hw;
3967 	u16 hw_vsi_id;
3968 	int status;
3969 
3970 	if (!ice_is_vsi_valid(hw, vsi_handle))
3971 		return -EINVAL;
3972 
3973 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3974 
3975 	memset(&f_info, 0, sizeof(f_info));
3976 
3977 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
3978 	f_info.flag = direction;
3979 	f_info.fltr_act = ICE_FWD_TO_VSI;
3980 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3981 	f_info.vsi_handle = vsi_handle;
3982 
3983 	if (f_info.flag & ICE_FLTR_RX) {
3984 		f_info.src = hw->port_info->lport;
3985 		f_info.src_id = ICE_SRC_ID_LPORT;
3986 	} else if (f_info.flag & ICE_FLTR_TX) {
3987 		f_info.src_id = ICE_SRC_ID_VSI;
3988 		f_info.src = hw_vsi_id;
3989 		f_info.flag |= ICE_FLTR_TX_ONLY;
3990 	}
3991 	f_list_entry.fltr_info = f_info;
3992 
3993 	if (set)
3994 		status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
3995 					       &f_list_entry);
3996 	else
3997 		status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
3998 						  &f_list_entry);
3999 
4000 	return status;
4001 }
4002 
4003 /**
4004  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4005  * @fm_entry: filter entry to inspect
4006  * @vsi_handle: VSI handle to compare with filter info
4007  */
4008 static bool
4009 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4010 {
4011 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4012 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4013 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4014 		 fm_entry->vsi_list_info &&
4015 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
4016 }
4017 
4018 /**
4019  * ice_check_if_dflt_vsi - check if VSI is default VSI
4020  * @pi: pointer to the port_info structure
4021  * @vsi_handle: vsi handle to check for in filter list
4022  * @rule_exists: indicates if there are any VSI's in the rule list
4023  *
4024  * checks if the VSI is in a default VSI list, and also indicates
4025  * if the default VSI list is empty
4026  */
4027 bool
4028 ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
4029 		      bool *rule_exists)
4030 {
4031 	struct ice_fltr_mgmt_list_entry *fm_entry;
4032 	struct ice_sw_recipe *recp_list;
4033 	struct list_head *rule_head;
4034 	struct mutex *rule_lock; /* Lock to protect filter rule list */
4035 	bool ret = false;
4036 
4037 	recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
4038 	rule_lock = &recp_list->filt_rule_lock;
4039 	rule_head = &recp_list->filt_rules;
4040 
4041 	mutex_lock(rule_lock);
4042 
4043 	if (rule_exists && !list_empty(rule_head))
4044 		*rule_exists = true;
4045 
4046 	list_for_each_entry(fm_entry, rule_head, list_entry) {
4047 		if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
4048 			ret = true;
4049 			break;
4050 		}
4051 	}
4052 
4053 	mutex_unlock(rule_lock);
4054 
4055 	return ret;
4056 }
4057 
4058 /**
4059  * ice_remove_mac - remove a MAC address based filter rule
4060  * @hw: pointer to the hardware structure
4061  * @m_list: list of MAC addresses and forwarding information
4062  *
4063  * This function removes either a MAC filter rule or a specific VSI from a
4064  * VSI list for a multicast MAC address.
4065  *
4066  * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
4067  * be aware that this call will only work if all the entries passed into m_list
4068  * were added previously. It will not attempt to do a partial remove of entries
4069  * that were found.
4070  */
4071 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
4072 {
4073 	struct ice_fltr_list_entry *list_itr, *tmp;
4074 
4075 	if (!m_list)
4076 		return -EINVAL;
4077 
4078 	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
4079 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4080 		u16 vsi_handle;
4081 
4082 		if (l_type != ICE_SW_LKUP_MAC)
4083 			return -EINVAL;
4084 
4085 		vsi_handle = list_itr->fltr_info.vsi_handle;
4086 		if (!ice_is_vsi_valid(hw, vsi_handle))
4087 			return -EINVAL;
4088 
4089 		list_itr->fltr_info.fwd_id.hw_vsi_id =
4090 					ice_get_hw_vsi_num(hw, vsi_handle);
4091 
4092 		list_itr->status = ice_remove_rule_internal(hw,
4093 							    ICE_SW_LKUP_MAC,
4094 							    list_itr);
4095 		if (list_itr->status)
4096 			return list_itr->status;
4097 	}
4098 	return 0;
4099 }
4100 
4101 /**
4102  * ice_remove_vlan - Remove VLAN based filter rule
4103  * @hw: pointer to the hardware structure
4104  * @v_list: list of VLAN entries and forwarding information
4105  */
4106 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
4107 {
4108 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4109 
4110 	if (!v_list || !hw)
4111 		return -EINVAL;
4112 
4113 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4114 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4115 
4116 		if (l_type != ICE_SW_LKUP_VLAN)
4117 			return -EINVAL;
4118 		v_list_itr->status = ice_remove_rule_internal(hw,
4119 							      ICE_SW_LKUP_VLAN,
4120 							      v_list_itr);
4121 		if (v_list_itr->status)
4122 			return v_list_itr->status;
4123 	}
4124 	return 0;
4125 }
4126 
4127 /**
4128  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4129  * @hw: pointer to the hardware structure
4130  * @vsi_handle: VSI handle to remove filters from
4131  * @vsi_list_head: pointer to the list to add entry to
4132  * @fi: pointer to fltr_info of filter entry to copy & add
4133  *
4134  * Helper function, used when creating a list of filters to remove from
4135  * a specific VSI. The entry added to vsi_list_head is a COPY of the
4136  * original filter entry, with the exception of fltr_info.fltr_act and
4137  * fltr_info.fwd_id fields. These are set such that later logic can
4138  * extract which VSI to remove the fltr from, and pass on that information.
4139  */
4140 static int
4141 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4142 			       struct list_head *vsi_list_head,
4143 			       struct ice_fltr_info *fi)
4144 {
4145 	struct ice_fltr_list_entry *tmp;
4146 
4147 	/* this memory is freed up in the caller function
4148 	 * once filters for this VSI are removed
4149 	 */
4150 	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
4151 	if (!tmp)
4152 		return -ENOMEM;
4153 
4154 	tmp->fltr_info = *fi;
4155 
4156 	/* Overwrite these fields to indicate which VSI to remove filter from,
4157 	 * so find and remove logic can extract the information from the
4158 	 * list entries. Note that original entries will still have proper
4159 	 * values.
4160 	 */
4161 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4162 	tmp->fltr_info.vsi_handle = vsi_handle;
4163 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4164 
4165 	list_add(&tmp->list_entry, vsi_list_head);
4166 
4167 	return 0;
4168 }
4169 
4170 /**
4171  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4172  * @hw: pointer to the hardware structure
4173  * @vsi_handle: VSI handle to remove filters from
4174  * @lkup_list_head: pointer to the list that has certain lookup type filters
4175  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4176  *
4177  * Locates all filters in lkup_list_head that are used by the given VSI,
4178  * and adds COPIES of those entries to vsi_list_head (intended to be used
4179  * to remove the listed filters).
4180  * Note that this means all entries in vsi_list_head must be explicitly
4181  * deallocated by the caller when done with list.
4182  */
4183 static int
4184 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4185 			 struct list_head *lkup_list_head,
4186 			 struct list_head *vsi_list_head)
4187 {
4188 	struct ice_fltr_mgmt_list_entry *fm_entry;
4189 	int status = 0;
4190 
4191 	/* check to make sure VSI ID is valid and within boundary */
4192 	if (!ice_is_vsi_valid(hw, vsi_handle))
4193 		return -EINVAL;
4194 
4195 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4196 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4197 			continue;
4198 
4199 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4200 							vsi_list_head,
4201 							&fm_entry->fltr_info);
4202 		if (status)
4203 			return status;
4204 	}
4205 	return status;
4206 }
4207 
4208 /**
4209  * ice_determine_promisc_mask
4210  * @fi: filter info to parse
4211  *
4212  * Helper function to determine which ICE_PROMISC_ mask corresponds
4213  * to given filter into.
4214  */
4215 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4216 {
4217 	u16 vid = fi->l_data.mac_vlan.vlan_id;
4218 	u8 *macaddr = fi->l_data.mac.mac_addr;
4219 	bool is_tx_fltr = false;
4220 	u8 promisc_mask = 0;
4221 
4222 	if (fi->flag == ICE_FLTR_TX)
4223 		is_tx_fltr = true;
4224 
4225 	if (is_broadcast_ether_addr(macaddr))
4226 		promisc_mask |= is_tx_fltr ?
4227 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4228 	else if (is_multicast_ether_addr(macaddr))
4229 		promisc_mask |= is_tx_fltr ?
4230 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4231 	else if (is_unicast_ether_addr(macaddr))
4232 		promisc_mask |= is_tx_fltr ?
4233 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4234 	if (vid)
4235 		promisc_mask |= is_tx_fltr ?
4236 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4237 
4238 	return promisc_mask;
4239 }
4240 
4241 /**
4242  * ice_remove_promisc - Remove promisc based filter rules
4243  * @hw: pointer to the hardware structure
4244  * @recp_id: recipe ID for which the rule needs to removed
4245  * @v_list: list of promisc entries
4246  */
4247 static int
4248 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4249 {
4250 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4251 
4252 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4253 		v_list_itr->status =
4254 			ice_remove_rule_internal(hw, recp_id, v_list_itr);
4255 		if (v_list_itr->status)
4256 			return v_list_itr->status;
4257 	}
4258 	return 0;
4259 }
4260 
4261 /**
4262  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4263  * @hw: pointer to the hardware structure
4264  * @vsi_handle: VSI handle to clear mode
4265  * @promisc_mask: mask of promiscuous config bits to clear
4266  * @vid: VLAN ID to clear VLAN promiscuous
4267  */
4268 int
4269 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4270 		      u16 vid)
4271 {
4272 	struct ice_switch_info *sw = hw->switch_info;
4273 	struct ice_fltr_list_entry *fm_entry, *tmp;
4274 	struct list_head remove_list_head;
4275 	struct ice_fltr_mgmt_list_entry *itr;
4276 	struct list_head *rule_head;
4277 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4278 	int status = 0;
4279 	u8 recipe_id;
4280 
4281 	if (!ice_is_vsi_valid(hw, vsi_handle))
4282 		return -EINVAL;
4283 
4284 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4285 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4286 	else
4287 		recipe_id = ICE_SW_LKUP_PROMISC;
4288 
4289 	rule_head = &sw->recp_list[recipe_id].filt_rules;
4290 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4291 
4292 	INIT_LIST_HEAD(&remove_list_head);
4293 
4294 	mutex_lock(rule_lock);
4295 	list_for_each_entry(itr, rule_head, list_entry) {
4296 		struct ice_fltr_info *fltr_info;
4297 		u8 fltr_promisc_mask = 0;
4298 
4299 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
4300 			continue;
4301 		fltr_info = &itr->fltr_info;
4302 
4303 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4304 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
4305 			continue;
4306 
4307 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4308 
4309 		/* Skip if filter is not completely specified by given mask */
4310 		if (fltr_promisc_mask & ~promisc_mask)
4311 			continue;
4312 
4313 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4314 							&remove_list_head,
4315 							fltr_info);
4316 		if (status) {
4317 			mutex_unlock(rule_lock);
4318 			goto free_fltr_list;
4319 		}
4320 	}
4321 	mutex_unlock(rule_lock);
4322 
4323 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4324 
4325 free_fltr_list:
4326 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4327 		list_del(&fm_entry->list_entry);
4328 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4329 	}
4330 
4331 	return status;
4332 }
4333 
4334 /**
4335  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4336  * @hw: pointer to the hardware structure
4337  * @vsi_handle: VSI handle to configure
4338  * @promisc_mask: mask of promiscuous config bits
4339  * @vid: VLAN ID to set VLAN promiscuous
4340  */
4341 int
4342 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4343 {
4344 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4345 	struct ice_fltr_list_entry f_list_entry;
4346 	struct ice_fltr_info new_fltr;
4347 	bool is_tx_fltr;
4348 	int status = 0;
4349 	u16 hw_vsi_id;
4350 	int pkt_type;
4351 	u8 recipe_id;
4352 
4353 	if (!ice_is_vsi_valid(hw, vsi_handle))
4354 		return -EINVAL;
4355 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4356 
4357 	memset(&new_fltr, 0, sizeof(new_fltr));
4358 
4359 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4360 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4361 		new_fltr.l_data.mac_vlan.vlan_id = vid;
4362 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4363 	} else {
4364 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4365 		recipe_id = ICE_SW_LKUP_PROMISC;
4366 	}
4367 
4368 	/* Separate filters must be set for each direction/packet type
4369 	 * combination, so we will loop over the mask value, store the
4370 	 * individual type, and clear it out in the input mask as it
4371 	 * is found.
4372 	 */
4373 	while (promisc_mask) {
4374 		u8 *mac_addr;
4375 
4376 		pkt_type = 0;
4377 		is_tx_fltr = false;
4378 
4379 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4380 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4381 			pkt_type = UCAST_FLTR;
4382 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4383 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4384 			pkt_type = UCAST_FLTR;
4385 			is_tx_fltr = true;
4386 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4387 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4388 			pkt_type = MCAST_FLTR;
4389 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4390 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4391 			pkt_type = MCAST_FLTR;
4392 			is_tx_fltr = true;
4393 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4394 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4395 			pkt_type = BCAST_FLTR;
4396 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4397 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4398 			pkt_type = BCAST_FLTR;
4399 			is_tx_fltr = true;
4400 		}
4401 
4402 		/* Check for VLAN promiscuous flag */
4403 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4404 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4405 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4406 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4407 			is_tx_fltr = true;
4408 		}
4409 
4410 		/* Set filter DA based on packet type */
4411 		mac_addr = new_fltr.l_data.mac.mac_addr;
4412 		if (pkt_type == BCAST_FLTR) {
4413 			eth_broadcast_addr(mac_addr);
4414 		} else if (pkt_type == MCAST_FLTR ||
4415 			   pkt_type == UCAST_FLTR) {
4416 			/* Use the dummy ether header DA */
4417 			ether_addr_copy(mac_addr, dummy_eth_header);
4418 			if (pkt_type == MCAST_FLTR)
4419 				mac_addr[0] |= 0x1;	/* Set multicast bit */
4420 		}
4421 
4422 		/* Need to reset this to zero for all iterations */
4423 		new_fltr.flag = 0;
4424 		if (is_tx_fltr) {
4425 			new_fltr.flag |= ICE_FLTR_TX;
4426 			new_fltr.src = hw_vsi_id;
4427 		} else {
4428 			new_fltr.flag |= ICE_FLTR_RX;
4429 			new_fltr.src = hw->port_info->lport;
4430 		}
4431 
4432 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
4433 		new_fltr.vsi_handle = vsi_handle;
4434 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4435 		f_list_entry.fltr_info = new_fltr;
4436 
4437 		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4438 		if (status)
4439 			goto set_promisc_exit;
4440 	}
4441 
4442 set_promisc_exit:
4443 	return status;
4444 }
4445 
4446 /**
4447  * ice_set_vlan_vsi_promisc
4448  * @hw: pointer to the hardware structure
4449  * @vsi_handle: VSI handle to configure
4450  * @promisc_mask: mask of promiscuous config bits
4451  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4452  *
4453  * Configure VSI with all associated VLANs to given promiscuous mode(s)
4454  */
4455 int
4456 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4457 			 bool rm_vlan_promisc)
4458 {
4459 	struct ice_switch_info *sw = hw->switch_info;
4460 	struct ice_fltr_list_entry *list_itr, *tmp;
4461 	struct list_head vsi_list_head;
4462 	struct list_head *vlan_head;
4463 	struct mutex *vlan_lock; /* Lock to protect filter rule list */
4464 	u16 vlan_id;
4465 	int status;
4466 
4467 	INIT_LIST_HEAD(&vsi_list_head);
4468 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4469 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4470 	mutex_lock(vlan_lock);
4471 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4472 					  &vsi_list_head);
4473 	mutex_unlock(vlan_lock);
4474 	if (status)
4475 		goto free_fltr_list;
4476 
4477 	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
4478 		/* Avoid enabling or disabling VLAN zero twice when in double
4479 		 * VLAN mode
4480 		 */
4481 		if (ice_is_dvm_ena(hw) &&
4482 		    list_itr->fltr_info.l_data.vlan.tpid == 0)
4483 			continue;
4484 
4485 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4486 		if (rm_vlan_promisc)
4487 			status = ice_clear_vsi_promisc(hw, vsi_handle,
4488 						       promisc_mask, vlan_id);
4489 		else
4490 			status = ice_set_vsi_promisc(hw, vsi_handle,
4491 						     promisc_mask, vlan_id);
4492 		if (status && status != -EEXIST)
4493 			break;
4494 	}
4495 
4496 free_fltr_list:
4497 	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
4498 		list_del(&list_itr->list_entry);
4499 		devm_kfree(ice_hw_to_dev(hw), list_itr);
4500 	}
4501 	return status;
4502 }
4503 
4504 /**
4505  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4506  * @hw: pointer to the hardware structure
4507  * @vsi_handle: VSI handle to remove filters from
4508  * @lkup: switch rule filter lookup type
4509  */
4510 static void
4511 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4512 			 enum ice_sw_lkup_type lkup)
4513 {
4514 	struct ice_switch_info *sw = hw->switch_info;
4515 	struct ice_fltr_list_entry *fm_entry;
4516 	struct list_head remove_list_head;
4517 	struct list_head *rule_head;
4518 	struct ice_fltr_list_entry *tmp;
4519 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4520 	int status;
4521 
4522 	INIT_LIST_HEAD(&remove_list_head);
4523 	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4524 	rule_head = &sw->recp_list[lkup].filt_rules;
4525 	mutex_lock(rule_lock);
4526 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4527 					  &remove_list_head);
4528 	mutex_unlock(rule_lock);
4529 	if (status)
4530 		goto free_fltr_list;
4531 
4532 	switch (lkup) {
4533 	case ICE_SW_LKUP_MAC:
4534 		ice_remove_mac(hw, &remove_list_head);
4535 		break;
4536 	case ICE_SW_LKUP_VLAN:
4537 		ice_remove_vlan(hw, &remove_list_head);
4538 		break;
4539 	case ICE_SW_LKUP_PROMISC:
4540 	case ICE_SW_LKUP_PROMISC_VLAN:
4541 		ice_remove_promisc(hw, lkup, &remove_list_head);
4542 		break;
4543 	case ICE_SW_LKUP_MAC_VLAN:
4544 	case ICE_SW_LKUP_ETHERTYPE:
4545 	case ICE_SW_LKUP_ETHERTYPE_MAC:
4546 	case ICE_SW_LKUP_DFLT:
4547 	case ICE_SW_LKUP_LAST:
4548 	default:
4549 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
4550 		break;
4551 	}
4552 
4553 free_fltr_list:
4554 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4555 		list_del(&fm_entry->list_entry);
4556 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4557 	}
4558 }
4559 
4560 /**
4561  * ice_remove_vsi_fltr - Remove all filters for a VSI
4562  * @hw: pointer to the hardware structure
4563  * @vsi_handle: VSI handle to remove filters from
4564  */
4565 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4566 {
4567 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4568 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4569 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4570 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4571 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4572 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4573 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4574 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4575 }
4576 
4577 /**
4578  * ice_alloc_res_cntr - allocating resource counter
4579  * @hw: pointer to the hardware structure
4580  * @type: type of resource
4581  * @alloc_shared: if set it is shared else dedicated
4582  * @num_items: number of entries requested for FD resource type
4583  * @counter_id: counter index returned by AQ call
4584  */
4585 int
4586 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4587 		   u16 *counter_id)
4588 {
4589 	DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
4590 	u16 buf_len = __struct_size(buf);
4591 	int status;
4592 
4593 	buf->num_elems = cpu_to_le16(num_items);
4594 	buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) |
4595 				    alloc_shared);
4596 
4597 	status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
4598 	if (status)
4599 		return status;
4600 
4601 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4602 	return status;
4603 }
4604 
4605 /**
4606  * ice_free_res_cntr - free resource counter
4607  * @hw: pointer to the hardware structure
4608  * @type: type of resource
4609  * @alloc_shared: if set it is shared else dedicated
4610  * @num_items: number of entries to be freed for FD resource type
4611  * @counter_id: counter ID resource which needs to be freed
4612  */
4613 int
4614 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4615 		  u16 counter_id)
4616 {
4617 	DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
4618 	u16 buf_len = __struct_size(buf);
4619 	int status;
4620 
4621 	buf->num_elems = cpu_to_le16(num_items);
4622 	buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) |
4623 				    alloc_shared);
4624 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4625 
4626 	status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
4627 	if (status)
4628 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4629 
4630 	return status;
4631 }
4632 
/* Build one ice_prot_ext table entry: the protocol type plus the list of
 * per-word byte offsets within that protocol's header.
 */
#define ICE_PROTOCOL_ENTRY(id, ...) {		\
	.prot_type	= id,			\
	.offs		= {__VA_ARGS__},	\
}
4637 
4638 /**
4639  * ice_share_res - set a resource as shared or dedicated
4640  * @hw: hw struct of original owner of resource
4641  * @type: resource type
4642  * @shared: is the resource being set to shared
4643  * @res_id: resource id (descriptor)
4644  */
4645 int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
4646 {
4647 	DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
4648 	u16 buf_len = __struct_size(buf);
4649 	u16 res_type;
4650 	int status;
4651 
4652 	buf->num_elems = cpu_to_le16(1);
4653 	res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, type);
4654 	if (shared)
4655 		res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED;
4656 
4657 	buf->res_type = cpu_to_le16(res_type);
4658 	buf->elem[0].e.sw_resp = cpu_to_le16(res_id);
4659 	status = ice_aq_alloc_free_res(hw, buf, buf_len,
4660 				       ice_aqc_opc_share_res);
4661 	if (status)
4662 		ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n",
4663 			  type, res_id, shared ? "SHARED" : "DEDICATED");
4664 
4665 	return status;
4666 }
4667 
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	ICE_PROTOCOL_ENTRY(ICE_MAC_OFOS, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_MAC_IL, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_OL, 0),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_IL, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_OFOS, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_IL,	0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
			   20, 22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
			   22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_TCP_IL, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_OF, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_ILOS, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_VXLAN, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_GENEVE, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
	ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_PFCP, 8, 10, 12, 14, 16, 18, 20, 22),
	ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_IN, 2, 0),
	/* metadata words use MDID offsets rather than packet-header offsets */
	ICE_PROTOCOL_ENTRY(ICE_HW_METADATA,
			   ICE_SOURCE_PORT_MDID_OFFSET,
			   ICE_PTYPE_MDID_OFFSET,
			   ICE_PACKET_LENGTH_MDID_OFFSET,
			   ICE_SOURCE_VSI_MDID_OFFSET,
			   ICE_PKT_VLAN_MDID_OFFSET,
			   ICE_PKT_TUNNEL_MDID_OFFSET,
			   ICE_PKT_TCP_MDID_OFFSET,
			   ICE_PKT_ERROR_MDID_OFFSET),
};
4712 
/* Maps each software protocol type to its hardware protocol ID. Not const:
 * ice_change_proto_id_to_dvm() rewrites the ICE_VLAN_OFOS entry when double
 * VLAN mode is enabled.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_VLAN_EX,          ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,          ICE_VLAN_OL_HW },
	{ ICE_HW_METADATA,      ICE_META_DATA_ID_HW },
};
4738 
4739 /**
4740  * ice_find_recp - find a recipe
4741  * @hw: pointer to the hardware structure
4742  * @lkup_exts: extension sequence to match
4743  * @rinfo: information regarding the rule e.g. priority and action info
4744  * @is_add: flag of adding recipe
4745  *
4746  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4747  */
4748 static u16
4749 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
4750 	      const struct ice_adv_rule_info *rinfo, bool is_add)
4751 {
4752 	bool refresh_required = true;
4753 	struct ice_sw_recipe *recp;
4754 	u8 i;
4755 
4756 	/* Walk through existing recipes to find a match */
4757 	recp = hw->switch_info->recp_list;
4758 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4759 		/* If recipe was not created for this ID, in SW bookkeeping,
4760 		 * check if FW has an entry for this recipe. If the FW has an
4761 		 * entry update it in our SW bookkeeping and continue with the
4762 		 * matching.
4763 		 */
4764 		if (hw->recp_reuse) {
4765 			if (ice_get_recp_frm_fw(hw,
4766 						hw->switch_info->recp_list, i,
4767 						&refresh_required, is_add))
4768 				continue;
4769 		}
4770 
4771 		/* Skip inverse action recipes */
4772 		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4773 		    ICE_AQ_RECIPE_ACT_INV_ACT)
4774 			continue;
4775 
4776 		/* if number of words we are looking for match */
4777 		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4778 			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
4779 			struct ice_fv_word *be = lkup_exts->fv_words;
4780 			u16 *cr = recp[i].lkup_exts.field_mask;
4781 			u16 *de = lkup_exts->field_mask;
4782 			bool found = true;
4783 			u8 pe, qr;
4784 
4785 			/* ar, cr, and qr are related to the recipe words, while
4786 			 * be, de, and pe are related to the lookup words
4787 			 */
4788 			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
4789 				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
4790 				     qr++) {
4791 					if (ar[qr].off == be[pe].off &&
4792 					    ar[qr].prot_id == be[pe].prot_id &&
4793 					    cr[qr] == de[pe])
4794 						/* Found the "pe"th word in the
4795 						 * given recipe
4796 						 */
4797 						break;
4798 				}
4799 				/* After walking through all the words in the
4800 				 * "i"th recipe if "p"th word was not found then
4801 				 * this recipe is not what we are looking for.
4802 				 * So break out from this loop and try the next
4803 				 * recipe
4804 				 */
4805 				if (qr >= recp[i].lkup_exts.n_val_words) {
4806 					found = false;
4807 					break;
4808 				}
4809 			}
4810 			/* If for "i"th recipe the found was never set to false
4811 			 * then it means we found our match
4812 			 * Also tun type and *_pass_l2 of recipe needs to be
4813 			 * checked
4814 			 */
4815 			if (found && recp[i].tun_type == rinfo->tun_type &&
4816 			    recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
4817 			    recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
4818 				return i; /* Return the recipe ID */
4819 		}
4820 	}
4821 	return ICE_MAX_NUM_RECIPES;
4822 }
4823 
4824 /**
4825  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4826  *
4827  * As protocol id for outer vlan is different in dvm and svm, if dvm is
4828  * supported protocol array record for outer vlan has to be modified to
4829  * reflect the value proper for DVM.
4830  */
4831 void ice_change_proto_id_to_dvm(void)
4832 {
4833 	u8 i;
4834 
4835 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4836 		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4837 		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4838 			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4839 }
4840 
4841 /**
4842  * ice_prot_type_to_id - get protocol ID from protocol type
4843  * @type: protocol type
4844  * @id: pointer to variable that will receive the ID
4845  *
4846  * Returns true if found, false otherwise
4847  */
4848 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4849 {
4850 	u8 i;
4851 
4852 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4853 		if (ice_prot_id_tbl[i].type == type) {
4854 			*id = ice_prot_id_tbl[i].protocol_id;
4855 			return true;
4856 		}
4857 	return false;
4858 }
4859 
4860 /**
4861  * ice_fill_valid_words - count valid words
4862  * @rule: advanced rule with lookup information
4863  * @lkup_exts: byte offset extractions of the words that are valid
4864  *
4865  * calculate valid words in a lookup rule using mask value
4866  */
4867 static u8
4868 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4869 		     struct ice_prot_lkup_ext *lkup_exts)
4870 {
4871 	u8 j, word, prot_id, ret_val;
4872 
4873 	if (!ice_prot_type_to_id(rule->type, &prot_id))
4874 		return 0;
4875 
4876 	word = lkup_exts->n_val_words;
4877 
4878 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4879 		if (((u16 *)&rule->m_u)[j] &&
4880 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
4881 			/* No more space to accommodate */
4882 			if (word >= ICE_MAX_CHAIN_WORDS)
4883 				return 0;
4884 			lkup_exts->fv_words[word].off =
4885 				ice_prot_ext[rule->type].offs[j];
4886 			lkup_exts->fv_words[word].prot_id =
4887 				ice_prot_id_tbl[rule->type].protocol_id;
4888 			lkup_exts->field_mask[word] =
4889 				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
4890 			word++;
4891 		}
4892 
4893 	ret_val = word - lkup_exts->n_val_words;
4894 	lkup_exts->n_val_words = word;
4895 
4896 	return ret_val;
4897 }
4898 
4899 /**
4900  * ice_create_first_fit_recp_def - Create a recipe grouping
4901  * @hw: pointer to the hardware structure
4902  * @lkup_exts: an array of protocol header extractions
4903  * @rg_list: pointer to a list that stores new recipe groups
4904  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4905  *
4906  * Using first fit algorithm, take all the words that are still not done
4907  * and start grouping them in 4-word groups. Each group makes up one
4908  * recipe.
4909  */
4910 static int
4911 ice_create_first_fit_recp_def(struct ice_hw *hw,
4912 			      struct ice_prot_lkup_ext *lkup_exts,
4913 			      struct list_head *rg_list,
4914 			      u8 *recp_cnt)
4915 {
4916 	struct ice_pref_recipe_group *grp = NULL;
4917 	u8 j;
4918 
4919 	*recp_cnt = 0;
4920 
4921 	/* Walk through every word in the rule to check if it is not done. If so
4922 	 * then this word needs to be part of a new recipe.
4923 	 */
4924 	for (j = 0; j < lkup_exts->n_val_words; j++)
4925 		if (!test_bit(j, lkup_exts->done)) {
4926 			if (!grp ||
4927 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4928 				struct ice_recp_grp_entry *entry;
4929 
4930 				entry = devm_kzalloc(ice_hw_to_dev(hw),
4931 						     sizeof(*entry),
4932 						     GFP_KERNEL);
4933 				if (!entry)
4934 					return -ENOMEM;
4935 				list_add(&entry->l_entry, rg_list);
4936 				grp = &entry->r_group;
4937 				(*recp_cnt)++;
4938 			}
4939 
4940 			grp->pairs[grp->n_val_pairs].prot_id =
4941 				lkup_exts->fv_words[j].prot_id;
4942 			grp->pairs[grp->n_val_pairs].off =
4943 				lkup_exts->fv_words[j].off;
4944 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4945 			grp->n_val_pairs++;
4946 		}
4947 
4948 	return 0;
4949 }
4950 
4951 /**
4952  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4953  * @hw: pointer to the hardware structure
4954  * @fv_list: field vector with the extraction sequence information
4955  * @rg_list: recipe groupings with protocol-offset pairs
4956  *
4957  * Helper function to fill in the field vector indices for protocol-offset
4958  * pairs. These indexes are then ultimately programmed into a recipe.
4959  */
4960 static int
4961 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4962 		       struct list_head *rg_list)
4963 {
4964 	struct ice_sw_fv_list_entry *fv;
4965 	struct ice_recp_grp_entry *rg;
4966 	struct ice_fv_word *fv_ext;
4967 
4968 	if (list_empty(fv_list))
4969 		return 0;
4970 
4971 	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4972 			      list_entry);
4973 	fv_ext = fv->fv_ptr->ew;
4974 
4975 	list_for_each_entry(rg, rg_list, l_entry) {
4976 		u8 i;
4977 
4978 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4979 			struct ice_fv_word *pr;
4980 			bool found = false;
4981 			u16 mask;
4982 			u8 j;
4983 
4984 			pr = &rg->r_group.pairs[i];
4985 			mask = rg->r_group.mask[i];
4986 
4987 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4988 				if (fv_ext[j].prot_id == pr->prot_id &&
4989 				    fv_ext[j].off == pr->off) {
4990 					found = true;
4991 
4992 					/* Store index of field vector */
4993 					rg->fv_idx[i] = j;
4994 					rg->fv_mask[i] = mask;
4995 					break;
4996 				}
4997 
4998 			/* Protocol/offset could not be found, caller gave an
4999 			 * invalid pair
5000 			 */
5001 			if (!found)
5002 				return -EINVAL;
5003 		}
5004 	}
5005 
5006 	return 0;
5007 }
5008 
/**
 * ice_find_free_recp_res_idx - find free result indexes for recipe
 * @hw: pointer to hardware structure
 * @profiles: bitmap of profiles that will be associated with the new recipe
 * @free_idx: pointer to variable to receive the free index bitmap
 *
 * The algorithm used here is:
 *	1. When creating a new recipe, create a set P which contains all
 *	   Profiles that will be associated with our new recipe
 *
 *	2. For each Profile p in set P:
 *	    a. Add all recipes associated with Profile p into set R
 *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
 *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
 *		i. Or just assume they all have the same possible indexes:
 *			44, 45, 46, 47
 *			i.e., PossibleIndexes = 0x0000F00000000000
 *
 *	3. For each Recipe r in set R:
 *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
 *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
 *
 *	FreeIndexes will contain the bits indicating the indexes free for use,
 *      then the code needs to update the recipe[r].used_result_idx_bits to
 *      indicate which indexes were selected for use by this recipe.
 *
 * Return: number of free result indexes; the indexes themselves are
 * returned through @free_idx.
 */
static u16
ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
			   unsigned long *free_idx)
{
	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
	u16 bit;

	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);

	bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);

	/* For each profile we are going to associate the recipe with, add the
	 * recipes that are associated with that profile. This will give us
	 * the set of recipes that our recipe may collide with. Also, determine
	 * what possible result indexes are usable given this set of profiles.
	 */
	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
		bitmap_or(recipes, recipes, profile_to_recipe[bit],
			  ICE_MAX_NUM_RECIPES);
		bitmap_and(possible_idx, possible_idx,
			   hw->switch_info->prof_res_bm[bit],
			   ICE_MAX_FV_WORDS);
	}

	/* For each recipe that our new recipe may collide with, determine
	 * which indexes have been used.
	 */
	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
		bitmap_or(used_idx, used_idx,
			  hw->switch_info->recp_list[bit].res_idxs,
			  ICE_MAX_FV_WORDS);

	/* NOTE(review): XOR marks a bit free when it is set in exactly one of
	 * the two maps; this assumes used indexes are always a subset of the
	 * possible indexes -- confirm that invariant holds for prof_res_bm.
	 */
	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);

	/* return number of free indexes */
	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
}
5075 
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates recipe slots in firmware, fills each recipe's lookup indices
 * and masks from the groups in @rm->rg_list, chains multiple recipes
 * together through result indices when more than one group is needed,
 * programs them with the add-recipe AQ command, and mirrors the outcome
 * into the software bookkeeping in hw->switch_info->recp_list.
 *
 * On success, ownership of the AQ buffer transfers to @rm (rm->root_buf).
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  unsigned long *profiles)
{
	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_content *content;
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;
	int status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	 /* check number of free result indices */
	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		if (rm->n_grp_count > free_res_idx)
			return -ENOSPC;

		/* reserve one extra recipe that will chain the others */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return -ENOSPC;

	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
			   GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err_mem;
	}

	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	/* tmp[0] is used below as a template for every new recipe entry
	 * (see "buf[recps] = tmp[0]" in the loop)
	 */
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		content = &buf[recps].content;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		content->rid = 0;
		memset(&content->lkup_indx, 0,
		       sizeof(content->lkup_indx));

		/* All recipes use look-up index 0 to match switch ID. */
		content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			content->lkup_indx[i] = 0x80;
			content->mask[i] = 0;
		}

		/* then overwrite slots 1..n with the group's real indices */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			content->lkup_indx[i + 1] = entry->fv_idx[i];
			content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = -ENOSPC;
				goto err_unroll;
			}

			entry->chain_idx = chain_idx;
			content->result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
					   chain_idx);
			/* consume this result index and locate the next one */
			clear_bit(chain_idx, result_idx_bm);
			chain_idx = find_first_bit(result_idx_bm,
						   ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
			    ICE_MAX_NUM_RECIPES);
		set_bit(buf[recps].recipe_indx,
			(unsigned long *)buf[recps].recipe_bitmap);
		content->act_ctrl_fwd_priority = rm->priority;

		if (rm->need_pass_l2)
			content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;

		if (rm->allow_pass_l2)
			content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
		recps++;
	}

	/* single-group case: the one recipe built above is the root;
	 * otherwise build one more recipe that chains all the others
	 */
	if (rm->n_grp_count == 1) {
		rm->root_rid = buf[0].recipe_indx;
		set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[0].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		content = &buf[recps].content;

		buf[recps].recipe_indx = (u8)rid;
		content->rid = (u8)rid;
		content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*last_chain_entry),
						GFP_KERNEL);
		if (!last_chain_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
		/* All recipes use look-up index 0 to match switch ID. */
		content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
			content->mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* the chaining recipe matches on every chained recipe's
		 * result index (chain_idx) with a full 16-bit mask
		 */
		list_for_each_entry(entry, &rm->rg_list, l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			content->lkup_indx[i] = entry->chain_idx;
			content->mask[i++] = cpu_to_le16(0xFFFF);
			set_bit(entry->rid, rm->r_bitmap);
		}
		list_add(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[recps].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		content->act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = -EIO;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		memcpy(&recp->ext_words, entry->r_group.pairs,
		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));

		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
		       sizeof(recp->r_bitmap));

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->need_pass_l2 = rm->need_pass_l2;
		recp->allow_pass_l2 = rm->allow_pass_l2;
		recp->recp_created = true;
	}
	/* buf ownership moves to the recipe management entry */
	rm->root_buf = buf;
	kfree(tmp);
	return status;

	/* NOTE(review): recipe IDs already reserved via ice_alloc_recipe are
	 * not released on these error paths -- presumably unwound by the
	 * caller; confirm.
	 */
err_unroll:
err_mem:
	kfree(tmp);
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
5375 
5376 /**
5377  * ice_create_recipe_group - creates recipe group
5378  * @hw: pointer to hardware structure
5379  * @rm: recipe management list entry
5380  * @lkup_exts: lookup elements
5381  */
5382 static int
5383 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5384 			struct ice_prot_lkup_ext *lkup_exts)
5385 {
5386 	u8 recp_count = 0;
5387 	int status;
5388 
5389 	rm->n_grp_count = 0;
5390 
5391 	/* Create recipes for words that are marked not done by packing them
5392 	 * as best fit.
5393 	 */
5394 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
5395 					       &rm->rg_list, &recp_count);
5396 	if (!status) {
5397 		rm->n_grp_count += recp_count;
5398 		rm->n_ext_words = lkup_exts->n_val_words;
5399 		memcpy(&rm->ext_words, lkup_exts->fv_words,
5400 		       sizeof(rm->ext_words));
5401 		memcpy(rm->word_masks, lkup_exts->field_mask,
5402 		       sizeof(rm->word_masks));
5403 	}
5404 
5405 	return status;
5406 }
5407 
5408 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5409  * @hw: pointer to hardware structure
5410  * @rinfo: other information regarding the rule e.g. priority and action info
5411  * @bm: pointer to memory for returning the bitmap of field vectors
5412  */
5413 static void
5414 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5415 			 unsigned long *bm)
5416 {
5417 	enum ice_prof_type prof_type;
5418 
5419 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5420 
5421 	switch (rinfo->tun_type) {
5422 	case ICE_NON_TUN:
5423 		prof_type = ICE_PROF_NON_TUN;
5424 		break;
5425 	case ICE_ALL_TUNNELS:
5426 		prof_type = ICE_PROF_TUN_ALL;
5427 		break;
5428 	case ICE_SW_TUN_GENEVE:
5429 	case ICE_SW_TUN_VXLAN:
5430 		prof_type = ICE_PROF_TUN_UDP;
5431 		break;
5432 	case ICE_SW_TUN_NVGRE:
5433 		prof_type = ICE_PROF_TUN_GRE;
5434 		break;
5435 	case ICE_SW_TUN_GTPU:
5436 		prof_type = ICE_PROF_TUN_GTPU;
5437 		break;
5438 	case ICE_SW_TUN_GTPC:
5439 		prof_type = ICE_PROF_TUN_GTPC;
5440 		break;
5441 	case ICE_SW_TUN_PFCP:
5442 		prof_type = ICE_PROF_TUN_PFCP;
5443 		break;
5444 	case ICE_SW_TUN_AND_NON_TUN:
5445 	default:
5446 		prof_type = ICE_PROF_ALL;
5447 		break;
5448 	}
5449 
5450 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
5451 }
5452 
5453 /**
5454  * ice_subscribe_recipe - subscribe to an existing recipe
5455  * @hw: pointer to the hardware structure
5456  * @rid: recipe ID to subscribe to
5457  *
5458  * Return: 0 on success, and others on error
5459  */
5460 static int ice_subscribe_recipe(struct ice_hw *hw, u16 rid)
5461 {
5462 	DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
5463 	u16 buf_len = __struct_size(sw_buf);
5464 	u16 res_type;
5465 	int status;
5466 
5467 	/* Prepare buffer to allocate resource */
5468 	sw_buf->num_elems = cpu_to_le16(1);
5469 	res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE) |
5470 		   ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED |
5471 		   ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL;
5472 	sw_buf->res_type = cpu_to_le16(res_type);
5473 
5474 	sw_buf->elem[0].e.sw_resp = cpu_to_le16(rid);
5475 
5476 	status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
5477 				       ice_aqc_opc_alloc_res);
5478 
5479 	return status;
5480 }
5481 
/**
 * ice_subscribable_recp_shared - share an existing subscribable recipe
 * @hw: pointer to the hardware structure
 * @rid: recipe ID to subscribe to
 *
 * Subscribes to every recipe whose bit is set in @rid's r_bitmap
 * (typically the recipe itself plus any recipes chained to it).
 */
static void ice_subscribable_recp_shared(struct ice_hw *hw, u16 rid)
{
	struct ice_sw_recipe *recps = hw->switch_info->recp_list;
	u16 sub_rid;

	/* best effort: return codes from ice_subscribe_recipe are ignored */
	for_each_set_bit(sub_rid, recps[rid].r_bitmap, ICE_MAX_NUM_RECIPES)
		ice_subscribe_recipe(hw, sub_rid);
}
5495 
/**
 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *  structure per protocol header
 * @lkups_cnt: number of protocols
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @rid: return the recipe ID of the recipe created
 *
 * Reuses an existing recipe when one matches the requested lookups, tunnel
 * type and pass-L2 flags; otherwise creates a new recipe and associates it
 * with all compatible profiles.
 *
 * Note: the err_unroll label doubles as the success-path cleanup -- the
 * temporary lists and the recipe-management structure are always freed and
 * the prevailing status (0 on success) is returned.
 *
 * Return: 0 on success with *@rid set, negative error code otherwise.
 */
static int
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
{
	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
	struct ice_prot_lkup_ext *lkup_exts;
	struct ice_recp_grp_entry *r_entry;
	struct ice_sw_fv_list_entry *fvit;
	struct ice_recp_grp_entry *r_tmp;
	struct ice_sw_fv_list_entry *tmp;
	struct ice_sw_recipe *rm;
	int status = 0;
	u16 rid_tmp;
	u8 i;

	if (!lkups_cnt)
		return -EINVAL;

	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
	if (!lkup_exts)
		return -ENOMEM;

	/* Determine the number of words to be matched and if it exceeds a
	 * recipe's restrictions
	 */
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
			status = -EIO;
			goto err_free_lkup_exts;
		}

		/* a lookup that contributes no valid words is an error */
		count = ice_fill_valid_words(&lkups[i], lkup_exts);
		if (!count) {
			status = -EIO;
			goto err_free_lkup_exts;
		}
	}

	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
	if (!rm) {
		status = -ENOMEM;
		goto err_free_lkup_exts;
	}

	/* Get field vectors that contain fields extracted from all the protocol
	 * headers being programmed.
	 */
	INIT_LIST_HEAD(&rm->fv_list);
	INIT_LIST_HEAD(&rm->rg_list);

	/* Get bitmap of field vectors (profiles) that are compatible with the
	 * rule request; only these will be searched in the subsequent call to
	 * ice_get_sw_fv_list.
	 */
	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);

	status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
	if (status)
		goto err_unroll;

	/* Group match words into recipes using preferred recipe grouping
	 * criteria.
	 */
	status = ice_create_recipe_group(hw, rm, lkup_exts);
	if (status)
		goto err_unroll;

	/* set the recipe priority if specified */
	rm->priority = (u8)rinfo->priority;

	rm->need_pass_l2 = rinfo->need_pass_l2;
	rm->allow_pass_l2 = rinfo->allow_pass_l2;

	/* Find offsets from the field vector. Pick the first one for all the
	 * recipes.
	 */
	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
	if (status)
		goto err_unroll;

	/* get bitmap of all profiles the recipe will be associated with */
	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
		set_bit((u16)fvit->profile_id, profiles);
	}

	/* Look for a recipe which matches our requested fv / mask list */
	*rid = ice_find_recp(hw, lkup_exts, rinfo, true);
	if (*rid < ICE_MAX_NUM_RECIPES) {
		/* Success if found a recipe that match the existing criteria */
		if (hw->recp_reuse)
			ice_subscribable_recp_shared(hw, *rid);

		/* status is still 0 here; err_unroll is the shared cleanup */
		goto err_unroll;
	}

	rm->tun_type = rinfo->tun_type;
	/* Recipe we need does not exist, add a recipe */
	status = ice_add_sw_recipe(hw, rm, profiles);
	if (status)
		goto err_unroll;

	/* Associate all the recipes created with all the profiles in the
	 * common field vector.
	 */
	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
		u64 recp_assoc;
		u16 j;

		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
						      &recp_assoc, NULL);
		if (status)
			goto err_free_recipe;

		/* merge the new recipes into the profile's existing set */
		bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
			  ICE_MAX_NUM_RECIPES);
		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
		if (status)
			goto err_free_recipe;

		bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
						      recp_assoc, NULL);
		ice_release_change_lock(hw);

		if (status)
			goto err_free_recipe;

		/* Update profile to recipe bitmap array */
		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
			    ICE_MAX_NUM_RECIPES);

		/* Update recipe to profile bitmap array */
		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
	}

	*rid = rm->root_rid;
	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
	       sizeof(*lkup_exts));
	/* success: fall into the shared cleanup with status == 0 */
	goto err_unroll;

err_free_recipe:
	/* failed after creating recipes: release the recipe resources when
	 * recipe reuse is enabled (return codes are checked per recipe)
	 */
	if (hw->recp_reuse) {
		for_each_set_bit(rid_tmp, rm->r_bitmap, ICE_MAX_NUM_RECIPES) {
			if (!ice_free_recipe_res(hw, rid_tmp))
				clear_bit(rid_tmp, rm->r_bitmap);
		}
	}

err_unroll:
	/* reached on both success and failure; frees the temporary
	 * recipe-group and field-vector lists and the management entry
	 */
	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
		list_del(&r_entry->l_entry);
		devm_kfree(ice_hw_to_dev(hw), r_entry);
	}

	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
		list_del(&fvit->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fvit);
	}

	devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
	kfree(rm);

err_free_lkup_exts:
	kfree(lkup_exts);

	return status;
}
5680 
/**
 * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
 *
 * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
 * @num_vlan: number of VLAN tags
 *
 * Builds a heap-allocated copy of @dummy_pkt with @num_vlan (1 or 2) VLAN
 * headers spliced in at the original EtherType position, and with every
 * subsequent header offset shifted accordingly.
 *
 * Return: the new profile, or an ERR_PTR() on invalid @num_vlan or
 * allocation failure. The profile, its offsets array and its packet buffer
 * are three separate allocations; ICE_PKT_KMALLOC is set in the match
 * flags -- presumably so the consumer knows to free them (confirm at the
 * call sites).
 */
static struct ice_dummy_pkt_profile *
ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
			  u32 num_vlan)
{
	struct ice_dummy_pkt_profile *profile;
	struct ice_dummy_pkt_offsets *offsets;
	u32 buf_len, off, etype_off, i;
	u8 *pkt;

	if (num_vlan < 1 || num_vlan > 2)
		return ERR_PTR(-EINVAL);

	/* number of bytes the inserted VLAN tag(s) add to the packet */
	off = num_vlan * VLAN_HLEN;

	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
		  dummy_pkt->offsets_len;
	offsets = kzalloc(buf_len, GFP_KERNEL);
	if (!offsets)
		return ERR_PTR(-ENOMEM);

	/* keep entry 0, then insert one offset entry per VLAN tag */
	offsets[0] = dummy_pkt->offsets[0];
	if (num_vlan == 2) {
		offsets[1] = ice_dummy_qinq_packet_offsets[0];
		offsets[2] = ice_dummy_qinq_packet_offsets[1];
	} else if (num_vlan == 1) {
		offsets[1] = ice_dummy_vlan_packet_offsets[0];
	}

	/* shift every remaining header offset by the inserted length */
	for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
		offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
		offsets[i + num_vlan].offset =
			dummy_pkt->offsets[i].offset + off;
	}
	/* copy the ICE_PROTOCOL_LAST terminator entry */
	offsets[i + num_vlan] = dummy_pkt->offsets[i];

	/* VLAN tags go in at the second header's original offset */
	etype_off = dummy_pkt->offsets[1].offset;

	buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
		  dummy_pkt->pkt_len;
	pkt = kzalloc(buf_len, GFP_KERNEL);
	if (!pkt) {
		kfree(offsets);
		return ERR_PTR(-ENOMEM);
	}

	/* bytes up to the EtherType, then the tag(s), then the rest */
	memcpy(pkt, dummy_pkt->pkt, etype_off);
	memcpy(pkt + etype_off,
	       num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
	       off);
	memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
	       dummy_pkt->pkt_len - etype_off);

	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
	if (!profile) {
		kfree(offsets);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	profile->offsets = offsets;
	profile->pkt = pkt;
	profile->pkt_len = buf_len;
	/* mark the profile as heap-allocated (see kernel-doc above) */
	profile->match |= ICE_PKT_KMALLOC;

	return profile;
}
5753 
5754 /**
5755  * ice_find_dummy_packet - find dummy packet
5756  *
5757  * @lkups: lookup elements or match criteria for the advanced recipe, one
5758  *	   structure per protocol header
5759  * @lkups_cnt: number of protocols
5760  * @tun_type: tunnel type
5761  *
5762  * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5763  */
5764 static const struct ice_dummy_pkt_profile *
5765 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5766 		      enum ice_sw_tunnel_type tun_type)
5767 {
5768 	const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5769 	u32 match = 0, vlan_count = 0;
5770 	u16 i;
5771 
5772 	switch (tun_type) {
5773 	case ICE_SW_TUN_GTPC:
5774 		match |= ICE_PKT_TUN_GTPC;
5775 		break;
5776 	case ICE_SW_TUN_GTPU:
5777 		match |= ICE_PKT_TUN_GTPU;
5778 		break;
5779 	case ICE_SW_TUN_NVGRE:
5780 		match |= ICE_PKT_TUN_NVGRE;
5781 		break;
5782 	case ICE_SW_TUN_GENEVE:
5783 	case ICE_SW_TUN_VXLAN:
5784 		match |= ICE_PKT_TUN_UDP;
5785 		break;
5786 	case ICE_SW_TUN_PFCP:
5787 		match |= ICE_PKT_PFCP;
5788 		break;
5789 	default:
5790 		break;
5791 	}
5792 
5793 	for (i = 0; i < lkups_cnt; i++) {
5794 		if (lkups[i].type == ICE_UDP_ILOS)
5795 			match |= ICE_PKT_INNER_UDP;
5796 		else if (lkups[i].type == ICE_TCP_IL)
5797 			match |= ICE_PKT_INNER_TCP;
5798 		else if (lkups[i].type == ICE_IPV6_OFOS)
5799 			match |= ICE_PKT_OUTER_IPV6;
5800 		else if (lkups[i].type == ICE_VLAN_OFOS ||
5801 			 lkups[i].type == ICE_VLAN_EX)
5802 			vlan_count++;
5803 		else if (lkups[i].type == ICE_VLAN_IN)
5804 			vlan_count++;
5805 		else if (lkups[i].type == ICE_ETYPE_OL &&
5806 			 lkups[i].h_u.ethertype.ethtype_id ==
5807 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5808 			 lkups[i].m_u.ethertype.ethtype_id ==
5809 				cpu_to_be16(0xFFFF))
5810 			match |= ICE_PKT_OUTER_IPV6;
5811 		else if (lkups[i].type == ICE_ETYPE_IL &&
5812 			 lkups[i].h_u.ethertype.ethtype_id ==
5813 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5814 			 lkups[i].m_u.ethertype.ethtype_id ==
5815 				cpu_to_be16(0xFFFF))
5816 			match |= ICE_PKT_INNER_IPV6;
5817 		else if (lkups[i].type == ICE_IPV6_IL)
5818 			match |= ICE_PKT_INNER_IPV6;
5819 		else if (lkups[i].type == ICE_GTP_NO_PAY)
5820 			match |= ICE_PKT_GTP_NOPAY;
5821 		else if (lkups[i].type == ICE_PPPOE) {
5822 			match |= ICE_PKT_PPPOE;
5823 			if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5824 			    htons(PPP_IPV6))
5825 				match |= ICE_PKT_OUTER_IPV6;
5826 		} else if (lkups[i].type == ICE_L2TPV3)
5827 			match |= ICE_PKT_L2TPV3;
5828 	}
5829 
5830 	while (ret->match && (match & ret->match) != ret->match)
5831 		ret++;
5832 
5833 	if (vlan_count != 0)
5834 		ret = ice_dummy_packet_add_vlan(ret, vlan_count);
5835 
5836 	return ret;
5837 }
5838 
5839 /**
5840  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5841  *
5842  * @lkups: lookup elements or match criteria for the advanced recipe, one
5843  *	   structure per protocol header
5844  * @lkups_cnt: number of protocols
5845  * @s_rule: stores rule information from the match criteria
5846  * @profile: dummy packet profile (the template, its size and header offsets)
5847  */
5848 static int
5849 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5850 			  struct ice_sw_rule_lkup_rx_tx *s_rule,
5851 			  const struct ice_dummy_pkt_profile *profile)
5852 {
5853 	u8 *pkt;
5854 	u16 i;
5855 
5856 	/* Start with a packet with a pre-defined/dummy content. Then, fill
5857 	 * in the header values to be looked up or matched.
5858 	 */
5859 	pkt = s_rule->hdr_data;
5860 
5861 	memcpy(pkt, profile->pkt, profile->pkt_len);
5862 
5863 	for (i = 0; i < lkups_cnt; i++) {
5864 		const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
5865 		enum ice_protocol_type type;
5866 		u16 offset = 0, len = 0, j;
5867 		bool found = false;
5868 
5869 		/* find the start of this layer; it should be found since this
5870 		 * was already checked when search for the dummy packet
5871 		 */
5872 		type = lkups[i].type;
5873 		/* metadata isn't present in the packet */
5874 		if (type == ICE_HW_METADATA)
5875 			continue;
5876 
5877 		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5878 			if (type == offsets[j].type) {
5879 				offset = offsets[j].offset;
5880 				found = true;
5881 				break;
5882 			}
5883 		}
5884 		/* this should never happen in a correct calling sequence */
5885 		if (!found)
5886 			return -EINVAL;
5887 
5888 		switch (lkups[i].type) {
5889 		case ICE_MAC_OFOS:
5890 		case ICE_MAC_IL:
5891 			len = sizeof(struct ice_ether_hdr);
5892 			break;
5893 		case ICE_ETYPE_OL:
5894 		case ICE_ETYPE_IL:
5895 			len = sizeof(struct ice_ethtype_hdr);
5896 			break;
5897 		case ICE_VLAN_OFOS:
5898 		case ICE_VLAN_EX:
5899 		case ICE_VLAN_IN:
5900 			len = sizeof(struct ice_vlan_hdr);
5901 			break;
5902 		case ICE_IPV4_OFOS:
5903 		case ICE_IPV4_IL:
5904 			len = sizeof(struct ice_ipv4_hdr);
5905 			break;
5906 		case ICE_IPV6_OFOS:
5907 		case ICE_IPV6_IL:
5908 			len = sizeof(struct ice_ipv6_hdr);
5909 			break;
5910 		case ICE_TCP_IL:
5911 		case ICE_UDP_OF:
5912 		case ICE_UDP_ILOS:
5913 			len = sizeof(struct ice_l4_hdr);
5914 			break;
5915 		case ICE_SCTP_IL:
5916 			len = sizeof(struct ice_sctp_hdr);
5917 			break;
5918 		case ICE_NVGRE:
5919 			len = sizeof(struct ice_nvgre_hdr);
5920 			break;
5921 		case ICE_VXLAN:
5922 		case ICE_GENEVE:
5923 			len = sizeof(struct ice_udp_tnl_hdr);
5924 			break;
5925 		case ICE_GTP_NO_PAY:
5926 		case ICE_GTP:
5927 			len = sizeof(struct ice_udp_gtp_hdr);
5928 			break;
5929 		case ICE_PFCP:
5930 			len = sizeof(struct ice_pfcp_hdr);
5931 			break;
5932 		case ICE_PPPOE:
5933 			len = sizeof(struct ice_pppoe_hdr);
5934 			break;
5935 		case ICE_L2TPV3:
5936 			len = sizeof(struct ice_l2tpv3_sess_hdr);
5937 			break;
5938 		default:
5939 			return -EINVAL;
5940 		}
5941 
5942 		/* the length should be a word multiple */
5943 		if (len % ICE_BYTES_PER_WORD)
5944 			return -EIO;
5945 
5946 		/* We have the offset to the header start, the length, the
5947 		 * caller's header values and mask. Use this information to
5948 		 * copy the data into the dummy packet appropriately based on
5949 		 * the mask. Note that we need to only write the bits as
5950 		 * indicated by the mask to make sure we don't improperly write
5951 		 * over any significant packet data.
5952 		 */
5953 		for (j = 0; j < len / sizeof(u16); j++) {
5954 			u16 *ptr = (u16 *)(pkt + offset);
5955 			u16 mask = lkups[i].m_raw[j];
5956 
5957 			if (!mask)
5958 				continue;
5959 
5960 			ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
5961 		}
5962 	}
5963 
5964 	s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
5965 
5966 	return 0;
5967 }
5968 
5969 /**
5970  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5971  * @hw: pointer to the hardware structure
5972  * @tun_type: tunnel type
5973  * @pkt: dummy packet to fill in
5974  * @offsets: offset info for the dummy packet
5975  */
5976 static int
5977 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5978 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5979 {
5980 	u16 open_port, i;
5981 
5982 	switch (tun_type) {
5983 	case ICE_SW_TUN_VXLAN:
5984 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5985 			return -EIO;
5986 		break;
5987 	case ICE_SW_TUN_GENEVE:
5988 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5989 			return -EIO;
5990 		break;
5991 	default:
5992 		/* Nothing needs to be done for this tunnel type */
5993 		return 0;
5994 	}
5995 
5996 	/* Find the outer UDP protocol header and insert the port number */
5997 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5998 		if (offsets[i].type == ICE_UDP_OF) {
5999 			struct ice_l4_hdr *hdr;
6000 			u16 offset;
6001 
6002 			offset = offsets[i].offset;
6003 			hdr = (struct ice_l4_hdr *)&pkt[offset];
6004 			hdr->dst_port = cpu_to_be16(open_port);
6005 
6006 			return 0;
6007 		}
6008 	}
6009 
6010 	return -EIO;
6011 }
6012 
6013 /**
6014  * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
6015  * @hw: pointer to hw structure
6016  * @vlan_type: VLAN tag type
6017  * @pkt: dummy packet to fill in
6018  * @offsets: offset info for the dummy packet
6019  */
6020 static int
6021 ice_fill_adv_packet_vlan(struct ice_hw *hw, u16 vlan_type, u8 *pkt,
6022 			 const struct ice_dummy_pkt_offsets *offsets)
6023 {
6024 	u16 i;
6025 
6026 	/* Check if there is something to do */
6027 	if (!vlan_type || !ice_is_dvm_ena(hw))
6028 		return 0;
6029 
6030 	/* Find VLAN header and insert VLAN TPID */
6031 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6032 		if (offsets[i].type == ICE_VLAN_OFOS ||
6033 		    offsets[i].type == ICE_VLAN_EX) {
6034 			struct ice_vlan_hdr *hdr;
6035 			u16 offset;
6036 
6037 			offset = offsets[i].offset;
6038 			hdr = (struct ice_vlan_hdr *)&pkt[offset];
6039 			hdr->type = cpu_to_be16(vlan_type);
6040 
6041 			return 0;
6042 		}
6043 	}
6044 
6045 	return -EIO;
6046 }
6047 
6048 static bool ice_rules_equal(const struct ice_adv_rule_info *first,
6049 			    const struct ice_adv_rule_info *second)
6050 {
6051 	return first->sw_act.flag == second->sw_act.flag &&
6052 	       first->tun_type == second->tun_type &&
6053 	       first->vlan_type == second->vlan_type &&
6054 	       first->src_vsi == second->src_vsi &&
6055 	       first->need_pass_l2 == second->need_pass_l2 &&
6056 	       first->allow_pass_l2 == second->allow_pass_l2;
6057 }
6058 
6059 /**
6060  * ice_find_adv_rule_entry - Search a rule entry
6061  * @hw: pointer to the hardware structure
6062  * @lkups: lookup elements or match criteria for the advanced recipe, one
6063  *	   structure per protocol header
6064  * @lkups_cnt: number of protocols
6065  * @recp_id: recipe ID for which we are finding the rule
6066  * @rinfo: other information regarding the rule e.g. priority and action info
6067  *
6068  * Helper function to search for a given advance rule entry
6069  * Returns pointer to entry storing the rule if found
6070  */
6071 static struct ice_adv_fltr_mgmt_list_entry *
6072 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6073 			u16 lkups_cnt, u16 recp_id,
6074 			struct ice_adv_rule_info *rinfo)
6075 {
6076 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
6077 	struct ice_switch_info *sw = hw->switch_info;
6078 	int i;
6079 
6080 	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
6081 			    list_entry) {
6082 		bool lkups_matched = true;
6083 
6084 		if (lkups_cnt != list_itr->lkups_cnt)
6085 			continue;
6086 		for (i = 0; i < list_itr->lkups_cnt; i++)
6087 			if (memcmp(&list_itr->lkups[i], &lkups[i],
6088 				   sizeof(*lkups))) {
6089 				lkups_matched = false;
6090 				break;
6091 			}
6092 		if (ice_rules_equal(rinfo, &list_itr->rule_info) &&
6093 		    lkups_matched)
6094 			return list_itr;
6095 	}
6096 	return NULL;
6097 }
6098 
6099 /**
6100  * ice_adv_add_update_vsi_list
6101  * @hw: pointer to the hardware structure
6102  * @m_entry: pointer to current adv filter management list entry
6103  * @cur_fltr: filter information from the book keeping entry
6104  * @new_fltr: filter information with the new VSI to be added
6105  *
6106  * Call AQ command to add or update previously created VSI list with new VSI.
6107  *
6108  * Helper function to do book keeping associated with adding filter information
6109  * The algorithm to do the booking keeping is described below :
6110  * When a VSI needs to subscribe to a given advanced filter
6111  *	if only one VSI has been added till now
6112  *		Allocate a new VSI list and add two VSIs
6113  *		to this list using switch rule command
6114  *		Update the previously created switch rule with the
6115  *		newly created VSI list ID
6116  *	if a VSI list was previously created
6117  *		Add the new VSI to the previously created VSI list set
6118  *		using the update switch rule command
6119  */
6120 static int
6121 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6122 			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
6123 			    struct ice_adv_rule_info *cur_fltr,
6124 			    struct ice_adv_rule_info *new_fltr)
6125 {
6126 	u16 vsi_list_id = 0;
6127 	int status;
6128 
6129 	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6130 	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6131 	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6132 		return -EOPNOTSUPP;
6133 
6134 	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6135 	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6136 	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6137 	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6138 		return -EOPNOTSUPP;
6139 
6140 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6141 		 /* Only one entry existed in the mapping and it was not already
6142 		  * a part of a VSI list. So, create a VSI list with the old and
6143 		  * new VSIs.
6144 		  */
6145 		struct ice_fltr_info tmp_fltr;
6146 		u16 vsi_handle_arr[2];
6147 
6148 		/* A rule already exists with the new VSI being added */
6149 		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6150 		    new_fltr->sw_act.fwd_id.hw_vsi_id)
6151 			return -EEXIST;
6152 
6153 		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6154 		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6155 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6156 						  &vsi_list_id,
6157 						  ICE_SW_LKUP_LAST);
6158 		if (status)
6159 			return status;
6160 
6161 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6162 		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
6163 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6164 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6165 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6166 		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6167 
6168 		/* Update the previous switch rule of "forward to VSI" to
6169 		 * "fwd to VSI list"
6170 		 */
6171 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6172 		if (status)
6173 			return status;
6174 
6175 		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6176 		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6177 		m_entry->vsi_list_info =
6178 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6179 						vsi_list_id);
6180 	} else {
6181 		u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6182 
6183 		if (!m_entry->vsi_list_info)
6184 			return -EIO;
6185 
6186 		/* A rule already exists with the new VSI being added */
6187 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
6188 			return 0;
6189 
6190 		/* Update the previously created VSI list set with
6191 		 * the new VSI ID passed in
6192 		 */
6193 		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6194 
6195 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6196 						  vsi_list_id, false,
6197 						  ice_aqc_opc_update_sw_rules,
6198 						  ICE_SW_LKUP_LAST);
6199 		/* update VSI list mapping info with new VSI ID */
6200 		if (!status)
6201 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
6202 	}
6203 	if (!status)
6204 		m_entry->vsi_count++;
6205 	return status;
6206 }
6207 
/**
 * ice_rule_add_tunnel_metadata - add lookup on tunnel packet-flags metadata
 * @lkup: lookup element to populate
 *
 * Marks @lkup as a HW metadata lookup and ORs the tunnel flag mask into
 * metadata flags word MDID21.
 */
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
{
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID21] |=
		cpu_to_be16(ICE_PKT_TUNNEL_MASK);
}
6214 
/**
 * ice_rule_add_direction_metadata - add lookup on direction metadata
 * @lkup: lookup element to populate
 *
 * Marks @lkup as a HW metadata lookup and ORs the ICE_PKT_FROM_NETWORK mask
 * into metadata flags word MDID20.
 */
void ice_rule_add_direction_metadata(struct ice_adv_lkup_elem *lkup)
{
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
		cpu_to_be16(ICE_PKT_FROM_NETWORK);
}
6221 
/**
 * ice_rule_add_vlan_metadata - add lookup on VLAN packet-flags metadata
 * @lkup: lookup element to populate
 *
 * Marks @lkup as a HW metadata lookup and ORs the VLAN flag mask into
 * metadata flags word MDID20.
 */
void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
{
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
		cpu_to_be16(ICE_PKT_VLAN_MASK);
}
6228 
/**
 * ice_rule_add_src_vsi_metadata - add lookup on source VSI metadata
 * @lkup: lookup element to populate
 *
 * Marks @lkup as a HW metadata lookup and sets the source VSI field mask to
 * ICE_MDID_SOURCE_VSI_MASK.
 */
void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup)
{
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.source_vsi = cpu_to_be16(ICE_MDID_SOURCE_VSI_MASK);
}
6234 
6235 /**
6236  * ice_add_adv_rule - helper function to create an advanced switch rule
6237  * @hw: pointer to the hardware structure
6238  * @lkups: information on the words that needs to be looked up. All words
6239  * together makes one recipe
6240  * @lkups_cnt: num of entries in the lkups array
6241  * @rinfo: other information related to the rule that needs to be programmed
6242  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6243  *               ignored is case of error.
6244  *
6245  * This function can program only 1 rule at a time. The lkups is used to
6246  * describe the all the words that forms the "lookup" portion of the recipe.
6247  * These words can span multiple protocols. Callers to this function need to
6248  * pass in a list of protocol headers with lookup information along and mask
6249  * that determines which words are valid from the given protocol header.
6250  * rinfo describes other information related to this rule such as forwarding
6251  * IDs, priority of this rule, etc.
6252  */
6253 int
6254 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6255 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6256 		 struct ice_rule_query_data *added_entry)
6257 {
6258 	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6259 	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
6260 	const struct ice_dummy_pkt_profile *profile;
6261 	u16 rid = 0, i, rule_buf_sz, vsi_handle;
6262 	struct list_head *rule_head;
6263 	struct ice_switch_info *sw;
6264 	u16 word_cnt;
6265 	u32 act = 0;
6266 	int status;
6267 	u8 q_rgn;
6268 
6269 	/* Initialize profile to result index bitmap */
6270 	if (!hw->switch_info->prof_res_bm_init) {
6271 		hw->switch_info->prof_res_bm_init = 1;
6272 		ice_init_prof_result_bm(hw);
6273 	}
6274 
6275 	if (!lkups_cnt)
6276 		return -EINVAL;
6277 
6278 	/* get # of words we need to match */
6279 	word_cnt = 0;
6280 	for (i = 0; i < lkups_cnt; i++) {
6281 		u16 j;
6282 
6283 		for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
6284 			if (lkups[i].m_raw[j])
6285 				word_cnt++;
6286 	}
6287 
6288 	if (!word_cnt)
6289 		return -EINVAL;
6290 
6291 	if (word_cnt > ICE_MAX_CHAIN_WORDS)
6292 		return -ENOSPC;
6293 
6294 	/* locate a dummy packet */
6295 	profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
6296 	if (IS_ERR(profile))
6297 		return PTR_ERR(profile);
6298 
6299 	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6300 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6301 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6302 	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
6303 	      rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET ||
6304 	      rinfo->sw_act.fltr_act == ICE_NOP)) {
6305 		status = -EIO;
6306 		goto free_pkt_profile;
6307 	}
6308 
6309 	vsi_handle = rinfo->sw_act.vsi_handle;
6310 	if (!ice_is_vsi_valid(hw, vsi_handle)) {
6311 		status =  -EINVAL;
6312 		goto free_pkt_profile;
6313 	}
6314 
6315 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6316 	    rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET ||
6317 	    rinfo->sw_act.fltr_act == ICE_NOP) {
6318 		rinfo->sw_act.fwd_id.hw_vsi_id =
6319 			ice_get_hw_vsi_num(hw, vsi_handle);
6320 	}
6321 
6322 	if (rinfo->src_vsi)
6323 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
6324 	else
6325 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6326 
6327 	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6328 	if (status)
6329 		goto free_pkt_profile;
6330 	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6331 	if (m_entry) {
6332 		/* we have to add VSI to VSI_LIST and increment vsi_count.
6333 		 * Also Update VSI list so that we can change forwarding rule
6334 		 * if the rule already exists, we will check if it exists with
6335 		 * same vsi_id, if not then add it to the VSI list if it already
6336 		 * exists if not then create a VSI list and add the existing VSI
6337 		 * ID and the new VSI ID to the list
6338 		 * We will add that VSI to the list
6339 		 */
6340 		status = ice_adv_add_update_vsi_list(hw, m_entry,
6341 						     &m_entry->rule_info,
6342 						     rinfo);
6343 		if (added_entry) {
6344 			added_entry->rid = rid;
6345 			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6346 			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6347 		}
6348 		goto free_pkt_profile;
6349 	}
6350 	rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
6351 	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6352 	if (!s_rule) {
6353 		status = -ENOMEM;
6354 		goto free_pkt_profile;
6355 	}
6356 
6357 	if (rinfo->sw_act.fltr_act != ICE_MIRROR_PACKET) {
6358 		if (!rinfo->flags_info.act_valid) {
6359 			act |= ICE_SINGLE_ACT_LAN_ENABLE;
6360 			act |= ICE_SINGLE_ACT_LB_ENABLE;
6361 		} else {
6362 			act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
6363 							ICE_SINGLE_ACT_LB_ENABLE);
6364 		}
6365 	}
6366 
6367 	switch (rinfo->sw_act.fltr_act) {
6368 	case ICE_FWD_TO_VSI:
6369 		act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
6370 				  rinfo->sw_act.fwd_id.hw_vsi_id);
6371 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6372 		break;
6373 	case ICE_FWD_TO_Q:
6374 		act |= ICE_SINGLE_ACT_TO_Q;
6375 		act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
6376 				  rinfo->sw_act.fwd_id.q_id);
6377 		break;
6378 	case ICE_FWD_TO_QGRP:
6379 		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6380 			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
6381 		act |= ICE_SINGLE_ACT_TO_Q;
6382 		act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
6383 				  rinfo->sw_act.fwd_id.q_id);
6384 		act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn);
6385 		break;
6386 	case ICE_DROP_PACKET:
6387 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6388 		       ICE_SINGLE_ACT_VALID_BIT;
6389 		break;
6390 	case ICE_MIRROR_PACKET:
6391 		act |= ICE_SINGLE_ACT_OTHER_ACTS;
6392 		act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
6393 				  rinfo->sw_act.fwd_id.hw_vsi_id);
6394 		break;
6395 	case ICE_NOP:
6396 		act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
6397 				  rinfo->sw_act.fwd_id.hw_vsi_id);
6398 		act &= ~ICE_SINGLE_ACT_VALID_BIT;
6399 		break;
6400 	default:
6401 		status = -EIO;
6402 		goto err_ice_add_adv_rule;
6403 	}
6404 
6405 	/* If there is no matching criteria for direction there
6406 	 * is only one difference between Rx and Tx:
6407 	 * - get switch id base on VSI number from source field (Tx)
6408 	 * - get switch id base on port number (Rx)
6409 	 *
6410 	 * If matching on direction metadata is chose rule direction is
6411 	 * extracted from type value set here.
6412 	 */
6413 	if (rinfo->sw_act.flag & ICE_FLTR_TX) {
6414 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
6415 		s_rule->src = cpu_to_le16(rinfo->sw_act.src);
6416 	} else {
6417 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
6418 		s_rule->src = cpu_to_le16(hw->port_info->lport);
6419 	}
6420 
6421 	s_rule->recipe_id = cpu_to_le16(rid);
6422 	s_rule->act = cpu_to_le32(act);
6423 
6424 	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
6425 	if (status)
6426 		goto err_ice_add_adv_rule;
6427 
6428 	status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->hdr_data,
6429 					 profile->offsets);
6430 	if (status)
6431 		goto err_ice_add_adv_rule;
6432 
6433 	status = ice_fill_adv_packet_vlan(hw, rinfo->vlan_type,
6434 					  s_rule->hdr_data,
6435 					  profile->offsets);
6436 	if (status)
6437 		goto err_ice_add_adv_rule;
6438 
6439 	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6440 				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6441 				 NULL);
6442 	if (status)
6443 		goto err_ice_add_adv_rule;
6444 	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
6445 				sizeof(struct ice_adv_fltr_mgmt_list_entry),
6446 				GFP_KERNEL);
6447 	if (!adv_fltr) {
6448 		status = -ENOMEM;
6449 		goto err_ice_add_adv_rule;
6450 	}
6451 
6452 	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
6453 				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
6454 	if (!adv_fltr->lkups) {
6455 		status = -ENOMEM;
6456 		goto err_ice_add_adv_rule;
6457 	}
6458 
6459 	adv_fltr->lkups_cnt = lkups_cnt;
6460 	adv_fltr->rule_info = *rinfo;
6461 	adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
6462 	sw = hw->switch_info;
6463 	sw->recp_list[rid].adv_rule = true;
6464 	rule_head = &sw->recp_list[rid].filt_rules;
6465 
6466 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6467 		adv_fltr->vsi_count = 1;
6468 
6469 	/* Add rule entry to book keeping list */
6470 	list_add(&adv_fltr->list_entry, rule_head);
6471 	if (added_entry) {
6472 		added_entry->rid = rid;
6473 		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6474 		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6475 	}
6476 err_ice_add_adv_rule:
6477 	if (status && adv_fltr) {
6478 		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
6479 		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
6480 	}
6481 
6482 	kfree(s_rule);
6483 
6484 free_pkt_profile:
6485 	if (profile->match & ICE_PKT_KMALLOC) {
6486 		kfree(profile->offsets);
6487 		kfree(profile->pkt);
6488 		kfree(profile);
6489 	}
6490 
6491 	return status;
6492 }
6493 
6494 /**
6495  * ice_replay_vsi_fltr - Replay filters for requested VSI
6496  * @hw: pointer to the hardware structure
6497  * @vsi_handle: driver VSI handle
6498  * @recp_id: Recipe ID for which rules need to be replayed
6499  * @list_head: list for which filters need to be replayed
6500  *
6501  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6502  * It is required to pass valid VSI handle.
6503  */
6504 static int
6505 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6506 		    struct list_head *list_head)
6507 {
6508 	struct ice_fltr_mgmt_list_entry *itr;
6509 	int status = 0;
6510 	u16 hw_vsi_id;
6511 
6512 	if (list_empty(list_head))
6513 		return status;
6514 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6515 
6516 	list_for_each_entry(itr, list_head, list_entry) {
6517 		struct ice_fltr_list_entry f_entry;
6518 
6519 		f_entry.fltr_info = itr->fltr_info;
6520 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6521 		    itr->fltr_info.vsi_handle == vsi_handle) {
6522 			/* update the src in case it is VSI num */
6523 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6524 				f_entry.fltr_info.src = hw_vsi_id;
6525 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6526 			if (status)
6527 				goto end;
6528 			continue;
6529 		}
6530 		if (!itr->vsi_list_info ||
6531 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6532 			continue;
6533 		/* Clearing it so that the logic can add it back */
6534 		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6535 		f_entry.fltr_info.vsi_handle = vsi_handle;
6536 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6537 		/* update the src in case it is VSI num */
6538 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6539 			f_entry.fltr_info.src = hw_vsi_id;
6540 		if (recp_id == ICE_SW_LKUP_VLAN)
6541 			status = ice_add_vlan_internal(hw, &f_entry);
6542 		else
6543 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6544 		if (status)
6545 			goto end;
6546 	}
6547 end:
6548 	return status;
6549 }
6550 
6551 /**
6552  * ice_adv_rem_update_vsi_list
6553  * @hw: pointer to the hardware structure
6554  * @vsi_handle: VSI handle of the VSI to remove
6555  * @fm_list: filter management entry for which the VSI list management needs to
6556  *	     be done
6557  */
6558 static int
6559 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6560 			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
6561 {
6562 	struct ice_vsi_list_map_info *vsi_list_info;
6563 	enum ice_sw_lkup_type lkup_type;
6564 	u16 vsi_list_id;
6565 	int status;
6566 
6567 	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6568 	    fm_list->vsi_count == 0)
6569 		return -EINVAL;
6570 
6571 	/* A rule with the VSI being removed does not exist */
6572 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
6573 		return -ENOENT;
6574 
6575 	lkup_type = ICE_SW_LKUP_LAST;
6576 	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6577 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6578 					  ice_aqc_opc_update_sw_rules,
6579 					  lkup_type);
6580 	if (status)
6581 		return status;
6582 
6583 	fm_list->vsi_count--;
6584 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6585 	vsi_list_info = fm_list->vsi_list_info;
6586 	if (fm_list->vsi_count == 1) {
6587 		struct ice_fltr_info tmp_fltr;
6588 		u16 rem_vsi_handle;
6589 
6590 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
6591 						ICE_MAX_VSI);
6592 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6593 			return -EIO;
6594 
6595 		/* Make sure VSI list is empty before removing it below */
6596 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6597 						  vsi_list_id, true,
6598 						  ice_aqc_opc_update_sw_rules,
6599 						  lkup_type);
6600 		if (status)
6601 			return status;
6602 
6603 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6604 		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
6605 		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6606 		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6607 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6608 		tmp_fltr.fwd_id.hw_vsi_id =
6609 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6610 		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6611 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6612 		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
6613 
6614 		/* Update the previous switch rule of "MAC forward to VSI" to
6615 		 * "MAC fwd to VSI list"
6616 		 */
6617 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6618 		if (status) {
6619 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6620 				  tmp_fltr.fwd_id.hw_vsi_id, status);
6621 			return status;
6622 		}
6623 		fm_list->vsi_list_info->ref_cnt--;
6624 
6625 		/* Remove the VSI list since it is no longer used */
6626 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6627 		if (status) {
6628 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
6629 				  vsi_list_id, status);
6630 			return status;
6631 		}
6632 
6633 		list_del(&vsi_list_info->list_entry);
6634 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
6635 		fm_list->vsi_list_info = NULL;
6636 	}
6637 
6638 	return status;
6639 }
6640 
6641 /**
6642  * ice_rem_adv_rule - removes existing advanced switch rule
6643  * @hw: pointer to the hardware structure
6644  * @lkups: information on the words that needs to be looked up. All words
6645  *         together makes one recipe
6646  * @lkups_cnt: num of entries in the lkups array
6647  * @rinfo: Its the pointer to the rule information for the rule
6648  *
6649  * This function can be used to remove 1 rule at a time. The lkups is
6650  * used to describe all the words that forms the "lookup" portion of the
6651  * rule. These words can span multiple protocols. Callers to this function
6652  * need to pass in a list of protocol headers with lookup information along
6653  * and mask that determines which words are valid from the given protocol
6654  * header. rinfo describes other information related to this rule such as
6655  * forwarding IDs, priority of this rule, etc.
6656  */
6657 static int
6658 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6659 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6660 {
6661 	struct ice_adv_fltr_mgmt_list_entry *list_elem;
6662 	struct ice_prot_lkup_ext lkup_exts;
6663 	bool remove_rule = false;
6664 	struct mutex *rule_lock; /* Lock to protect filter rule list */
6665 	u16 i, rid, vsi_handle;
6666 	int status = 0;
6667 
6668 	memset(&lkup_exts, 0, sizeof(lkup_exts));
6669 	for (i = 0; i < lkups_cnt; i++) {
6670 		u16 count;
6671 
6672 		if (lkups[i].type >= ICE_PROTOCOL_LAST)
6673 			return -EIO;
6674 
6675 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6676 		if (!count)
6677 			return -EIO;
6678 	}
6679 
6680 	rid = ice_find_recp(hw, &lkup_exts, rinfo, false);
6681 	/* If did not find a recipe that match the existing criteria */
6682 	if (rid == ICE_MAX_NUM_RECIPES)
6683 		return -EINVAL;
6684 
6685 	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6686 	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6687 	/* the rule is already removed */
6688 	if (!list_elem)
6689 		return 0;
6690 	mutex_lock(rule_lock);
6691 	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6692 		remove_rule = true;
6693 	} else if (list_elem->vsi_count > 1) {
6694 		remove_rule = false;
6695 		vsi_handle = rinfo->sw_act.vsi_handle;
6696 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6697 	} else {
6698 		vsi_handle = rinfo->sw_act.vsi_handle;
6699 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6700 		if (status) {
6701 			mutex_unlock(rule_lock);
6702 			return status;
6703 		}
6704 		if (list_elem->vsi_count == 0)
6705 			remove_rule = true;
6706 	}
6707 	mutex_unlock(rule_lock);
6708 	if (remove_rule) {
6709 		struct ice_sw_rule_lkup_rx_tx *s_rule;
6710 		u16 rule_buf_sz;
6711 
6712 		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
6713 		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6714 		if (!s_rule)
6715 			return -ENOMEM;
6716 		s_rule->act = 0;
6717 		s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
6718 		s_rule->hdr_len = 0;
6719 		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6720 					 rule_buf_sz, 1,
6721 					 ice_aqc_opc_remove_sw_rules, NULL);
6722 		if (!status || status == -ENOENT) {
6723 			struct ice_switch_info *sw = hw->switch_info;
6724 			struct ice_sw_recipe *r_list = sw->recp_list;
6725 
6726 			mutex_lock(rule_lock);
6727 			list_del(&list_elem->list_entry);
6728 			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
6729 			devm_kfree(ice_hw_to_dev(hw), list_elem);
6730 			mutex_unlock(rule_lock);
6731 			if (list_empty(&r_list[rid].filt_rules)) {
6732 				r_list[rid].adv_rule = false;
6733 
6734 				/* All rules for this recipe are now removed */
6735 				if (hw->recp_reuse)
6736 					ice_release_recipe_res(hw,
6737 							       &r_list[rid]);
6738 			}
6739 		}
6740 		kfree(s_rule);
6741 	}
6742 	return status;
6743 }
6744 
6745 /**
6746  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6747  * @hw: pointer to the hardware structure
6748  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6749  *
6750  * This function is used to remove 1 rule at a time. The removal is based on
6751  * the remove_entry parameter. This function will remove rule for a given
6752  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6753  */
6754 int
6755 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6756 		       struct ice_rule_query_data *remove_entry)
6757 {
6758 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
6759 	struct list_head *list_head;
6760 	struct ice_adv_rule_info rinfo;
6761 	struct ice_switch_info *sw;
6762 
6763 	sw = hw->switch_info;
6764 	if (!sw->recp_list[remove_entry->rid].recp_created)
6765 		return -EINVAL;
6766 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6767 	list_for_each_entry(list_itr, list_head, list_entry) {
6768 		if (list_itr->rule_info.fltr_rule_id ==
6769 		    remove_entry->rule_id) {
6770 			rinfo = list_itr->rule_info;
6771 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6772 			return ice_rem_adv_rule(hw, list_itr->lkups,
6773 						list_itr->lkups_cnt, &rinfo);
6774 		}
6775 	}
6776 	/* either list is empty or unable to find rule */
6777 	return -ENOENT;
6778 }
6779 
6780 /**
6781  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6782  * @hw: pointer to the hardware structure
6783  * @vsi_handle: driver VSI handle
6784  * @list_head: list for which filters need to be replayed
6785  *
6786  * Replay the advanced rule for the given VSI.
6787  */
6788 static int
6789 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6790 			struct list_head *list_head)
6791 {
6792 	struct ice_rule_query_data added_entry = { 0 };
6793 	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6794 	int status = 0;
6795 
6796 	if (list_empty(list_head))
6797 		return status;
6798 	list_for_each_entry(adv_fltr, list_head, list_entry) {
6799 		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6800 		u16 lk_cnt = adv_fltr->lkups_cnt;
6801 
6802 		if (vsi_handle != rinfo->sw_act.vsi_handle)
6803 			continue;
6804 		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6805 					  &added_entry);
6806 		if (status)
6807 			break;
6808 	}
6809 	return status;
6810 }
6811 
6812 /**
6813  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6814  * @hw: pointer to the hardware structure
6815  * @vsi_handle: driver VSI handle
6816  *
6817  * Replays filters for requested VSI via vsi_handle.
6818  */
6819 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6820 {
6821 	struct ice_switch_info *sw = hw->switch_info;
6822 	int status;
6823 	u8 i;
6824 
6825 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6826 		struct list_head *head;
6827 
6828 		head = &sw->recp_list[i].filt_replay_rules;
6829 		if (!sw->recp_list[i].adv_rule)
6830 			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6831 		else
6832 			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6833 		if (status)
6834 			return status;
6835 	}
6836 	return status;
6837 }
6838 
6839 /**
6840  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6841  * @hw: pointer to the HW struct
6842  *
6843  * Deletes the filter replay rules.
6844  */
6845 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6846 {
6847 	struct ice_switch_info *sw = hw->switch_info;
6848 	u8 i;
6849 
6850 	if (!sw)
6851 		return;
6852 
6853 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6854 		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6855 			struct list_head *l_head;
6856 
6857 			l_head = &sw->recp_list[i].filt_replay_rules;
6858 			if (!sw->recp_list[i].adv_rule)
6859 				ice_rem_sw_rule_info(hw, l_head);
6860 			else
6861 				ice_rem_adv_rule_info(hw, l_head);
6862 		}
6863 	}
6864 }
6865