xref: /linux/drivers/net/ethernet/intel/ice/ice_switch.c (revision 2dcb8e8782d8e4c38903bf37b1a24d3ffd193da7)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6 
/* Byte offsets into dummy_eth_header below (DA at 0, EtherType at 12,
 * VLAN TCI at 14 when a tag is present) and related limits.
 */
#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF
#define ICE_IPV6_ETHER_ID		0x86DD

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter first two bytes defines ether type (0x8100)
 *	and remaining two bytes are placeholder for programming a given VLAN ID
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};
32 
/* Flag bits describing which headers a dummy packet template contains.
 * OR-ed combinations are stored in ice_dummy_pkt_profile::match and are
 * presumably compared against a rule's protocol list when selecting a
 * template -- confirm against the profile lookup code.
 */
enum {
	ICE_PKT_VLAN		= BIT(0),
	ICE_PKT_OUTER_IPV6	= BIT(1),
	ICE_PKT_TUN_GTPC	= BIT(2),
	ICE_PKT_TUN_GTPU	= BIT(3),
	ICE_PKT_TUN_NVGRE	= BIT(4),
	ICE_PKT_TUN_UDP		= BIT(5),
	ICE_PKT_INNER_IPV6	= BIT(6),
	ICE_PKT_INNER_TCP	= BIT(7),
	ICE_PKT_INNER_UDP	= BIT(8),
	ICE_PKT_GTP_NOPAY	= BIT(9),
};
45 
/* Maps one protocol header to its byte offset within a dummy packet
 * template. Arrays of these are terminated by type == ICE_PROTOCOL_LAST.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
50 
/* One dummy packet profile: the raw template bytes, their length, the
 * per-header offset table, and the ICE_PKT_* flags describing what the
 * template contains.
 */
struct ice_dummy_pkt_profile {
	const struct ice_dummy_pkt_offsets *offsets;
	const u8 *pkt;
	u32 match;	/* OR of ICE_PKT_* flags */
	u16 pkt_len;
};
57 
/* Declare the offset table for dummy packet "type" */
#define ICE_DECLARE_PKT_OFFSETS(type)				\
	static const struct ice_dummy_pkt_offsets		\
	ice_dummy_##type##_packet_offsets[]

/* Declare the raw byte template for dummy packet "type" */
#define ICE_DECLARE_PKT_TEMPLATE(type)				\
	static const u8 ice_dummy_##type##_packet[]

/* Build an ice_dummy_pkt_profile initializer from the pair of arrays
 * declared via the two macros above.
 */
#define ICE_PKT_PROFILE(type, m) {				\
	.match		= (m),					\
	.pkt		= ice_dummy_##type##_packet,		\
	.pkt_len	= sizeof(ice_dummy_##type##_packet),	\
	.offsets	= ice_dummy_##type##_packet_offsets,	\
}
71 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
118 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};
162 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share
 * offset 42) + MAC + IPv4 + TCP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
215 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share
 * offset 42) + MAC + IPv4 + UDP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};
265 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_TCP_IL,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x06, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
317 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_UDP_ILOS,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x11, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
	0x00, 0x08, 0x00, 0x00,
};
366 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share
 * offset 42) + MAC + IPv6 + TCP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_TCP_IL,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x5a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x06, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
424 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share
 * offset 42) + MAC + IPv6 + UDP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_UDP_ILOS,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x4e, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x11, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
	0x00, 0x08, 0x00, 0x00,
};
479 
/* offset info for MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
508 
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(vlan_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_UDP_ILOS,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
540 
/* offset info for MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
572 
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_TCP_IL,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:TCP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
607 
/* offset info for MAC + IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
642 
/* C-tag (802.1Q): IPv6 + TCP */
ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_TCP_IL,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
682 
/* IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
719 
/* C-tag (802.1Q): IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_UDP_ILOS,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
756 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_TCP_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + outer UDP + GTP-U + inner IPv4 + TCP;
 * the GTP header at 42 is followed by a PDU session extension header
 * (not listed in the offsets above).
 */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x58, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
804 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_UDP_ILOS,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + outer UDP + GTP-U + inner IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
849 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + outer UDP + GTP-U + inner IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
902 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + outer UDP + GTP-U + inner IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x60, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
951 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv6 + outer UDP + GTP-U + inner IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x44, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1003 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv6 + outer UDP + GTP-U + inner IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x38, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1052 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_TCP_IL,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv6 + outer UDP + GTP-U + inner IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x58, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1109 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_UDP_ILOS,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv6 + outer UDP + GTP-U + inner IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x4c, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1163 
/* Header offsets into the ipv4_gtpu_ipv4 dummy packet below. Note this
 * profile uses ICE_GTP_NO_PAY, i.e. only the GTP-U header itself is part
 * of the match; the list is terminated by ICE_PROTOCOL_LAST.
 */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1171 
/* Dummy packet: Ethernet (ethertype 0x0800) + IPv4 + UDP (GTP-U port
 * 0x0868) + GTP-U header with a PDU session container extension + a
 * minimal inner IPv4 header. Variable fields are zeroed placeholders.
 */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* TTL 64, protocol UDP (0x11) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1201 
/* Header offsets into the ipv6_gtp dummy packet below; this profile also
 * uses ICE_GTP_NO_PAY (no inner headers matched). Terminated by
 * ICE_PROTOCOL_LAST.
 */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1209 
/* Dummy packet: Ethernet (0x86dd) + IPv6 + UDP (GTP-U port 0x0868) + an
 * 8-byte GTP header (flags 0x30, i.e. no extension header, unlike the
 * 0x34-flagged templates above). No inner payload headers.
 */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1235 
/* Table mapping ICE_PKT_* match flags to a dummy packet/offset pair.
 * Entries appear ordered most-specific (most flags) first — presumably the
 * lookup helper scans in order and picks the first compatible profile;
 * NOTE(review): confirm against the dummy-packet selection code before
 * reordering entries.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
				  ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
				      ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
					  ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP |
				       ICE_PKT_VLAN),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | ICE_PKT_VLAN),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN),
	ICE_PKT_PROFILE(tcp, 0), /* catch-all: must stay last */
};
1284 
/* Size of a lookup rule element carrying the dummy Ethernet header */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
	 (DUMMY_ETH_HDR_LEN * \
	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
/* Size of a lookup rule element with no packet header attached */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
/* Size of a large-action rule element holding n actions */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
/* Size of a VSI-list rule element holding n VSI entries */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
1297 
/* this is a recipe to profile association bitmap: indexed by recipe ID,
 * each entry records which profiles the recipe is associated with
 */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap: indexed by profile ID,
 * each entry records which recipes are associated with the profile
 */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
1305 
1306 /**
1307  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1308  * @hw: pointer to the HW struct
1309  *
1310  * Allocate memory for the entire recipe table and initialize the structures/
1311  * entries corresponding to basic recipes.
1312  */
1313 int ice_init_def_sw_recp(struct ice_hw *hw)
1314 {
1315 	struct ice_sw_recipe *recps;
1316 	u8 i;
1317 
1318 	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1319 			     sizeof(*recps), GFP_KERNEL);
1320 	if (!recps)
1321 		return -ENOMEM;
1322 
1323 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1324 		recps[i].root_rid = i;
1325 		INIT_LIST_HEAD(&recps[i].filt_rules);
1326 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1327 		INIT_LIST_HEAD(&recps[i].rg_list);
1328 		mutex_init(&recps[i].filt_rule_lock);
1329 	}
1330 
1331 	hw->switch_info->recp_list = recps;
1332 
1333 	return 0;
1334 }
1335 
1336 /**
1337  * ice_aq_get_sw_cfg - get switch configuration
1338  * @hw: pointer to the hardware structure
1339  * @buf: pointer to the result buffer
1340  * @buf_size: length of the buffer available for response
1341  * @req_desc: pointer to requested descriptor
1342  * @num_elems: pointer to number of elements
1343  * @cd: pointer to command details structure or NULL
1344  *
1345  * Get switch configuration (0x0200) to be placed in buf.
1346  * This admin command returns information such as initial VSI/port number
1347  * and switch ID it belongs to.
1348  *
1349  * NOTE: *req_desc is both an input/output parameter.
1350  * The caller of this function first calls this function with *request_desc set
1351  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1352  * configuration information has been returned; if non-zero (meaning not all
1353  * the information was returned), the caller should call this function again
1354  * with *req_desc set to the previous value returned by f/w to get the
1355  * next block of switch configuration information.
1356  *
1357  * *num_elems is output only parameter. This reflects the number of elements
1358  * in response buffer. The caller of this function to use *num_elems while
1359  * parsing the response buffer.
1360  */
1361 static int
1362 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1363 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
1364 		  struct ice_sq_cd *cd)
1365 {
1366 	struct ice_aqc_get_sw_cfg *cmd;
1367 	struct ice_aq_desc desc;
1368 	int status;
1369 
1370 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1371 	cmd = &desc.params.get_sw_conf;
1372 	cmd->element = cpu_to_le16(*req_desc);
1373 
1374 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1375 	if (!status) {
1376 		*req_desc = le16_to_cpu(cmd->element);
1377 		*num_elems = le16_to_cpu(cmd->num_elems);
1378 	}
1379 
1380 	return status;
1381 }
1382 
1383 /**
1384  * ice_aq_add_vsi
1385  * @hw: pointer to the HW struct
1386  * @vsi_ctx: pointer to a VSI context struct
1387  * @cd: pointer to command details structure or NULL
1388  *
1389  * Add a VSI context to the hardware (0x0210)
1390  */
1391 static int
1392 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1393 	       struct ice_sq_cd *cd)
1394 {
1395 	struct ice_aqc_add_update_free_vsi_resp *res;
1396 	struct ice_aqc_add_get_update_free_vsi *cmd;
1397 	struct ice_aq_desc desc;
1398 	int status;
1399 
1400 	cmd = &desc.params.vsi_cmd;
1401 	res = &desc.params.add_update_free_vsi_res;
1402 
1403 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1404 
1405 	if (!vsi_ctx->alloc_from_pool)
1406 		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
1407 					   ICE_AQ_VSI_IS_VALID);
1408 	cmd->vf_id = vsi_ctx->vf_num;
1409 
1410 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1411 
1412 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1413 
1414 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1415 				 sizeof(vsi_ctx->info), cd);
1416 
1417 	if (!status) {
1418 		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1419 		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
1420 		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
1421 	}
1422 
1423 	return status;
1424 }
1425 
1426 /**
1427  * ice_aq_free_vsi
1428  * @hw: pointer to the HW struct
1429  * @vsi_ctx: pointer to a VSI context struct
1430  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1431  * @cd: pointer to command details structure or NULL
1432  *
1433  * Free VSI context info from hardware (0x0213)
1434  */
1435 static int
1436 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1437 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
1438 {
1439 	struct ice_aqc_add_update_free_vsi_resp *resp;
1440 	struct ice_aqc_add_get_update_free_vsi *cmd;
1441 	struct ice_aq_desc desc;
1442 	int status;
1443 
1444 	cmd = &desc.params.vsi_cmd;
1445 	resp = &desc.params.add_update_free_vsi_res;
1446 
1447 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1448 
1449 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1450 	if (keep_vsi_alloc)
1451 		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
1452 
1453 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1454 	if (!status) {
1455 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1456 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1457 	}
1458 
1459 	return status;
1460 }
1461 
1462 /**
1463  * ice_aq_update_vsi
1464  * @hw: pointer to the HW struct
1465  * @vsi_ctx: pointer to a VSI context struct
1466  * @cd: pointer to command details structure or NULL
1467  *
1468  * Update VSI context in the hardware (0x0211)
1469  */
1470 static int
1471 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1472 		  struct ice_sq_cd *cd)
1473 {
1474 	struct ice_aqc_add_update_free_vsi_resp *resp;
1475 	struct ice_aqc_add_get_update_free_vsi *cmd;
1476 	struct ice_aq_desc desc;
1477 	int status;
1478 
1479 	cmd = &desc.params.vsi_cmd;
1480 	resp = &desc.params.add_update_free_vsi_res;
1481 
1482 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1483 
1484 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1485 
1486 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1487 
1488 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1489 				 sizeof(vsi_ctx->info), cd);
1490 
1491 	if (!status) {
1492 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1493 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1494 	}
1495 
1496 	return status;
1497 }
1498 
1499 /**
1500  * ice_is_vsi_valid - check whether the VSI is valid or not
1501  * @hw: pointer to the HW struct
1502  * @vsi_handle: VSI handle
1503  *
1504  * check whether the VSI is valid or not
1505  */
1506 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1507 {
1508 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1509 }
1510 
1511 /**
1512  * ice_get_hw_vsi_num - return the HW VSI number
1513  * @hw: pointer to the HW struct
1514  * @vsi_handle: VSI handle
1515  *
1516  * return the HW VSI number
1517  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1518  */
1519 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1520 {
1521 	return hw->vsi_ctx[vsi_handle]->vsi_num;
1522 }
1523 
1524 /**
1525  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1526  * @hw: pointer to the HW struct
1527  * @vsi_handle: VSI handle
1528  *
1529  * return the VSI context entry for a given VSI handle
1530  */
1531 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1532 {
1533 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1534 }
1535 
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 *
 * No bounds check is performed here; the caller must ensure
 * vsi_handle < ICE_MAX_VSI. Ownership of @vsi transfers to the table
 * (it is freed later by ice_clear_vsi_ctx()).
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}
1549 
1550 /**
1551  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1552  * @hw: pointer to the HW struct
1553  * @vsi_handle: VSI handle
1554  */
1555 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1556 {
1557 	struct ice_vsi_ctx *vsi;
1558 	u8 i;
1559 
1560 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1561 	if (!vsi)
1562 		return;
1563 	ice_for_each_traffic_class(i) {
1564 		if (vsi->lan_q_ctx[i]) {
1565 			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1566 			vsi->lan_q_ctx[i] = NULL;
1567 		}
1568 		if (vsi->rdma_q_ctx[i]) {
1569 			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1570 			vsi->rdma_q_ctx[i] = NULL;
1571 		}
1572 	}
1573 }
1574 
1575 /**
1576  * ice_clear_vsi_ctx - clear the VSI context entry
1577  * @hw: pointer to the HW struct
1578  * @vsi_handle: VSI handle
1579  *
1580  * clear the VSI context entry
1581  */
1582 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1583 {
1584 	struct ice_vsi_ctx *vsi;
1585 
1586 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1587 	if (vsi) {
1588 		ice_clear_vsi_q_ctx(hw, vsi_handle);
1589 		devm_kfree(ice_hw_to_dev(hw), vsi);
1590 		hw->vsi_ctx[vsi_handle] = NULL;
1591 	}
1592 }
1593 
1594 /**
1595  * ice_clear_all_vsi_ctx - clear all the VSI context entries
1596  * @hw: pointer to the HW struct
1597  */
1598 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1599 {
1600 	u16 i;
1601 
1602 	for (i = 0; i < ICE_MAX_VSI; i++)
1603 		ice_clear_vsi_ctx(hw, i);
1604 }
1605 
1606 /**
1607  * ice_add_vsi - add VSI context to the hardware and VSI handle list
1608  * @hw: pointer to the HW struct
1609  * @vsi_handle: unique VSI handle provided by drivers
1610  * @vsi_ctx: pointer to a VSI context struct
1611  * @cd: pointer to command details structure or NULL
1612  *
1613  * Add a VSI context to the hardware also add it into the VSI handle list.
1614  * If this function gets called after reset for existing VSIs then update
1615  * with the new HW VSI number in the corresponding VSI handle list entry.
1616  */
1617 int
1618 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1619 	    struct ice_sq_cd *cd)
1620 {
1621 	struct ice_vsi_ctx *tmp_vsi_ctx;
1622 	int status;
1623 
1624 	if (vsi_handle >= ICE_MAX_VSI)
1625 		return -EINVAL;
1626 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1627 	if (status)
1628 		return status;
1629 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1630 	if (!tmp_vsi_ctx) {
1631 		/* Create a new VSI context */
1632 		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
1633 					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
1634 		if (!tmp_vsi_ctx) {
1635 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1636 			return -ENOMEM;
1637 		}
1638 		*tmp_vsi_ctx = *vsi_ctx;
1639 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1640 	} else {
1641 		/* update with new HW VSI num */
1642 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1643 	}
1644 
1645 	return 0;
1646 }
1647 
1648 /**
1649  * ice_free_vsi- free VSI context from hardware and VSI handle list
1650  * @hw: pointer to the HW struct
1651  * @vsi_handle: unique VSI handle
1652  * @vsi_ctx: pointer to a VSI context struct
1653  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1654  * @cd: pointer to command details structure or NULL
1655  *
1656  * Free VSI context info from hardware as well as from VSI handle list
1657  */
1658 int
1659 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1660 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
1661 {
1662 	int status;
1663 
1664 	if (!ice_is_vsi_valid(hw, vsi_handle))
1665 		return -EINVAL;
1666 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1667 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1668 	if (!status)
1669 		ice_clear_vsi_ctx(hw, vsi_handle);
1670 	return status;
1671 }
1672 
1673 /**
1674  * ice_update_vsi
1675  * @hw: pointer to the HW struct
1676  * @vsi_handle: unique VSI handle
1677  * @vsi_ctx: pointer to a VSI context struct
1678  * @cd: pointer to command details structure or NULL
1679  *
1680  * Update VSI context in the hardware
1681  */
1682 int
1683 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1684 	       struct ice_sq_cd *cd)
1685 {
1686 	if (!ice_is_vsi_valid(hw, vsi_handle))
1687 		return -EINVAL;
1688 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1689 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
1690 }
1691 
1692 /**
1693  * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1694  * @hw: pointer to HW struct
1695  * @vsi_handle: VSI SW index
1696  * @enable: boolean for enable/disable
1697  */
1698 int
1699 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1700 {
1701 	struct ice_vsi_ctx *ctx;
1702 
1703 	ctx = ice_get_vsi_ctx(hw, vsi_handle);
1704 	if (!ctx)
1705 		return -EIO;
1706 
1707 	if (enable)
1708 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1709 	else
1710 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1711 
1712 	return ice_update_vsi(hw, vsi_handle, ctx, NULL);
1713 }
1714 
1715 /**
1716  * ice_aq_alloc_free_vsi_list
1717  * @hw: pointer to the HW struct
1718  * @vsi_list_id: VSI list ID returned or used for lookup
1719  * @lkup_type: switch rule filter lookup type
1720  * @opc: switch rules population command type - pass in the command opcode
1721  *
1722  * allocates or free a VSI list resource
1723  */
1724 static int
1725 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1726 			   enum ice_sw_lkup_type lkup_type,
1727 			   enum ice_adminq_opc opc)
1728 {
1729 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1730 	struct ice_aqc_res_elem *vsi_ele;
1731 	u16 buf_len;
1732 	int status;
1733 
1734 	buf_len = struct_size(sw_buf, elem, 1);
1735 	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
1736 	if (!sw_buf)
1737 		return -ENOMEM;
1738 	sw_buf->num_elems = cpu_to_le16(1);
1739 
1740 	if (lkup_type == ICE_SW_LKUP_MAC ||
1741 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1742 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1743 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1744 	    lkup_type == ICE_SW_LKUP_PROMISC ||
1745 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
1746 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1747 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
1748 		sw_buf->res_type =
1749 			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1750 	} else {
1751 		status = -EINVAL;
1752 		goto ice_aq_alloc_free_vsi_list_exit;
1753 	}
1754 
1755 	if (opc == ice_aqc_opc_free_res)
1756 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1757 
1758 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1759 	if (status)
1760 		goto ice_aq_alloc_free_vsi_list_exit;
1761 
1762 	if (opc == ice_aqc_opc_alloc_res) {
1763 		vsi_ele = &sw_buf->elem[0];
1764 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1765 	}
1766 
1767 ice_aq_alloc_free_vsi_list_exit:
1768 	devm_kfree(ice_hw_to_dev(hw), sw_buf);
1769 	return status;
1770 }
1771 
1772 /**
1773  * ice_aq_sw_rules - add/update/remove switch rules
1774  * @hw: pointer to the HW struct
1775  * @rule_list: pointer to switch rule population list
1776  * @rule_list_sz: total size of the rule list in bytes
1777  * @num_rules: number of switch rules in the rule_list
1778  * @opc: switch rules population command type - pass in the command opcode
1779  * @cd: pointer to command details structure or NULL
1780  *
1781  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1782  */
1783 int
1784 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1785 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1786 {
1787 	struct ice_aq_desc desc;
1788 	int status;
1789 
1790 	if (opc != ice_aqc_opc_add_sw_rules &&
1791 	    opc != ice_aqc_opc_update_sw_rules &&
1792 	    opc != ice_aqc_opc_remove_sw_rules)
1793 		return -EINVAL;
1794 
1795 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1796 
1797 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1798 	desc.params.sw_rules.num_rules_fltr_entry_index =
1799 		cpu_to_le16(num_rules);
1800 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1801 	if (opc != ice_aqc_opc_add_sw_rules &&
1802 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1803 		status = -ENOENT;
1804 
1805 	return status;
1806 }
1807 
1808 /**
1809  * ice_aq_add_recipe - add switch recipe
1810  * @hw: pointer to the HW struct
1811  * @s_recipe_list: pointer to switch rule population list
1812  * @num_recipes: number of switch recipes in the list
1813  * @cd: pointer to command details structure or NULL
1814  *
1815  * Add(0x0290)
1816  */
1817 static int
1818 ice_aq_add_recipe(struct ice_hw *hw,
1819 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1820 		  u16 num_recipes, struct ice_sq_cd *cd)
1821 {
1822 	struct ice_aqc_add_get_recipe *cmd;
1823 	struct ice_aq_desc desc;
1824 	u16 buf_size;
1825 
1826 	cmd = &desc.params.add_get_recipe;
1827 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1828 
1829 	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1830 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1831 
1832 	buf_size = num_recipes * sizeof(*s_recipe_list);
1833 
1834 	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1835 }
1836 
1837 /**
1838  * ice_aq_get_recipe - get switch recipe
1839  * @hw: pointer to the HW struct
1840  * @s_recipe_list: pointer to switch rule population list
1841  * @num_recipes: pointer to the number of recipes (input and output)
1842  * @recipe_root: root recipe number of recipe(s) to retrieve
1843  * @cd: pointer to command details structure or NULL
1844  *
1845  * Get(0x0292)
1846  *
1847  * On input, *num_recipes should equal the number of entries in s_recipe_list.
1848  * On output, *num_recipes will equal the number of entries returned in
1849  * s_recipe_list.
1850  *
1851  * The caller must supply enough space in s_recipe_list to hold all possible
1852  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1853  */
1854 static int
1855 ice_aq_get_recipe(struct ice_hw *hw,
1856 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1857 		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1858 {
1859 	struct ice_aqc_add_get_recipe *cmd;
1860 	struct ice_aq_desc desc;
1861 	u16 buf_size;
1862 	int status;
1863 
1864 	if (*num_recipes != ICE_MAX_NUM_RECIPES)
1865 		return -EINVAL;
1866 
1867 	cmd = &desc.params.add_get_recipe;
1868 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1869 
1870 	cmd->return_index = cpu_to_le16(recipe_root);
1871 	cmd->num_sub_recipes = 0;
1872 
1873 	buf_size = *num_recipes * sizeof(*s_recipe_list);
1874 
1875 	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1876 	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1877 
1878 	return status;
1879 }
1880 
1881 /**
1882  * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
1883  * @hw: pointer to the HW struct
1884  * @params: parameters used to update the default recipe
1885  *
1886  * This function only supports updating default recipes and it only supports
1887  * updating a single recipe based on the lkup_idx at a time.
1888  *
1889  * This is done as a read-modify-write operation. First, get the current recipe
1890  * contents based on the recipe's ID. Then modify the field vector index and
1891  * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
1892  * the pre-existing recipe with the modifications.
1893  */
1894 int
1895 ice_update_recipe_lkup_idx(struct ice_hw *hw,
1896 			   struct ice_update_recipe_lkup_idx_params *params)
1897 {
1898 	struct ice_aqc_recipe_data_elem *rcp_list;
1899 	u16 num_recps = ICE_MAX_NUM_RECIPES;
1900 	int status;
1901 
1902 	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
1903 	if (!rcp_list)
1904 		return -ENOMEM;
1905 
1906 	/* read current recipe list from firmware */
1907 	rcp_list->recipe_indx = params->rid;
1908 	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
1909 	if (status) {
1910 		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
1911 			  params->rid, status);
1912 		goto error_out;
1913 	}
1914 
1915 	/* only modify existing recipe's lkup_idx and mask if valid, while
1916 	 * leaving all other fields the same, then update the recipe firmware
1917 	 */
1918 	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
1919 	if (params->mask_valid)
1920 		rcp_list->content.mask[params->lkup_idx] =
1921 			cpu_to_le16(params->mask);
1922 
1923 	if (params->ignore_valid)
1924 		rcp_list->content.lkup_indx[params->lkup_idx] |=
1925 			ICE_AQ_RECIPE_LKUP_IGNORE;
1926 
1927 	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
1928 	if (status)
1929 		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
1930 			  params->rid, params->lkup_idx, params->fv_idx,
1931 			  params->mask, params->mask_valid ? "true" : "false",
1932 			  status);
1933 
1934 error_out:
1935 	kfree(rcp_list);
1936 	return status;
1937 }
1938 
1939 /**
1940  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1941  * @hw: pointer to the HW struct
1942  * @profile_id: package profile ID to associate the recipe with
1943  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1944  * @cd: pointer to command details structure or NULL
1945  * Recipe to profile association (0x0291)
1946  */
1947 static int
1948 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1949 			     struct ice_sq_cd *cd)
1950 {
1951 	struct ice_aqc_recipe_to_profile *cmd;
1952 	struct ice_aq_desc desc;
1953 
1954 	cmd = &desc.params.recipe_to_profile;
1955 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1956 	cmd->profile_id = cpu_to_le16(profile_id);
1957 	/* Set the recipe ID bit in the bitmask to let the device know which
1958 	 * profile we are associating the recipe to
1959 	 */
1960 	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
1961 
1962 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1963 }
1964 
1965 /**
1966  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1967  * @hw: pointer to the HW struct
1968  * @profile_id: package profile ID to associate the recipe with
1969  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1970  * @cd: pointer to command details structure or NULL
1971  * Associate profile ID with given recipe (0x0293)
1972  */
1973 static int
1974 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1975 			     struct ice_sq_cd *cd)
1976 {
1977 	struct ice_aqc_recipe_to_profile *cmd;
1978 	struct ice_aq_desc desc;
1979 	int status;
1980 
1981 	cmd = &desc.params.recipe_to_profile;
1982 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1983 	cmd->profile_id = cpu_to_le16(profile_id);
1984 
1985 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1986 	if (!status)
1987 		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
1988 
1989 	return status;
1990 }
1991 
1992 /**
1993  * ice_alloc_recipe - add recipe resource
1994  * @hw: pointer to the hardware structure
1995  * @rid: recipe ID returned as response to AQ call
1996  */
1997 static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1998 {
1999 	struct ice_aqc_alloc_free_res_elem *sw_buf;
2000 	u16 buf_len;
2001 	int status;
2002 
2003 	buf_len = struct_size(sw_buf, elem, 1);
2004 	sw_buf = kzalloc(buf_len, GFP_KERNEL);
2005 	if (!sw_buf)
2006 		return -ENOMEM;
2007 
2008 	sw_buf->num_elems = cpu_to_le16(1);
2009 	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
2010 					ICE_AQC_RES_TYPE_S) |
2011 					ICE_AQC_RES_TYPE_FLAG_SHARED);
2012 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2013 				       ice_aqc_opc_alloc_res, NULL);
2014 	if (!status)
2015 		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
2016 	kfree(sw_buf);
2017 
2018 	return status;
2019 }
2020 
2021 /**
2022  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2023  * @hw: pointer to hardware structure
2024  *
2025  * This function is used to populate recipe_to_profile matrix where index to
2026  * this array is the recipe ID and the element is the mapping of which profiles
2027  * is this recipe mapped to.
2028  */
2029 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2030 {
2031 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
2032 	u16 i;
2033 
2034 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2035 		u16 j;
2036 
2037 		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2038 		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
2039 		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2040 			continue;
2041 		bitmap_copy(profile_to_recipe[i], r_bitmap,
2042 			    ICE_MAX_NUM_RECIPES);
2043 		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2044 			set_bit(i, recipe_to_profile[j]);
2045 	}
2046 }
2047 
2048 /**
2049  * ice_collect_result_idx - copy result index values
2050  * @buf: buffer that contains the result index
2051  * @recp: the recipe struct to copy data into
2052  */
2053 static void
2054 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2055 		       struct ice_sw_recipe *recp)
2056 {
2057 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2058 		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2059 			recp->res_idxs);
2060 }
2061 
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, otherwise the status
 * returned by the get-recipe AQ command (non-zero means the recipe does not
 * exist in firmware).
 */
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u8 fv_word_idx = 0;
	u16 sub_recps;
	int status;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* On return, num_recps holds how many elements (the root recipe plus
	 * any chained sub-recipes) FW actually copied into tmp.
	 */
	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	if (status)
		goto err_unroll;

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	}

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		bool is_root;
		u16 off = 0;

		/* devm-managed: released on driver teardown, not freed in
		 * the error path below
		 */
		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
					GFP_KERNEL);
		if (!rg_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
				result_bm);

		/* get the first profile that is associated with rid */
		/* NOTE(review): assumes at least one profile maps to this
		 * recipe; if none does, prof is ICE_MAX_NUM_PROFILES and is
		 * used to index prof_res_bm below - confirm callers only
		 * query recipes with a profile association
		 */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			/* lkup_indx[0] is skipped; words 1..N hold the
			 * field-vector indices for this recipe element
			 */
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
				continue;

			/* Translate the FV index into a protocol ID and
			 * offset for the lookup-extension table
			 */
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			fv_word_idx++;
		}
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		list_add(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
		} else {
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		}

		if (!is_root)
			continue;

		/* Only do the following for root recipes entries */
		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
		       sizeof(recps[idx].r_bitmap));
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	}

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	/* Keep a devm-managed copy of the raw FW recipe data */
	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
					   GFP_KERNEL);
	if (!recps[rid].root_buf) {
		status = -ENOMEM;
		goto err_unroll;
	}

	/* Copy result indexes */
	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;

err_unroll:
	kfree(tmp);
	return status;
}
2219 
2220 /* ice_init_port_info - Initialize port_info with switch configuration data
2221  * @pi: pointer to port_info
2222  * @vsi_port_num: VSI number or port number
2223  * @type: Type of switch element (port or VSI)
2224  * @swid: switch ID of the switch the element is attached to
2225  * @pf_vf_num: PF or VF number
2226  * @is_vf: true if the element is a VF, false otherwise
2227  */
2228 static void
2229 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2230 		   u16 swid, u16 pf_vf_num, bool is_vf)
2231 {
2232 	switch (type) {
2233 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2234 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2235 		pi->sw_id = swid;
2236 		pi->pf_vf_num = pf_vf_num;
2237 		pi->is_vf = is_vf;
2238 		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2239 		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2240 		break;
2241 	default:
2242 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2243 		break;
2244 	}
2245 }
2246 
2247 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2248  * @hw: pointer to the hardware structure
2249  */
2250 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2251 {
2252 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2253 	u16 req_desc = 0;
2254 	u16 num_elems;
2255 	int status;
2256 	u16 i;
2257 
2258 	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
2259 			    GFP_KERNEL);
2260 
2261 	if (!rbuf)
2262 		return -ENOMEM;
2263 
2264 	/* Multiple calls to ice_aq_get_sw_cfg may be required
2265 	 * to get all the switch configuration information. The need
2266 	 * for additional calls is indicated by ice_aq_get_sw_cfg
2267 	 * writing a non-zero value in req_desc
2268 	 */
2269 	do {
2270 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
2271 
2272 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2273 					   &req_desc, &num_elems, NULL);
2274 
2275 		if (status)
2276 			break;
2277 
2278 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2279 			u16 pf_vf_num, swid, vsi_port_num;
2280 			bool is_vf = false;
2281 			u8 res_type;
2282 
2283 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2284 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2285 
2286 			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2287 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2288 
2289 			swid = le16_to_cpu(ele->swid);
2290 
2291 			if (le16_to_cpu(ele->pf_vf_num) &
2292 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2293 				is_vf = true;
2294 
2295 			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2296 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2297 
2298 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2299 				/* FW VSI is not needed. Just continue. */
2300 				continue;
2301 			}
2302 
2303 			ice_init_port_info(hw->port_info, vsi_port_num,
2304 					   res_type, swid, pf_vf_num, is_vf);
2305 		}
2306 	} while (req_desc && !status);
2307 
2308 	devm_kfree(ice_hw_to_dev(hw), rbuf);
2309 	return status;
2310 }
2311 
2312 /**
2313  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2314  * @hw: pointer to the hardware structure
2315  * @fi: filter info structure to fill/update
2316  *
2317  * This helper function populates the lb_en and lan_en elements of the provided
2318  * ice_fltr_info struct using the switch's type and characteristics of the
2319  * switch rule being configured.
2320  */
2321 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2322 {
2323 	fi->lb_en = false;
2324 	fi->lan_en = false;
2325 	if ((fi->flag & ICE_FLTR_TX) &&
2326 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
2327 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2328 	     fi->fltr_act == ICE_FWD_TO_Q ||
2329 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2330 		/* Setting LB for prune actions will result in replicated
2331 		 * packets to the internal switch that will be dropped.
2332 		 */
2333 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2334 			fi->lb_en = true;
2335 
2336 		/* Set lan_en to TRUE if
2337 		 * 1. The switch is a VEB AND
2338 		 * 2
2339 		 * 2.1 The lookup is a directional lookup like ethertype,
2340 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
2341 		 * and default-port OR
2342 		 * 2.2 The lookup is VLAN, OR
2343 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2344 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2345 		 *
2346 		 * OR
2347 		 *
2348 		 * The switch is a VEPA.
2349 		 *
2350 		 * In all other cases, the LAN enable has to be set to false.
2351 		 */
2352 		if (hw->evb_veb) {
2353 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2354 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2355 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2356 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2357 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
2358 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
2359 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
2360 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2361 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2362 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2363 				fi->lan_en = true;
2364 		} else {
2365 			fi->lan_en = true;
2366 		}
2367 	}
2368 }
2369 
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the lookup Tx/Rx rule in @s_rule: encodes the forwarding action,
 * writes the dummy Ethernet header used for matching, and sets the rule
 * type/recipe/source fields. For a remove opcode only the rule index is
 * filled in.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* vlan_id > ICE_MAX_VLAN_ID means "no VLAN ID to program" */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u16 vlan_tpid = ETH_P_8021Q;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* Removal only needs the existing rule ID; no header or action */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	/* Encode the forwarding action into the act bitfield */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		/* VLAN rules use prune actions instead (set below) */
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* Queue group size is encoded as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* Unsupported action: leave the rule untouched */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Pick up the match fields (MAC/VLAN/ethertype) per lookup type */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		/* Program the ethertype into the dummy header */
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* Program the VLAN TCI and TPID when a valid VLAN ID was picked up */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(vlan_tpid);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
2507 
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 *
 * Return: 0 on success, -EINVAL for a non-MAC lookup, -ENOMEM on allocation
 * failure, or the status of the sw-rules AQ command.
 */
static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	u16 lg_act_size;
	u16 rules_size;
	int status;
	u32 act;
	u16 id;

	/* Markers are only supported on MAC filter rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return -EINVAL;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return -ENOMEM;

	/* rx_tx is the second rule, laid out immediately after the large
	 * action in the same buffer
	 */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* Record the marker on the management entry for later
		 * removal/update
		 */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
2611 
2612 /**
2613  * ice_create_vsi_list_map
2614  * @hw: pointer to the hardware structure
2615  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2616  * @num_vsi: number of VSI handles in the array
2617  * @vsi_list_id: VSI list ID generated as part of allocate resource
2618  *
2619  * Helper function to create a new entry of VSI list ID to VSI mapping
2620  * using the given VSI list ID
2621  */
2622 static struct ice_vsi_list_map_info *
2623 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2624 			u16 vsi_list_id)
2625 {
2626 	struct ice_switch_info *sw = hw->switch_info;
2627 	struct ice_vsi_list_map_info *v_map;
2628 	int i;
2629 
2630 	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2631 	if (!v_map)
2632 		return NULL;
2633 
2634 	v_map->vsi_list_id = vsi_list_id;
2635 	v_map->ref_cnt = 1;
2636 	for (i = 0; i < num_vsi; i++)
2637 		set_bit(vsi_handle_arr[i], v_map->vsi_map);
2638 
2639 	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2640 	return v_map;
2641 }
2642 
2643 /**
2644  * ice_update_vsi_list_rule
2645  * @hw: pointer to the hardware structure
2646  * @vsi_handle_arr: array of VSI handles to form a VSI list
2647  * @num_vsi: number of VSI handles in the array
2648  * @vsi_list_id: VSI list ID generated as part of allocate resource
2649  * @remove: Boolean value to indicate if this is a remove action
2650  * @opc: switch rules population command type - pass in the command opcode
2651  * @lkup_type: lookup type of the filter
2652  *
2653  * Call AQ command to add a new switch rule or update existing switch rule
2654  * using the given VSI list ID
2655  */
2656 static int
2657 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2658 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2659 			 enum ice_sw_lkup_type lkup_type)
2660 {
2661 	struct ice_aqc_sw_rules_elem *s_rule;
2662 	u16 s_rule_size;
2663 	u16 rule_type;
2664 	int status;
2665 	int i;
2666 
2667 	if (!num_vsi)
2668 		return -EINVAL;
2669 
2670 	if (lkup_type == ICE_SW_LKUP_MAC ||
2671 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2672 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2673 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2674 	    lkup_type == ICE_SW_LKUP_PROMISC ||
2675 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
2676 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2677 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2678 	else if (lkup_type == ICE_SW_LKUP_VLAN)
2679 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2680 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2681 	else
2682 		return -EINVAL;
2683 
2684 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2685 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2686 	if (!s_rule)
2687 		return -ENOMEM;
2688 	for (i = 0; i < num_vsi; i++) {
2689 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2690 			status = -EINVAL;
2691 			goto exit;
2692 		}
2693 		/* AQ call requires hw_vsi_id(s) */
2694 		s_rule->pdata.vsi_list.vsi[i] =
2695 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2696 	}
2697 
2698 	s_rule->type = cpu_to_le16(rule_type);
2699 	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
2700 	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
2701 
2702 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2703 
2704 exit:
2705 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2706 	return status;
2707 }
2708 
2709 /**
2710  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2711  * @hw: pointer to the HW struct
2712  * @vsi_handle_arr: array of VSI handles to form a VSI list
2713  * @num_vsi: number of VSI handles in the array
2714  * @vsi_list_id: stores the ID of the VSI list to be created
2715  * @lkup_type: switch rule filter's lookup type
2716  */
2717 static int
2718 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2719 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2720 {
2721 	int status;
2722 
2723 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2724 					    ice_aqc_opc_alloc_res);
2725 	if (status)
2726 		return status;
2727 
2728 	/* Update the newly created VSI list to include the specified VSIs */
2729 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2730 					*vsi_list_id, false,
2731 					ice_aqc_opc_add_sw_rules, lkup_type);
2732 }
2733 
2734 /**
2735  * ice_create_pkt_fwd_rule
2736  * @hw: pointer to the hardware structure
2737  * @f_entry: entry containing packet forwarding information
2738  *
2739  * Create switch rule with given filter information and add an entry
2740  * to the corresponding filter management list to track this switch rule
2741  * and VSI mapping
2742  */
2743 static int
2744 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2745 			struct ice_fltr_list_entry *f_entry)
2746 {
2747 	struct ice_fltr_mgmt_list_entry *fm_entry;
2748 	struct ice_aqc_sw_rules_elem *s_rule;
2749 	enum ice_sw_lkup_type l_type;
2750 	struct ice_sw_recipe *recp;
2751 	int status;
2752 
2753 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2754 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
2755 	if (!s_rule)
2756 		return -ENOMEM;
2757 	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2758 				GFP_KERNEL);
2759 	if (!fm_entry) {
2760 		status = -ENOMEM;
2761 		goto ice_create_pkt_fwd_rule_exit;
2762 	}
2763 
2764 	fm_entry->fltr_info = f_entry->fltr_info;
2765 
2766 	/* Initialize all the fields for the management entry */
2767 	fm_entry->vsi_count = 1;
2768 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2769 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2770 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2771 
2772 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2773 			 ice_aqc_opc_add_sw_rules);
2774 
2775 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2776 				 ice_aqc_opc_add_sw_rules, NULL);
2777 	if (status) {
2778 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
2779 		goto ice_create_pkt_fwd_rule_exit;
2780 	}
2781 
2782 	f_entry->fltr_info.fltr_rule_id =
2783 		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
2784 	fm_entry->fltr_info.fltr_rule_id =
2785 		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
2786 
2787 	/* The book keeping entries will get removed when base driver
2788 	 * calls remove filter AQ command
2789 	 */
2790 	l_type = fm_entry->fltr_info.lkup_type;
2791 	recp = &hw->switch_info->recp_list[l_type];
2792 	list_add(&fm_entry->list_entry, &recp->filt_rules);
2793 
2794 ice_create_pkt_fwd_rule_exit:
2795 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2796 	return status;
2797 }
2798 
2799 /**
2800  * ice_update_pkt_fwd_rule
2801  * @hw: pointer to the hardware structure
2802  * @f_info: filter information for switch rule
2803  *
2804  * Call AQ command to update a previously created switch rule with a
2805  * VSI list ID
2806  */
2807 static int
2808 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2809 {
2810 	struct ice_aqc_sw_rules_elem *s_rule;
2811 	int status;
2812 
2813 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2814 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
2815 	if (!s_rule)
2816 		return -ENOMEM;
2817 
2818 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2819 
2820 	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
2821 
2822 	/* Update switch rule with new rule set to forward VSI list */
2823 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2824 				 ice_aqc_opc_update_sw_rules, NULL);
2825 
2826 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2827 	return status;
2828 }
2829 
2830 /**
2831  * ice_update_sw_rule_bridge_mode
2832  * @hw: pointer to the HW struct
2833  *
2834  * Updates unicast switch filter rules based on VEB/VEPA mode
2835  */
2836 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2837 {
2838 	struct ice_switch_info *sw = hw->switch_info;
2839 	struct ice_fltr_mgmt_list_entry *fm_entry;
2840 	struct list_head *rule_head;
2841 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2842 	int status = 0;
2843 
2844 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2845 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2846 
2847 	mutex_lock(rule_lock);
2848 	list_for_each_entry(fm_entry, rule_head, list_entry) {
2849 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
2850 		u8 *addr = fi->l_data.mac.mac_addr;
2851 
2852 		/* Update unicast Tx rules to reflect the selected
2853 		 * VEB/VEPA mode
2854 		 */
2855 		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2856 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
2857 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2858 		     fi->fltr_act == ICE_FWD_TO_Q ||
2859 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2860 			status = ice_update_pkt_fwd_rule(hw, fi);
2861 			if (status)
2862 				break;
2863 		}
2864 	}
2865 
2866 	mutex_unlock(rule_lock);
2867 
2868 	return status;
2869 }
2870 
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 *
 * Return: 0 on success (including "VSI already present"), -EOPNOTSUPP for
 * queue-action combinations that cannot share a list, -EEXIST if the exact
 * rule already exists, -EIO/-ENOMEM on bookkeeping failures, or the status
 * of the underlying AQ commands.
 */
static int
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status = 0;

	/* Queue-directed actions cannot be aggregated into a VSI list */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return -EOPNOTSUPP;

	/* Likewise, a queue action cannot join an existing VSI forward */
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* Switch the bookkeeping entry over to the list action */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return -ENOMEM;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		/* vsi_count >= 2 implies a list map must already exist */
		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
2987 
2988 /**
2989  * ice_find_rule_entry - Search a rule entry
2990  * @hw: pointer to the hardware structure
2991  * @recp_id: lookup type for which the specified rule needs to be searched
2992  * @f_info: rule information
2993  *
2994  * Helper function to search for a given rule entry
2995  * Returns pointer to entry storing the rule if found
2996  */
2997 static struct ice_fltr_mgmt_list_entry *
2998 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2999 {
3000 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3001 	struct ice_switch_info *sw = hw->switch_info;
3002 	struct list_head *list_head;
3003 
3004 	list_head = &sw->recp_list[recp_id].filt_rules;
3005 	list_for_each_entry(list_itr, list_head, list_entry) {
3006 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3007 			    sizeof(f_info->l_data)) &&
3008 		    f_info->flag == list_itr->fltr_info.flag) {
3009 			ret = list_itr;
3010 			break;
3011 		}
3012 	}
3013 	return ret;
3014 }
3015 
3016 /**
3017  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3018  * @hw: pointer to the hardware structure
3019  * @recp_id: lookup type for which VSI lists needs to be searched
3020  * @vsi_handle: VSI handle to be found in VSI list
3021  * @vsi_list_id: VSI list ID found containing vsi_handle
3022  *
3023  * Helper function to search a VSI list with single entry containing given VSI
3024  * handle element. This can be extended further to search VSI list with more
3025  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3026  */
3027 static struct ice_vsi_list_map_info *
3028 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3029 			u16 *vsi_list_id)
3030 {
3031 	struct ice_vsi_list_map_info *map_info = NULL;
3032 	struct ice_switch_info *sw = hw->switch_info;
3033 	struct ice_fltr_mgmt_list_entry *list_itr;
3034 	struct list_head *list_head;
3035 
3036 	list_head = &sw->recp_list[recp_id].filt_rules;
3037 	list_for_each_entry(list_itr, list_head, list_entry) {
3038 		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
3039 			map_info = list_itr->vsi_list_info;
3040 			if (test_bit(vsi_handle, map_info->vsi_map)) {
3041 				*vsi_list_id = map_info->vsi_list_id;
3042 				return map_info;
3043 			}
3044 		}
3045 	}
3046 	return NULL;
3047 }
3048 
3049 /**
3050  * ice_add_rule_internal - add rule for a given lookup type
3051  * @hw: pointer to the hardware structure
3052  * @recp_id: lookup type (recipe ID) for which rule has to be added
3053  * @f_entry: structure containing MAC forwarding information
3054  *
3055  * Adds or updates the rule lists for a given recipe
3056  */
3057 static int
3058 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3059 		      struct ice_fltr_list_entry *f_entry)
3060 {
3061 	struct ice_switch_info *sw = hw->switch_info;
3062 	struct ice_fltr_info *new_fltr, *cur_fltr;
3063 	struct ice_fltr_mgmt_list_entry *m_entry;
3064 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3065 	int status = 0;
3066 
3067 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3068 		return -EINVAL;
3069 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3070 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3071 
3072 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3073 
3074 	mutex_lock(rule_lock);
3075 	new_fltr = &f_entry->fltr_info;
3076 	if (new_fltr->flag & ICE_FLTR_RX)
3077 		new_fltr->src = hw->port_info->lport;
3078 	else if (new_fltr->flag & ICE_FLTR_TX)
3079 		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
3080 
3081 	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
3082 	if (!m_entry) {
3083 		mutex_unlock(rule_lock);
3084 		return ice_create_pkt_fwd_rule(hw, f_entry);
3085 	}
3086 
3087 	cur_fltr = &m_entry->fltr_info;
3088 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3089 	mutex_unlock(rule_lock);
3090 
3091 	return status;
3092 }
3093 
3094 /**
3095  * ice_remove_vsi_list_rule
3096  * @hw: pointer to the hardware structure
3097  * @vsi_list_id: VSI list ID generated as part of allocate resource
3098  * @lkup_type: switch rule filter lookup type
3099  *
3100  * The VSI list should be emptied before this function is called to remove the
3101  * VSI list.
3102  */
3103 static int
3104 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3105 			 enum ice_sw_lkup_type lkup_type)
3106 {
3107 	struct ice_aqc_sw_rules_elem *s_rule;
3108 	u16 s_rule_size;
3109 	int status;
3110 
3111 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
3112 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3113 	if (!s_rule)
3114 		return -ENOMEM;
3115 
3116 	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3117 	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
3118 
3119 	/* Free the vsi_list resource that we allocated. It is assumed that the
3120 	 * list is empty at this point.
3121 	 */
3122 	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3123 					    ice_aqc_opc_free_res);
3124 
3125 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3126 	return status;
3127 }
3128 
3129 /**
3130  * ice_rem_update_vsi_list
3131  * @hw: pointer to the hardware structure
3132  * @vsi_handle: VSI handle of the VSI to remove
3133  * @fm_list: filter management entry for which the VSI list management needs to
3134  *           be done
3135  */
3136 static int
3137 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3138 			struct ice_fltr_mgmt_list_entry *fm_list)
3139 {
3140 	enum ice_sw_lkup_type lkup_type;
3141 	u16 vsi_list_id;
3142 	int status = 0;
3143 
3144 	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3145 	    fm_list->vsi_count == 0)
3146 		return -EINVAL;
3147 
3148 	/* A rule with the VSI being removed does not exist */
3149 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
3150 		return -ENOENT;
3151 
3152 	lkup_type = fm_list->fltr_info.lkup_type;
3153 	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3154 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3155 					  ice_aqc_opc_update_sw_rules,
3156 					  lkup_type);
3157 	if (status)
3158 		return status;
3159 
3160 	fm_list->vsi_count--;
3161 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3162 
3163 	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3164 		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3165 		struct ice_vsi_list_map_info *vsi_list_info =
3166 			fm_list->vsi_list_info;
3167 		u16 rem_vsi_handle;
3168 
3169 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
3170 						ICE_MAX_VSI);
3171 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3172 			return -EIO;
3173 
3174 		/* Make sure VSI list is empty before removing it below */
3175 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3176 						  vsi_list_id, true,
3177 						  ice_aqc_opc_update_sw_rules,
3178 						  lkup_type);
3179 		if (status)
3180 			return status;
3181 
3182 		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3183 		tmp_fltr_info.fwd_id.hw_vsi_id =
3184 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
3185 		tmp_fltr_info.vsi_handle = rem_vsi_handle;
3186 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3187 		if (status) {
3188 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3189 				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
3190 			return status;
3191 		}
3192 
3193 		fm_list->fltr_info = tmp_fltr_info;
3194 	}
3195 
3196 	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3197 	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3198 		struct ice_vsi_list_map_info *vsi_list_info =
3199 			fm_list->vsi_list_info;
3200 
3201 		/* Remove the VSI list since it is no longer used */
3202 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3203 		if (status) {
3204 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3205 				  vsi_list_id, status);
3206 			return status;
3207 		}
3208 
3209 		list_del(&vsi_list_info->list_entry);
3210 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
3211 		fm_list->vsi_list_info = NULL;
3212 	}
3213 
3214 	return status;
3215 }
3216 
3217 /**
3218  * ice_remove_rule_internal - Remove a filter rule of a given type
3219  * @hw: pointer to the hardware structure
3220  * @recp_id: recipe ID for which the rule needs to removed
3221  * @f_entry: rule entry containing filter information
3222  */
3223 static int
3224 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
3225 			 struct ice_fltr_list_entry *f_entry)
3226 {
3227 	struct ice_switch_info *sw = hw->switch_info;
3228 	struct ice_fltr_mgmt_list_entry *list_elem;
3229 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3230 	bool remove_rule = false;
3231 	u16 vsi_handle;
3232 	int status = 0;
3233 
3234 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3235 		return -EINVAL;
3236 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3237 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3238 
3239 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3240 	mutex_lock(rule_lock);
3241 	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
3242 	if (!list_elem) {
3243 		status = -ENOENT;
3244 		goto exit;
3245 	}
3246 
3247 	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3248 		remove_rule = true;
3249 	} else if (!list_elem->vsi_list_info) {
3250 		status = -ENOENT;
3251 		goto exit;
3252 	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
3253 		/* a ref_cnt > 1 indicates that the vsi_list is being
3254 		 * shared by multiple rules. Decrement the ref_cnt and
3255 		 * remove this rule, but do not modify the list, as it
3256 		 * is in-use by other rules.
3257 		 */
3258 		list_elem->vsi_list_info->ref_cnt--;
3259 		remove_rule = true;
3260 	} else {
3261 		/* a ref_cnt of 1 indicates the vsi_list is only used
3262 		 * by one rule. However, the original removal request is only
3263 		 * for a single VSI. Update the vsi_list first, and only
3264 		 * remove the rule if there are no further VSIs in this list.
3265 		 */
3266 		vsi_handle = f_entry->fltr_info.vsi_handle;
3267 		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3268 		if (status)
3269 			goto exit;
3270 		/* if VSI count goes to zero after updating the VSI list */
3271 		if (list_elem->vsi_count == 0)
3272 			remove_rule = true;
3273 	}
3274 
3275 	if (remove_rule) {
3276 		/* Remove the lookup rule */
3277 		struct ice_aqc_sw_rules_elem *s_rule;
3278 
3279 		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
3280 				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
3281 				      GFP_KERNEL);
3282 		if (!s_rule) {
3283 			status = -ENOMEM;
3284 			goto exit;
3285 		}
3286 
3287 		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3288 				 ice_aqc_opc_remove_sw_rules);
3289 
3290 		status = ice_aq_sw_rules(hw, s_rule,
3291 					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3292 					 ice_aqc_opc_remove_sw_rules, NULL);
3293 
3294 		/* Remove a book keeping from the list */
3295 		devm_kfree(ice_hw_to_dev(hw), s_rule);
3296 
3297 		if (status)
3298 			goto exit;
3299 
3300 		list_del(&list_elem->list_entry);
3301 		devm_kfree(ice_hw_to_dev(hw), list_elem);
3302 	}
3303 exit:
3304 	mutex_unlock(rule_lock);
3305 	return status;
3306 }
3307 
3308 /**
3309  * ice_mac_fltr_exist - does this MAC filter exist for given VSI
3310  * @hw: pointer to the hardware structure
3311  * @mac: MAC address to be checked (for MAC filter)
3312  * @vsi_handle: check MAC filter for this VSI
3313  */
3314 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
3315 {
3316 	struct ice_fltr_mgmt_list_entry *entry;
3317 	struct list_head *rule_head;
3318 	struct ice_switch_info *sw;
3319 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3320 	u16 hw_vsi_id;
3321 
3322 	if (!ice_is_vsi_valid(hw, vsi_handle))
3323 		return false;
3324 
3325 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3326 	sw = hw->switch_info;
3327 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3328 	if (!rule_head)
3329 		return false;
3330 
3331 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3332 	mutex_lock(rule_lock);
3333 	list_for_each_entry(entry, rule_head, list_entry) {
3334 		struct ice_fltr_info *f_info = &entry->fltr_info;
3335 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3336 
3337 		if (is_zero_ether_addr(mac_addr))
3338 			continue;
3339 
3340 		if (f_info->flag != ICE_FLTR_TX ||
3341 		    f_info->src_id != ICE_SRC_ID_VSI ||
3342 		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
3343 		    f_info->fltr_act != ICE_FWD_TO_VSI ||
3344 		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3345 			continue;
3346 
3347 		if (ether_addr_equal(mac, mac_addr)) {
3348 			mutex_unlock(rule_lock);
3349 			return true;
3350 		}
3351 	}
3352 	mutex_unlock(rule_lock);
3353 	return false;
3354 }
3355 
3356 /**
3357  * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
3358  * @hw: pointer to the hardware structure
3359  * @vlan_id: VLAN ID
3360  * @vsi_handle: check MAC filter for this VSI
3361  */
3362 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
3363 {
3364 	struct ice_fltr_mgmt_list_entry *entry;
3365 	struct list_head *rule_head;
3366 	struct ice_switch_info *sw;
3367 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3368 	u16 hw_vsi_id;
3369 
3370 	if (vlan_id > ICE_MAX_VLAN_ID)
3371 		return false;
3372 
3373 	if (!ice_is_vsi_valid(hw, vsi_handle))
3374 		return false;
3375 
3376 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3377 	sw = hw->switch_info;
3378 	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3379 	if (!rule_head)
3380 		return false;
3381 
3382 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3383 	mutex_lock(rule_lock);
3384 	list_for_each_entry(entry, rule_head, list_entry) {
3385 		struct ice_fltr_info *f_info = &entry->fltr_info;
3386 		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
3387 		struct ice_vsi_list_map_info *map_info;
3388 
3389 		if (entry_vlan_id > ICE_MAX_VLAN_ID)
3390 			continue;
3391 
3392 		if (f_info->flag != ICE_FLTR_TX ||
3393 		    f_info->src_id != ICE_SRC_ID_VSI ||
3394 		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
3395 			continue;
3396 
3397 		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
3398 		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
3399 		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
3400 			continue;
3401 
3402 		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
3403 			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3404 				continue;
3405 		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3406 			/* If filter_action is FWD_TO_VSI_LIST, make sure
3407 			 * that VSI being checked is part of VSI list
3408 			 */
3409 			if (entry->vsi_count == 1 &&
3410 			    entry->vsi_list_info) {
3411 				map_info = entry->vsi_list_info;
3412 				if (!test_bit(vsi_handle, map_info->vsi_map))
3413 					continue;
3414 			}
3415 		}
3416 
3417 		if (vlan_id == entry_vlan_id) {
3418 			mutex_unlock(rule_lock);
3419 			return true;
3420 		}
3421 	}
3422 	mutex_unlock(rule_lock);
3423 
3424 	return false;
3425 }
3426 
3427 /**
3428  * ice_add_mac - Add a MAC address based filter rule
3429  * @hw: pointer to the hardware structure
3430  * @m_list: list of MAC addresses and forwarding information
3431  *
3432  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3433  * multiple unicast addresses, the function assumes that all the
3434  * addresses are unique in a given add_mac call. It doesn't
3435  * check for duplicates in this case, removing duplicates from a given
3436  * list should be taken care of in the caller of this function.
3437  */
3438 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3439 {
3440 	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3441 	struct ice_fltr_list_entry *m_list_itr;
3442 	struct list_head *rule_head;
3443 	u16 total_elem_left, s_rule_size;
3444 	struct ice_switch_info *sw;
3445 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3446 	u16 num_unicast = 0;
3447 	int status = 0;
3448 	u8 elem_sent;
3449 
3450 	if (!m_list || !hw)
3451 		return -EINVAL;
3452 
3453 	s_rule = NULL;
3454 	sw = hw->switch_info;
3455 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3456 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3457 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3458 		u16 vsi_handle;
3459 		u16 hw_vsi_id;
3460 
3461 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3462 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
3463 		if (!ice_is_vsi_valid(hw, vsi_handle))
3464 			return -EINVAL;
3465 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3466 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3467 		/* update the src in case it is VSI num */
3468 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3469 			return -EINVAL;
3470 		m_list_itr->fltr_info.src = hw_vsi_id;
3471 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3472 		    is_zero_ether_addr(add))
3473 			return -EINVAL;
3474 		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
3475 			/* Don't overwrite the unicast address */
3476 			mutex_lock(rule_lock);
3477 			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3478 						&m_list_itr->fltr_info)) {
3479 				mutex_unlock(rule_lock);
3480 				return -EEXIST;
3481 			}
3482 			mutex_unlock(rule_lock);
3483 			num_unicast++;
3484 		} else if (is_multicast_ether_addr(add) ||
3485 			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
3486 			m_list_itr->status =
3487 				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3488 						      m_list_itr);
3489 			if (m_list_itr->status)
3490 				return m_list_itr->status;
3491 		}
3492 	}
3493 
3494 	mutex_lock(rule_lock);
3495 	/* Exit if no suitable entries were found for adding bulk switch rule */
3496 	if (!num_unicast) {
3497 		status = 0;
3498 		goto ice_add_mac_exit;
3499 	}
3500 
3501 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3502 
3503 	/* Allocate switch rule buffer for the bulk update for unicast */
3504 	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3505 	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
3506 			      GFP_KERNEL);
3507 	if (!s_rule) {
3508 		status = -ENOMEM;
3509 		goto ice_add_mac_exit;
3510 	}
3511 
3512 	r_iter = s_rule;
3513 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3514 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3515 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3516 
3517 		if (is_unicast_ether_addr(mac_addr)) {
3518 			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3519 					 ice_aqc_opc_add_sw_rules);
3520 			r_iter = (struct ice_aqc_sw_rules_elem *)
3521 				((u8 *)r_iter + s_rule_size);
3522 		}
3523 	}
3524 
3525 	/* Call AQ bulk switch rule update for all unicast addresses */
3526 	r_iter = s_rule;
3527 	/* Call AQ switch rule in AQ_MAX chunk */
3528 	for (total_elem_left = num_unicast; total_elem_left > 0;
3529 	     total_elem_left -= elem_sent) {
3530 		struct ice_aqc_sw_rules_elem *entry = r_iter;
3531 
3532 		elem_sent = min_t(u8, total_elem_left,
3533 				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3534 		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3535 					 elem_sent, ice_aqc_opc_add_sw_rules,
3536 					 NULL);
3537 		if (status)
3538 			goto ice_add_mac_exit;
3539 		r_iter = (struct ice_aqc_sw_rules_elem *)
3540 			((u8 *)r_iter + (elem_sent * s_rule_size));
3541 	}
3542 
3543 	/* Fill up rule ID based on the value returned from FW */
3544 	r_iter = s_rule;
3545 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3546 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3547 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3548 		struct ice_fltr_mgmt_list_entry *fm_entry;
3549 
3550 		if (is_unicast_ether_addr(mac_addr)) {
3551 			f_info->fltr_rule_id =
3552 				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
3553 			f_info->fltr_act = ICE_FWD_TO_VSI;
3554 			/* Create an entry to track this MAC address */
3555 			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
3556 						sizeof(*fm_entry), GFP_KERNEL);
3557 			if (!fm_entry) {
3558 				status = -ENOMEM;
3559 				goto ice_add_mac_exit;
3560 			}
3561 			fm_entry->fltr_info = *f_info;
3562 			fm_entry->vsi_count = 1;
3563 			/* The book keeping entries will get removed when
3564 			 * base driver calls remove filter AQ command
3565 			 */
3566 
3567 			list_add(&fm_entry->list_entry, rule_head);
3568 			r_iter = (struct ice_aqc_sw_rules_elem *)
3569 				((u8 *)r_iter + s_rule_size);
3570 		}
3571 	}
3572 
3573 ice_add_mac_exit:
3574 	mutex_unlock(rule_lock);
3575 	if (s_rule)
3576 		devm_kfree(ice_hw_to_dev(hw), s_rule);
3577 	return status;
3578 }
3579 
3580 /**
3581  * ice_add_vlan_internal - Add one VLAN based filter rule
3582  * @hw: pointer to the hardware structure
3583  * @f_entry: filter entry containing one VLAN information
3584  */
3585 static int
3586 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3587 {
3588 	struct ice_switch_info *sw = hw->switch_info;
3589 	struct ice_fltr_mgmt_list_entry *v_list_itr;
3590 	struct ice_fltr_info *new_fltr, *cur_fltr;
3591 	enum ice_sw_lkup_type lkup_type;
3592 	u16 vsi_list_id = 0, vsi_handle;
3593 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3594 	int status = 0;
3595 
3596 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3597 		return -EINVAL;
3598 
3599 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3600 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3601 	new_fltr = &f_entry->fltr_info;
3602 
3603 	/* VLAN ID should only be 12 bits */
3604 	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3605 		return -EINVAL;
3606 
3607 	if (new_fltr->src_id != ICE_SRC_ID_VSI)
3608 		return -EINVAL;
3609 
3610 	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3611 	lkup_type = new_fltr->lkup_type;
3612 	vsi_handle = new_fltr->vsi_handle;
3613 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3614 	mutex_lock(rule_lock);
3615 	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3616 	if (!v_list_itr) {
3617 		struct ice_vsi_list_map_info *map_info = NULL;
3618 
3619 		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3620 			/* All VLAN pruning rules use a VSI list. Check if
3621 			 * there is already a VSI list containing VSI that we
3622 			 * want to add. If found, use the same vsi_list_id for
3623 			 * this new VLAN rule or else create a new list.
3624 			 */
3625 			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3626 							   vsi_handle,
3627 							   &vsi_list_id);
3628 			if (!map_info) {
3629 				status = ice_create_vsi_list_rule(hw,
3630 								  &vsi_handle,
3631 								  1,
3632 								  &vsi_list_id,
3633 								  lkup_type);
3634 				if (status)
3635 					goto exit;
3636 			}
3637 			/* Convert the action to forwarding to a VSI list. */
3638 			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3639 			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3640 		}
3641 
3642 		status = ice_create_pkt_fwd_rule(hw, f_entry);
3643 		if (!status) {
3644 			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3645 							 new_fltr);
3646 			if (!v_list_itr) {
3647 				status = -ENOENT;
3648 				goto exit;
3649 			}
3650 			/* reuse VSI list for new rule and increment ref_cnt */
3651 			if (map_info) {
3652 				v_list_itr->vsi_list_info = map_info;
3653 				map_info->ref_cnt++;
3654 			} else {
3655 				v_list_itr->vsi_list_info =
3656 					ice_create_vsi_list_map(hw, &vsi_handle,
3657 								1, vsi_list_id);
3658 			}
3659 		}
3660 	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3661 		/* Update existing VSI list to add new VSI ID only if it used
3662 		 * by one VLAN rule.
3663 		 */
3664 		cur_fltr = &v_list_itr->fltr_info;
3665 		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3666 						 new_fltr);
3667 	} else {
3668 		/* If VLAN rule exists and VSI list being used by this rule is
3669 		 * referenced by more than 1 VLAN rule. Then create a new VSI
3670 		 * list appending previous VSI with new VSI and update existing
3671 		 * VLAN rule to point to new VSI list ID
3672 		 */
3673 		struct ice_fltr_info tmp_fltr;
3674 		u16 vsi_handle_arr[2];
3675 		u16 cur_handle;
3676 
3677 		/* Current implementation only supports reusing VSI list with
3678 		 * one VSI count. We should never hit below condition
3679 		 */
3680 		if (v_list_itr->vsi_count > 1 &&
3681 		    v_list_itr->vsi_list_info->ref_cnt > 1) {
3682 			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3683 			status = -EIO;
3684 			goto exit;
3685 		}
3686 
3687 		cur_handle =
3688 			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3689 				       ICE_MAX_VSI);
3690 
3691 		/* A rule already exists with the new VSI being added */
3692 		if (cur_handle == vsi_handle) {
3693 			status = -EEXIST;
3694 			goto exit;
3695 		}
3696 
3697 		vsi_handle_arr[0] = cur_handle;
3698 		vsi_handle_arr[1] = vsi_handle;
3699 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3700 						  &vsi_list_id, lkup_type);
3701 		if (status)
3702 			goto exit;
3703 
3704 		tmp_fltr = v_list_itr->fltr_info;
3705 		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3706 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3707 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3708 		/* Update the previous switch rule to a new VSI list which
3709 		 * includes current VSI that is requested
3710 		 */
3711 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3712 		if (status)
3713 			goto exit;
3714 
3715 		/* before overriding VSI list map info. decrement ref_cnt of
3716 		 * previous VSI list
3717 		 */
3718 		v_list_itr->vsi_list_info->ref_cnt--;
3719 
3720 		/* now update to newly created list */
3721 		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3722 		v_list_itr->vsi_list_info =
3723 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3724 						vsi_list_id);
3725 		v_list_itr->vsi_count++;
3726 	}
3727 
3728 exit:
3729 	mutex_unlock(rule_lock);
3730 	return status;
3731 }
3732 
3733 /**
3734  * ice_add_vlan - Add VLAN based filter rule
3735  * @hw: pointer to the hardware structure
3736  * @v_list: list of VLAN entries and forwarding information
3737  */
3738 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3739 {
3740 	struct ice_fltr_list_entry *v_list_itr;
3741 
3742 	if (!v_list || !hw)
3743 		return -EINVAL;
3744 
3745 	list_for_each_entry(v_list_itr, v_list, list_entry) {
3746 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3747 			return -EINVAL;
3748 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3749 		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3750 		if (v_list_itr->status)
3751 			return v_list_itr->status;
3752 	}
3753 	return 0;
3754 }
3755 
3756 /**
3757  * ice_add_eth_mac - Add ethertype and MAC based filter rule
3758  * @hw: pointer to the hardware structure
3759  * @em_list: list of ether type MAC filter, MAC is optional
3760  *
3761  * This function requires the caller to populate the entries in
3762  * the filter list with the necessary fields (including flags to
3763  * indicate Tx or Rx rules).
3764  */
3765 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3766 {
3767 	struct ice_fltr_list_entry *em_list_itr;
3768 
3769 	if (!em_list || !hw)
3770 		return -EINVAL;
3771 
3772 	list_for_each_entry(em_list_itr, em_list, list_entry) {
3773 		enum ice_sw_lkup_type l_type =
3774 			em_list_itr->fltr_info.lkup_type;
3775 
3776 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3777 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3778 			return -EINVAL;
3779 
3780 		em_list_itr->status = ice_add_rule_internal(hw, l_type,
3781 							    em_list_itr);
3782 		if (em_list_itr->status)
3783 			return em_list_itr->status;
3784 	}
3785 	return 0;
3786 }
3787 
3788 /**
3789  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3790  * @hw: pointer to the hardware structure
3791  * @em_list: list of ethertype or ethertype MAC entries
3792  */
3793 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3794 {
3795 	struct ice_fltr_list_entry *em_list_itr, *tmp;
3796 
3797 	if (!em_list || !hw)
3798 		return -EINVAL;
3799 
3800 	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3801 		enum ice_sw_lkup_type l_type =
3802 			em_list_itr->fltr_info.lkup_type;
3803 
3804 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3805 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3806 			return -EINVAL;
3807 
3808 		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3809 							       em_list_itr);
3810 		if (em_list_itr->status)
3811 			return em_list_itr->status;
3812 	}
3813 	return 0;
3814 }
3815 
3816 /**
3817  * ice_rem_sw_rule_info
3818  * @hw: pointer to the hardware structure
3819  * @rule_head: pointer to the switch list structure that we want to delete
3820  */
3821 static void
3822 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3823 {
3824 	if (!list_empty(rule_head)) {
3825 		struct ice_fltr_mgmt_list_entry *entry;
3826 		struct ice_fltr_mgmt_list_entry *tmp;
3827 
3828 		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3829 			list_del(&entry->list_entry);
3830 			devm_kfree(ice_hw_to_dev(hw), entry);
3831 		}
3832 	}
3833 }
3834 
3835 /**
3836  * ice_rem_adv_rule_info
3837  * @hw: pointer to the hardware structure
3838  * @rule_head: pointer to the switch list structure that we want to delete
3839  */
3840 static void
3841 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3842 {
3843 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3844 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3845 
3846 	if (list_empty(rule_head))
3847 		return;
3848 
3849 	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3850 		list_del(&lst_itr->list_entry);
3851 		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3852 		devm_kfree(ice_hw_to_dev(hw), lst_itr);
3853 	}
3854 }
3855 
3856 /**
3857  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3858  * @hw: pointer to the hardware structure
3859  * @vsi_handle: VSI handle to set as default
3860  * @set: true to add the above mentioned switch rule, false to remove it
3861  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3862  *
3863  * add filter rule to set/unset given VSI as default VSI for the switch
3864  * (represented by swid)
3865  */
3866 int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
3867 {
3868 	struct ice_aqc_sw_rules_elem *s_rule;
3869 	struct ice_fltr_info f_info;
3870 	enum ice_adminq_opc opcode;
3871 	u16 s_rule_size;
3872 	u16 hw_vsi_id;
3873 	int status;
3874 
3875 	if (!ice_is_vsi_valid(hw, vsi_handle))
3876 		return -EINVAL;
3877 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3878 
3879 	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3880 		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3881 
3882 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3883 	if (!s_rule)
3884 		return -ENOMEM;
3885 
3886 	memset(&f_info, 0, sizeof(f_info));
3887 
3888 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
3889 	f_info.flag = direction;
3890 	f_info.fltr_act = ICE_FWD_TO_VSI;
3891 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3892 
3893 	if (f_info.flag & ICE_FLTR_RX) {
3894 		f_info.src = hw->port_info->lport;
3895 		f_info.src_id = ICE_SRC_ID_LPORT;
3896 		if (!set)
3897 			f_info.fltr_rule_id =
3898 				hw->port_info->dflt_rx_vsi_rule_id;
3899 	} else if (f_info.flag & ICE_FLTR_TX) {
3900 		f_info.src_id = ICE_SRC_ID_VSI;
3901 		f_info.src = hw_vsi_id;
3902 		if (!set)
3903 			f_info.fltr_rule_id =
3904 				hw->port_info->dflt_tx_vsi_rule_id;
3905 	}
3906 
3907 	if (set)
3908 		opcode = ice_aqc_opc_add_sw_rules;
3909 	else
3910 		opcode = ice_aqc_opc_remove_sw_rules;
3911 
3912 	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3913 
3914 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3915 	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3916 		goto out;
3917 	if (set) {
3918 		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
3919 
3920 		if (f_info.flag & ICE_FLTR_TX) {
3921 			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
3922 			hw->port_info->dflt_tx_vsi_rule_id = index;
3923 		} else if (f_info.flag & ICE_FLTR_RX) {
3924 			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
3925 			hw->port_info->dflt_rx_vsi_rule_id = index;
3926 		}
3927 	} else {
3928 		if (f_info.flag & ICE_FLTR_TX) {
3929 			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3930 			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3931 		} else if (f_info.flag & ICE_FLTR_RX) {
3932 			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3933 			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3934 		}
3935 	}
3936 
3937 out:
3938 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3939 	return status;
3940 }
3941 
3942 /**
3943  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3944  * @hw: pointer to the hardware structure
3945  * @recp_id: lookup type for which the specified rule needs to be searched
3946  * @f_info: rule information
3947  *
3948  * Helper function to search for a unicast rule entry - this is to be used
3949  * to remove unicast MAC filter that is not shared with other VSIs on the
3950  * PF switch.
3951  *
3952  * Returns pointer to entry storing the rule if found
3953  */
3954 static struct ice_fltr_mgmt_list_entry *
3955 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3956 			  struct ice_fltr_info *f_info)
3957 {
3958 	struct ice_switch_info *sw = hw->switch_info;
3959 	struct ice_fltr_mgmt_list_entry *list_itr;
3960 	struct list_head *list_head;
3961 
3962 	list_head = &sw->recp_list[recp_id].filt_rules;
3963 	list_for_each_entry(list_itr, list_head, list_entry) {
3964 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3965 			    sizeof(f_info->l_data)) &&
3966 		    f_info->fwd_id.hw_vsi_id ==
3967 		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
3968 		    f_info->flag == list_itr->fltr_info.flag)
3969 			return list_itr;
3970 	}
3971 	return NULL;
3972 }
3973 
3974 /**
3975  * ice_remove_mac - remove a MAC address based filter rule
3976  * @hw: pointer to the hardware structure
3977  * @m_list: list of MAC addresses and forwarding information
3978  *
3979  * This function removes either a MAC filter rule or a specific VSI from a
3980  * VSI list for a multicast MAC address.
3981  *
3982  * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3983  * be aware that this call will only work if all the entries passed into m_list
3984  * were added previously. It will not attempt to do a partial remove of entries
3985  * that were found.
3986  */
3987 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3988 {
3989 	struct ice_fltr_list_entry *list_itr, *tmp;
3990 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3991 
3992 	if (!m_list)
3993 		return -EINVAL;
3994 
3995 	rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3996 	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3997 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3998 		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3999 		u16 vsi_handle;
4000 
4001 		if (l_type != ICE_SW_LKUP_MAC)
4002 			return -EINVAL;
4003 
4004 		vsi_handle = list_itr->fltr_info.vsi_handle;
4005 		if (!ice_is_vsi_valid(hw, vsi_handle))
4006 			return -EINVAL;
4007 
4008 		list_itr->fltr_info.fwd_id.hw_vsi_id =
4009 					ice_get_hw_vsi_num(hw, vsi_handle);
4010 		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
4011 			/* Don't remove the unicast address that belongs to
4012 			 * another VSI on the switch, since it is not being
4013 			 * shared...
4014 			 */
4015 			mutex_lock(rule_lock);
4016 			if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
4017 						       &list_itr->fltr_info)) {
4018 				mutex_unlock(rule_lock);
4019 				return -ENOENT;
4020 			}
4021 			mutex_unlock(rule_lock);
4022 		}
4023 		list_itr->status = ice_remove_rule_internal(hw,
4024 							    ICE_SW_LKUP_MAC,
4025 							    list_itr);
4026 		if (list_itr->status)
4027 			return list_itr->status;
4028 	}
4029 	return 0;
4030 }
4031 
4032 /**
4033  * ice_remove_vlan - Remove VLAN based filter rule
4034  * @hw: pointer to the hardware structure
4035  * @v_list: list of VLAN entries and forwarding information
4036  */
4037 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
4038 {
4039 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4040 
4041 	if (!v_list || !hw)
4042 		return -EINVAL;
4043 
4044 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4045 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4046 
4047 		if (l_type != ICE_SW_LKUP_VLAN)
4048 			return -EINVAL;
4049 		v_list_itr->status = ice_remove_rule_internal(hw,
4050 							      ICE_SW_LKUP_VLAN,
4051 							      v_list_itr);
4052 		if (v_list_itr->status)
4053 			return v_list_itr->status;
4054 	}
4055 	return 0;
4056 }
4057 
4058 /**
4059  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4060  * @fm_entry: filter entry to inspect
4061  * @vsi_handle: VSI handle to compare with filter info
4062  */
4063 static bool
4064 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4065 {
4066 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4067 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4068 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4069 		 fm_entry->vsi_list_info &&
4070 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
4071 }
4072 
4073 /**
4074  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4075  * @hw: pointer to the hardware structure
4076  * @vsi_handle: VSI handle to remove filters from
4077  * @vsi_list_head: pointer to the list to add entry to
4078  * @fi: pointer to fltr_info of filter entry to copy & add
4079  *
4080  * Helper function, used when creating a list of filters to remove from
4081  * a specific VSI. The entry added to vsi_list_head is a COPY of the
4082  * original filter entry, with the exception of fltr_info.fltr_act and
4083  * fltr_info.fwd_id fields. These are set such that later logic can
4084  * extract which VSI to remove the fltr from, and pass on that information.
4085  */
4086 static int
4087 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4088 			       struct list_head *vsi_list_head,
4089 			       struct ice_fltr_info *fi)
4090 {
4091 	struct ice_fltr_list_entry *tmp;
4092 
4093 	/* this memory is freed up in the caller function
4094 	 * once filters for this VSI are removed
4095 	 */
4096 	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
4097 	if (!tmp)
4098 		return -ENOMEM;
4099 
4100 	tmp->fltr_info = *fi;
4101 
4102 	/* Overwrite these fields to indicate which VSI to remove filter from,
4103 	 * so find and remove logic can extract the information from the
4104 	 * list entries. Note that original entries will still have proper
4105 	 * values.
4106 	 */
4107 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4108 	tmp->fltr_info.vsi_handle = vsi_handle;
4109 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4110 
4111 	list_add(&tmp->list_entry, vsi_list_head);
4112 
4113 	return 0;
4114 }
4115 
4116 /**
4117  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4118  * @hw: pointer to the hardware structure
4119  * @vsi_handle: VSI handle to remove filters from
4120  * @lkup_list_head: pointer to the list that has certain lookup type filters
4121  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4122  *
4123  * Locates all filters in lkup_list_head that are used by the given VSI,
4124  * and adds COPIES of those entries to vsi_list_head (intended to be used
4125  * to remove the listed filters).
4126  * Note that this means all entries in vsi_list_head must be explicitly
4127  * deallocated by the caller when done with list.
4128  */
4129 static int
4130 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4131 			 struct list_head *lkup_list_head,
4132 			 struct list_head *vsi_list_head)
4133 {
4134 	struct ice_fltr_mgmt_list_entry *fm_entry;
4135 	int status = 0;
4136 
4137 	/* check to make sure VSI ID is valid and within boundary */
4138 	if (!ice_is_vsi_valid(hw, vsi_handle))
4139 		return -EINVAL;
4140 
4141 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4142 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4143 			continue;
4144 
4145 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4146 							vsi_list_head,
4147 							&fm_entry->fltr_info);
4148 		if (status)
4149 			return status;
4150 	}
4151 	return status;
4152 }
4153 
4154 /**
4155  * ice_determine_promisc_mask
4156  * @fi: filter info to parse
4157  *
4158  * Helper function to determine which ICE_PROMISC_ mask corresponds
4159  * to given filter into.
4160  */
4161 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4162 {
4163 	u16 vid = fi->l_data.mac_vlan.vlan_id;
4164 	u8 *macaddr = fi->l_data.mac.mac_addr;
4165 	bool is_tx_fltr = false;
4166 	u8 promisc_mask = 0;
4167 
4168 	if (fi->flag == ICE_FLTR_TX)
4169 		is_tx_fltr = true;
4170 
4171 	if (is_broadcast_ether_addr(macaddr))
4172 		promisc_mask |= is_tx_fltr ?
4173 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4174 	else if (is_multicast_ether_addr(macaddr))
4175 		promisc_mask |= is_tx_fltr ?
4176 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4177 	else if (is_unicast_ether_addr(macaddr))
4178 		promisc_mask |= is_tx_fltr ?
4179 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4180 	if (vid)
4181 		promisc_mask |= is_tx_fltr ?
4182 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4183 
4184 	return promisc_mask;
4185 }
4186 
4187 /**
4188  * ice_remove_promisc - Remove promisc based filter rules
4189  * @hw: pointer to the hardware structure
4190  * @recp_id: recipe ID for which the rule needs to removed
4191  * @v_list: list of promisc entries
4192  */
4193 static int
4194 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4195 {
4196 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4197 
4198 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4199 		v_list_itr->status =
4200 			ice_remove_rule_internal(hw, recp_id, v_list_itr);
4201 		if (v_list_itr->status)
4202 			return v_list_itr->status;
4203 	}
4204 	return 0;
4205 }
4206 
4207 /**
4208  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4209  * @hw: pointer to the hardware structure
4210  * @vsi_handle: VSI handle to clear mode
4211  * @promisc_mask: mask of promiscuous config bits to clear
4212  * @vid: VLAN ID to clear VLAN promiscuous
4213  */
4214 int
4215 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4216 		      u16 vid)
4217 {
4218 	struct ice_switch_info *sw = hw->switch_info;
4219 	struct ice_fltr_list_entry *fm_entry, *tmp;
4220 	struct list_head remove_list_head;
4221 	struct ice_fltr_mgmt_list_entry *itr;
4222 	struct list_head *rule_head;
4223 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4224 	int status = 0;
4225 	u8 recipe_id;
4226 
4227 	if (!ice_is_vsi_valid(hw, vsi_handle))
4228 		return -EINVAL;
4229 
4230 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4231 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4232 	else
4233 		recipe_id = ICE_SW_LKUP_PROMISC;
4234 
4235 	rule_head = &sw->recp_list[recipe_id].filt_rules;
4236 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4237 
4238 	INIT_LIST_HEAD(&remove_list_head);
4239 
4240 	mutex_lock(rule_lock);
4241 	list_for_each_entry(itr, rule_head, list_entry) {
4242 		struct ice_fltr_info *fltr_info;
4243 		u8 fltr_promisc_mask = 0;
4244 
4245 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
4246 			continue;
4247 		fltr_info = &itr->fltr_info;
4248 
4249 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4250 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
4251 			continue;
4252 
4253 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4254 
4255 		/* Skip if filter is not completely specified by given mask */
4256 		if (fltr_promisc_mask & ~promisc_mask)
4257 			continue;
4258 
4259 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4260 							&remove_list_head,
4261 							fltr_info);
4262 		if (status) {
4263 			mutex_unlock(rule_lock);
4264 			goto free_fltr_list;
4265 		}
4266 	}
4267 	mutex_unlock(rule_lock);
4268 
4269 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4270 
4271 free_fltr_list:
4272 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4273 		list_del(&fm_entry->list_entry);
4274 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4275 	}
4276 
4277 	return status;
4278 }
4279 
4280 /**
4281  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4282  * @hw: pointer to the hardware structure
4283  * @vsi_handle: VSI handle to configure
4284  * @promisc_mask: mask of promiscuous config bits
4285  * @vid: VLAN ID to set VLAN promiscuous
4286  */
4287 int
4288 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4289 {
4290 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4291 	struct ice_fltr_list_entry f_list_entry;
4292 	struct ice_fltr_info new_fltr;
4293 	bool is_tx_fltr;
4294 	int status = 0;
4295 	u16 hw_vsi_id;
4296 	int pkt_type;
4297 	u8 recipe_id;
4298 
4299 	if (!ice_is_vsi_valid(hw, vsi_handle))
4300 		return -EINVAL;
4301 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4302 
4303 	memset(&new_fltr, 0, sizeof(new_fltr));
4304 
4305 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4306 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4307 		new_fltr.l_data.mac_vlan.vlan_id = vid;
4308 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4309 	} else {
4310 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4311 		recipe_id = ICE_SW_LKUP_PROMISC;
4312 	}
4313 
4314 	/* Separate filters must be set for each direction/packet type
4315 	 * combination, so we will loop over the mask value, store the
4316 	 * individual type, and clear it out in the input mask as it
4317 	 * is found.
4318 	 */
4319 	while (promisc_mask) {
4320 		u8 *mac_addr;
4321 
4322 		pkt_type = 0;
4323 		is_tx_fltr = false;
4324 
4325 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4326 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4327 			pkt_type = UCAST_FLTR;
4328 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4329 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4330 			pkt_type = UCAST_FLTR;
4331 			is_tx_fltr = true;
4332 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4333 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4334 			pkt_type = MCAST_FLTR;
4335 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4336 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4337 			pkt_type = MCAST_FLTR;
4338 			is_tx_fltr = true;
4339 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4340 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4341 			pkt_type = BCAST_FLTR;
4342 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4343 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4344 			pkt_type = BCAST_FLTR;
4345 			is_tx_fltr = true;
4346 		}
4347 
4348 		/* Check for VLAN promiscuous flag */
4349 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4350 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4351 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4352 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4353 			is_tx_fltr = true;
4354 		}
4355 
4356 		/* Set filter DA based on packet type */
4357 		mac_addr = new_fltr.l_data.mac.mac_addr;
4358 		if (pkt_type == BCAST_FLTR) {
4359 			eth_broadcast_addr(mac_addr);
4360 		} else if (pkt_type == MCAST_FLTR ||
4361 			   pkt_type == UCAST_FLTR) {
4362 			/* Use the dummy ether header DA */
4363 			ether_addr_copy(mac_addr, dummy_eth_header);
4364 			if (pkt_type == MCAST_FLTR)
4365 				mac_addr[0] |= 0x1;	/* Set multicast bit */
4366 		}
4367 
4368 		/* Need to reset this to zero for all iterations */
4369 		new_fltr.flag = 0;
4370 		if (is_tx_fltr) {
4371 			new_fltr.flag |= ICE_FLTR_TX;
4372 			new_fltr.src = hw_vsi_id;
4373 		} else {
4374 			new_fltr.flag |= ICE_FLTR_RX;
4375 			new_fltr.src = hw->port_info->lport;
4376 		}
4377 
4378 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
4379 		new_fltr.vsi_handle = vsi_handle;
4380 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4381 		f_list_entry.fltr_info = new_fltr;
4382 
4383 		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4384 		if (status)
4385 			goto set_promisc_exit;
4386 	}
4387 
4388 set_promisc_exit:
4389 	return status;
4390 }
4391 
4392 /**
4393  * ice_set_vlan_vsi_promisc
4394  * @hw: pointer to the hardware structure
4395  * @vsi_handle: VSI handle to configure
4396  * @promisc_mask: mask of promiscuous config bits
4397  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4398  *
4399  * Configure VSI with all associated VLANs to given promiscuous mode(s)
4400  */
4401 int
4402 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4403 			 bool rm_vlan_promisc)
4404 {
4405 	struct ice_switch_info *sw = hw->switch_info;
4406 	struct ice_fltr_list_entry *list_itr, *tmp;
4407 	struct list_head vsi_list_head;
4408 	struct list_head *vlan_head;
4409 	struct mutex *vlan_lock; /* Lock to protect filter rule list */
4410 	u16 vlan_id;
4411 	int status;
4412 
4413 	INIT_LIST_HEAD(&vsi_list_head);
4414 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4415 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4416 	mutex_lock(vlan_lock);
4417 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4418 					  &vsi_list_head);
4419 	mutex_unlock(vlan_lock);
4420 	if (status)
4421 		goto free_fltr_list;
4422 
4423 	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
4424 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4425 		if (rm_vlan_promisc)
4426 			status = ice_clear_vsi_promisc(hw, vsi_handle,
4427 						       promisc_mask, vlan_id);
4428 		else
4429 			status = ice_set_vsi_promisc(hw, vsi_handle,
4430 						     promisc_mask, vlan_id);
4431 		if (status)
4432 			break;
4433 	}
4434 
4435 free_fltr_list:
4436 	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
4437 		list_del(&list_itr->list_entry);
4438 		devm_kfree(ice_hw_to_dev(hw), list_itr);
4439 	}
4440 	return status;
4441 }
4442 
4443 /**
4444  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4445  * @hw: pointer to the hardware structure
4446  * @vsi_handle: VSI handle to remove filters from
4447  * @lkup: switch rule filter lookup type
4448  */
4449 static void
4450 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4451 			 enum ice_sw_lkup_type lkup)
4452 {
4453 	struct ice_switch_info *sw = hw->switch_info;
4454 	struct ice_fltr_list_entry *fm_entry;
4455 	struct list_head remove_list_head;
4456 	struct list_head *rule_head;
4457 	struct ice_fltr_list_entry *tmp;
4458 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4459 	int status;
4460 
4461 	INIT_LIST_HEAD(&remove_list_head);
4462 	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4463 	rule_head = &sw->recp_list[lkup].filt_rules;
4464 	mutex_lock(rule_lock);
4465 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4466 					  &remove_list_head);
4467 	mutex_unlock(rule_lock);
4468 	if (status)
4469 		goto free_fltr_list;
4470 
4471 	switch (lkup) {
4472 	case ICE_SW_LKUP_MAC:
4473 		ice_remove_mac(hw, &remove_list_head);
4474 		break;
4475 	case ICE_SW_LKUP_VLAN:
4476 		ice_remove_vlan(hw, &remove_list_head);
4477 		break;
4478 	case ICE_SW_LKUP_PROMISC:
4479 	case ICE_SW_LKUP_PROMISC_VLAN:
4480 		ice_remove_promisc(hw, lkup, &remove_list_head);
4481 		break;
4482 	case ICE_SW_LKUP_MAC_VLAN:
4483 	case ICE_SW_LKUP_ETHERTYPE:
4484 	case ICE_SW_LKUP_ETHERTYPE_MAC:
4485 	case ICE_SW_LKUP_DFLT:
4486 	case ICE_SW_LKUP_LAST:
4487 	default:
4488 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
4489 		break;
4490 	}
4491 
4492 free_fltr_list:
4493 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4494 		list_del(&fm_entry->list_entry);
4495 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4496 	}
4497 }
4498 
4499 /**
4500  * ice_remove_vsi_fltr - Remove all filters for a VSI
4501  * @hw: pointer to the hardware structure
4502  * @vsi_handle: VSI handle to remove filters from
4503  */
4504 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4505 {
4506 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4507 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4508 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4509 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4510 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4511 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4512 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4513 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4514 }
4515 
4516 /**
4517  * ice_alloc_res_cntr - allocating resource counter
4518  * @hw: pointer to the hardware structure
4519  * @type: type of resource
4520  * @alloc_shared: if set it is shared else dedicated
4521  * @num_items: number of entries requested for FD resource type
4522  * @counter_id: counter index returned by AQ call
4523  */
4524 int
4525 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4526 		   u16 *counter_id)
4527 {
4528 	struct ice_aqc_alloc_free_res_elem *buf;
4529 	u16 buf_len;
4530 	int status;
4531 
4532 	/* Allocate resource */
4533 	buf_len = struct_size(buf, elem, 1);
4534 	buf = kzalloc(buf_len, GFP_KERNEL);
4535 	if (!buf)
4536 		return -ENOMEM;
4537 
4538 	buf->num_elems = cpu_to_le16(num_items);
4539 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4540 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4541 
4542 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4543 				       ice_aqc_opc_alloc_res, NULL);
4544 	if (status)
4545 		goto exit;
4546 
4547 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4548 
4549 exit:
4550 	kfree(buf);
4551 	return status;
4552 }
4553 
4554 /**
4555  * ice_free_res_cntr - free resource counter
4556  * @hw: pointer to the hardware structure
4557  * @type: type of resource
4558  * @alloc_shared: if set it is shared else dedicated
4559  * @num_items: number of entries to be freed for FD resource type
4560  * @counter_id: counter ID resource which needs to be freed
4561  */
4562 int
4563 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4564 		  u16 counter_id)
4565 {
4566 	struct ice_aqc_alloc_free_res_elem *buf;
4567 	u16 buf_len;
4568 	int status;
4569 
4570 	/* Free resource */
4571 	buf_len = struct_size(buf, elem, 1);
4572 	buf = kzalloc(buf_len, GFP_KERNEL);
4573 	if (!buf)
4574 		return -ENOMEM;
4575 
4576 	buf->num_elems = cpu_to_le16(num_items);
4577 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4578 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4579 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4580 
4581 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4582 				       ice_aqc_opc_free_res, NULL);
4583 	if (status)
4584 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4585 
4586 	kfree(buf);
4587 	return status;
4588 }
4589 
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_ETYPE_IL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	/* 20 words covering the full 40-byte IPv6 header */
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
};
4620 
/* Maps each software protocol type to its hardware protocol ID.
 * ice_fill_valid_words() indexes this table directly by enum value
 * (ice_prot_id_tbl[rule->type]), so entries must remain in
 * ice_protocol_type order. Non-const because ice_change_proto_id_to_dvm()
 * rewrites the ICE_VLAN_OFOS entry at runtime.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
};
4640 
4641 /**
4642  * ice_find_recp - find a recipe
4643  * @hw: pointer to the hardware structure
4644  * @lkup_exts: extension sequence to match
4645  * @tun_type: type of recipe tunnel
4646  *
4647  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4648  */
4649 static u16
4650 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
4651 	      enum ice_sw_tunnel_type tun_type)
4652 {
4653 	bool refresh_required = true;
4654 	struct ice_sw_recipe *recp;
4655 	u8 i;
4656 
4657 	/* Walk through existing recipes to find a match */
4658 	recp = hw->switch_info->recp_list;
4659 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4660 		/* If recipe was not created for this ID, in SW bookkeeping,
4661 		 * check if FW has an entry for this recipe. If the FW has an
4662 		 * entry update it in our SW bookkeeping and continue with the
4663 		 * matching.
4664 		 */
4665 		if (!recp[i].recp_created)
4666 			if (ice_get_recp_frm_fw(hw,
4667 						hw->switch_info->recp_list, i,
4668 						&refresh_required))
4669 				continue;
4670 
4671 		/* Skip inverse action recipes */
4672 		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4673 		    ICE_AQ_RECIPE_ACT_INV_ACT)
4674 			continue;
4675 
4676 		/* if number of words we are looking for match */
4677 		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4678 			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
4679 			struct ice_fv_word *be = lkup_exts->fv_words;
4680 			u16 *cr = recp[i].lkup_exts.field_mask;
4681 			u16 *de = lkup_exts->field_mask;
4682 			bool found = true;
4683 			u8 pe, qr;
4684 
4685 			/* ar, cr, and qr are related to the recipe words, while
4686 			 * be, de, and pe are related to the lookup words
4687 			 */
4688 			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
4689 				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
4690 				     qr++) {
4691 					if (ar[qr].off == be[pe].off &&
4692 					    ar[qr].prot_id == be[pe].prot_id &&
4693 					    cr[qr] == de[pe])
4694 						/* Found the "pe"th word in the
4695 						 * given recipe
4696 						 */
4697 						break;
4698 				}
4699 				/* After walking through all the words in the
4700 				 * "i"th recipe if "p"th word was not found then
4701 				 * this recipe is not what we are looking for.
4702 				 * So break out from this loop and try the next
4703 				 * recipe
4704 				 */
4705 				if (qr >= recp[i].lkup_exts.n_val_words) {
4706 					found = false;
4707 					break;
4708 				}
4709 			}
4710 			/* If for "i"th recipe the found was never set to false
4711 			 * then it means we found our match
4712 			 * Also tun type of recipe needs to be checked
4713 			 */
4714 			if (found && recp[i].tun_type == tun_type)
4715 				return i; /* Return the recipe ID */
4716 		}
4717 	}
4718 	return ICE_MAX_NUM_RECIPES;
4719 }
4720 
4721 /**
4722  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4723  *
4724  * As protocol id for outer vlan is different in dvm and svm, if dvm is
4725  * supported protocol array record for outer vlan has to be modified to
4726  * reflect the value proper for DVM.
4727  */
4728 void ice_change_proto_id_to_dvm(void)
4729 {
4730 	u8 i;
4731 
4732 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4733 		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4734 		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4735 			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4736 }
4737 
4738 /**
4739  * ice_prot_type_to_id - get protocol ID from protocol type
4740  * @type: protocol type
4741  * @id: pointer to variable that will receive the ID
4742  *
4743  * Returns true if found, false otherwise
4744  */
4745 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4746 {
4747 	u8 i;
4748 
4749 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4750 		if (ice_prot_id_tbl[i].type == type) {
4751 			*id = ice_prot_id_tbl[i].protocol_id;
4752 			return true;
4753 		}
4754 	return false;
4755 }
4756 
4757 /**
4758  * ice_fill_valid_words - count valid words
4759  * @rule: advanced rule with lookup information
4760  * @lkup_exts: byte offset extractions of the words that are valid
4761  *
4762  * calculate valid words in a lookup rule using mask value
4763  */
4764 static u8
4765 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4766 		     struct ice_prot_lkup_ext *lkup_exts)
4767 {
4768 	u8 j, word, prot_id, ret_val;
4769 
4770 	if (!ice_prot_type_to_id(rule->type, &prot_id))
4771 		return 0;
4772 
4773 	word = lkup_exts->n_val_words;
4774 
4775 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4776 		if (((u16 *)&rule->m_u)[j] &&
4777 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
4778 			/* No more space to accommodate */
4779 			if (word >= ICE_MAX_CHAIN_WORDS)
4780 				return 0;
4781 			lkup_exts->fv_words[word].off =
4782 				ice_prot_ext[rule->type].offs[j];
4783 			lkup_exts->fv_words[word].prot_id =
4784 				ice_prot_id_tbl[rule->type].protocol_id;
4785 			lkup_exts->field_mask[word] =
4786 				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
4787 			word++;
4788 		}
4789 
4790 	ret_val = word - lkup_exts->n_val_words;
4791 	lkup_exts->n_val_words = word;
4792 
4793 	return ret_val;
4794 }
4795 
4796 /**
4797  * ice_create_first_fit_recp_def - Create a recipe grouping
4798  * @hw: pointer to the hardware structure
4799  * @lkup_exts: an array of protocol header extractions
4800  * @rg_list: pointer to a list that stores new recipe groups
4801  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4802  *
4803  * Using first fit algorithm, take all the words that are still not done
4804  * and start grouping them in 4-word groups. Each group makes up one
4805  * recipe.
4806  */
4807 static int
4808 ice_create_first_fit_recp_def(struct ice_hw *hw,
4809 			      struct ice_prot_lkup_ext *lkup_exts,
4810 			      struct list_head *rg_list,
4811 			      u8 *recp_cnt)
4812 {
4813 	struct ice_pref_recipe_group *grp = NULL;
4814 	u8 j;
4815 
4816 	*recp_cnt = 0;
4817 
4818 	/* Walk through every word in the rule to check if it is not done. If so
4819 	 * then this word needs to be part of a new recipe.
4820 	 */
4821 	for (j = 0; j < lkup_exts->n_val_words; j++)
4822 		if (!test_bit(j, lkup_exts->done)) {
4823 			if (!grp ||
4824 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4825 				struct ice_recp_grp_entry *entry;
4826 
4827 				entry = devm_kzalloc(ice_hw_to_dev(hw),
4828 						     sizeof(*entry),
4829 						     GFP_KERNEL);
4830 				if (!entry)
4831 					return -ENOMEM;
4832 				list_add(&entry->l_entry, rg_list);
4833 				grp = &entry->r_group;
4834 				(*recp_cnt)++;
4835 			}
4836 
4837 			grp->pairs[grp->n_val_pairs].prot_id =
4838 				lkup_exts->fv_words[j].prot_id;
4839 			grp->pairs[grp->n_val_pairs].off =
4840 				lkup_exts->fv_words[j].off;
4841 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4842 			grp->n_val_pairs++;
4843 		}
4844 
4845 	return 0;
4846 }
4847 
4848 /**
4849  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4850  * @hw: pointer to the hardware structure
4851  * @fv_list: field vector with the extraction sequence information
4852  * @rg_list: recipe groupings with protocol-offset pairs
4853  *
4854  * Helper function to fill in the field vector indices for protocol-offset
4855  * pairs. These indexes are then ultimately programmed into a recipe.
4856  */
4857 static int
4858 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4859 		       struct list_head *rg_list)
4860 {
4861 	struct ice_sw_fv_list_entry *fv;
4862 	struct ice_recp_grp_entry *rg;
4863 	struct ice_fv_word *fv_ext;
4864 
4865 	if (list_empty(fv_list))
4866 		return 0;
4867 
4868 	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4869 			      list_entry);
4870 	fv_ext = fv->fv_ptr->ew;
4871 
4872 	list_for_each_entry(rg, rg_list, l_entry) {
4873 		u8 i;
4874 
4875 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4876 			struct ice_fv_word *pr;
4877 			bool found = false;
4878 			u16 mask;
4879 			u8 j;
4880 
4881 			pr = &rg->r_group.pairs[i];
4882 			mask = rg->r_group.mask[i];
4883 
4884 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4885 				if (fv_ext[j].prot_id == pr->prot_id &&
4886 				    fv_ext[j].off == pr->off) {
4887 					found = true;
4888 
4889 					/* Store index of field vector */
4890 					rg->fv_idx[i] = j;
4891 					rg->fv_mask[i] = mask;
4892 					break;
4893 				}
4894 
4895 			/* Protocol/offset could not be found, caller gave an
4896 			 * invalid pair
4897 			 */
4898 			if (!found)
4899 				return -EINVAL;
4900 		}
4901 	}
4902 
4903 	return 0;
4904 }
4905 
4906 /**
4907  * ice_find_free_recp_res_idx - find free result indexes for recipe
4908  * @hw: pointer to hardware structure
4909  * @profiles: bitmap of profiles that will be associated with the new recipe
4910  * @free_idx: pointer to variable to receive the free index bitmap
4911  *
4912  * The algorithm used here is:
4913  *	1. When creating a new recipe, create a set P which contains all
4914  *	   Profiles that will be associated with our new recipe
4915  *
4916  *	2. For each Profile p in set P:
4917  *	    a. Add all recipes associated with Profile p into set R
4918  *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4919  *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4920  *		i. Or just assume they all have the same possible indexes:
4921  *			44, 45, 46, 47
4922  *			i.e., PossibleIndexes = 0x0000F00000000000
4923  *
4924  *	3. For each Recipe r in set R:
4925  *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4926  *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4927  *
4928  *	FreeIndexes will contain the bits indicating the indexes free for use,
4929  *      then the code needs to update the recipe[r].used_result_idx_bits to
4930  *      indicate which indexes were selected for use by this recipe.
4931  */
4932 static u16
4933 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4934 			   unsigned long *free_idx)
4935 {
4936 	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4937 	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4938 	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4939 	u16 bit;
4940 
4941 	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4942 	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4943 
4944 	bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
4945 
4946 	/* For each profile we are going to associate the recipe with, add the
4947 	 * recipes that are associated with that profile. This will give us
4948 	 * the set of recipes that our recipe may collide with. Also, determine
4949 	 * what possible result indexes are usable given this set of profiles.
4950 	 */
4951 	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4952 		bitmap_or(recipes, recipes, profile_to_recipe[bit],
4953 			  ICE_MAX_NUM_RECIPES);
4954 		bitmap_and(possible_idx, possible_idx,
4955 			   hw->switch_info->prof_res_bm[bit],
4956 			   ICE_MAX_FV_WORDS);
4957 	}
4958 
4959 	/* For each recipe that our new recipe may collide with, determine
4960 	 * which indexes have been used.
4961 	 */
4962 	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4963 		bitmap_or(used_idx, used_idx,
4964 			  hw->switch_info->recp_list[bit].res_idxs,
4965 			  ICE_MAX_FV_WORDS);
4966 
4967 	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4968 
4969 	/* return number of free indexes */
4970 	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4971 }
4972 
4973 /**
4974  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4975  * @hw: pointer to hardware structure
4976  * @rm: recipe management list entry
4977  * @profiles: bitmap of profiles that will be associated.
4978  */
static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  unsigned long *profiles)
{
	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;
	int status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	/* check number of free result indices */
	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		/* Each chained (non-root) recipe publishes its outcome via a
		 * result index, so one free index per group is required.
		 */
		if (rm->n_grp_count > free_res_idx)
			return -ENOSPC;

		/* account for the extra root recipe that does the chaining */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return -ENOSPC;

	/* tmp receives a snapshot of an existing recipe from firmware; its
	 * first element is used below as a template for the new recipes.
	 */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* buf is the AQ buffer of recipes to program; on success its
	 * ownership passes to rm->root_buf (freed by the caller's cleanup).
	 */
	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
			   GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err_mem;
	}

	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		/* Program the group's protocol/offset words at the FV slots
		 * resolved earlier by ice_fill_fv_word_index().
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				cpu_to_le16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = -ENOSPC;
				goto err_unroll;
			}

			/* Claim one result index for this group's outcome and
			 * advance to the next free one.
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			clear_bit(chain_idx, result_idx_bm);
			chain_idx = find_first_bit(result_idx_bm,
						   ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
			    ICE_MAX_NUM_RECIPES);
		set_bit(buf[recps].recipe_indx,
			(unsigned long *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		/* Single group: the one recipe is itself the root */
		rm->root_rid = buf[0].recipe_indx;
		set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[0].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referring newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*last_chain_entry),
						GFP_KERNEL);
		if (!last_chain_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* The root recipe matches on each chained group's result
		 * index with a full mask, tying the chain together.
		 */
		list_for_each_entry(entry, &rm->rg_list, l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
			set_bit(entry->rid, rm->r_bitmap);
		}
		list_add(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[recps].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	/* Program all assembled recipes into firmware in one AQ call */
	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = -EIO;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		memcpy(&recp->ext_words, entry->r_group.pairs,
		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));

		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
		       sizeof(recp->r_bitmap));

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
	}
	/* hand buf over to the caller via rm; only tmp is freed here */
	rm->root_buf = buf;
	kfree(tmp);
	return status;

err_unroll:
err_mem:
	kfree(tmp);
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
5264 
5265 /**
5266  * ice_create_recipe_group - creates recipe group
5267  * @hw: pointer to hardware structure
5268  * @rm: recipe management list entry
5269  * @lkup_exts: lookup elements
5270  */
5271 static int
5272 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5273 			struct ice_prot_lkup_ext *lkup_exts)
5274 {
5275 	u8 recp_count = 0;
5276 	int status;
5277 
5278 	rm->n_grp_count = 0;
5279 
5280 	/* Create recipes for words that are marked not done by packing them
5281 	 * as best fit.
5282 	 */
5283 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
5284 					       &rm->rg_list, &recp_count);
5285 	if (!status) {
5286 		rm->n_grp_count += recp_count;
5287 		rm->n_ext_words = lkup_exts->n_val_words;
5288 		memcpy(&rm->ext_words, lkup_exts->fv_words,
5289 		       sizeof(rm->ext_words));
5290 		memcpy(rm->word_masks, lkup_exts->field_mask,
5291 		       sizeof(rm->word_masks));
5292 	}
5293 
5294 	return status;
5295 }
5296 
5297 /**
5298  * ice_tun_type_match_word - determine if tun type needs a match mask
5299  * @tun_type: tunnel type
5300  * @mask: mask to be used for the tunnel
5301  */
5302 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
5303 {
5304 	switch (tun_type) {
5305 	case ICE_SW_TUN_GENEVE:
5306 	case ICE_SW_TUN_VXLAN:
5307 	case ICE_SW_TUN_NVGRE:
5308 	case ICE_SW_TUN_GTPU:
5309 	case ICE_SW_TUN_GTPC:
5310 		*mask = ICE_TUN_FLAG_MASK;
5311 		return true;
5312 
5313 	default:
5314 		*mask = 0;
5315 		return false;
5316 	}
5317 }
5318 
5319 /**
5320  * ice_add_special_words - Add words that are not protocols, such as metadata
5321  * @rinfo: other information regarding the rule e.g. priority and action info
5322  * @lkup_exts: lookup word structure
5323  */
5324 static int
5325 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5326 		      struct ice_prot_lkup_ext *lkup_exts)
5327 {
5328 	u16 mask;
5329 
5330 	/* If this is a tunneled packet, then add recipe index to match the
5331 	 * tunnel bit in the packet metadata flags.
5332 	 */
5333 	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
5334 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5335 			u8 word = lkup_exts->n_val_words++;
5336 
5337 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5338 			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
5339 			lkup_exts->field_mask[word] = mask;
5340 		} else {
5341 			return -ENOSPC;
5342 		}
5343 	}
5344 
5345 	return 0;
5346 }
5347 
5348 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5349  * @hw: pointer to hardware structure
5350  * @rinfo: other information regarding the rule e.g. priority and action info
5351  * @bm: pointer to memory for returning the bitmap of field vectors
5352  */
5353 static void
5354 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5355 			 unsigned long *bm)
5356 {
5357 	enum ice_prof_type prof_type;
5358 
5359 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5360 
5361 	switch (rinfo->tun_type) {
5362 	case ICE_NON_TUN:
5363 		prof_type = ICE_PROF_NON_TUN;
5364 		break;
5365 	case ICE_ALL_TUNNELS:
5366 		prof_type = ICE_PROF_TUN_ALL;
5367 		break;
5368 	case ICE_SW_TUN_GENEVE:
5369 	case ICE_SW_TUN_VXLAN:
5370 		prof_type = ICE_PROF_TUN_UDP;
5371 		break;
5372 	case ICE_SW_TUN_NVGRE:
5373 		prof_type = ICE_PROF_TUN_GRE;
5374 		break;
5375 	case ICE_SW_TUN_GTPU:
5376 		prof_type = ICE_PROF_TUN_GTPU;
5377 		break;
5378 	case ICE_SW_TUN_GTPC:
5379 		prof_type = ICE_PROF_TUN_GTPC;
5380 		break;
5381 	case ICE_SW_TUN_AND_NON_TUN:
5382 	default:
5383 		prof_type = ICE_PROF_ALL;
5384 		break;
5385 	}
5386 
5387 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
5388 }
5389 
5390 /**
5391  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5392  * @hw: pointer to hardware structure
5393  * @lkups: lookup elements or match criteria for the advanced recipe, one
5394  *  structure per protocol header
5395  * @lkups_cnt: number of protocols
5396  * @rinfo: other information regarding the rule e.g. priority and action info
5397  * @rid: return the recipe ID of the recipe created
5398  */
5399 static int
5400 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5401 		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5402 {
5403 	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
5404 	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
5405 	struct ice_prot_lkup_ext *lkup_exts;
5406 	struct ice_recp_grp_entry *r_entry;
5407 	struct ice_sw_fv_list_entry *fvit;
5408 	struct ice_recp_grp_entry *r_tmp;
5409 	struct ice_sw_fv_list_entry *tmp;
5410 	struct ice_sw_recipe *rm;
5411 	int status = 0;
5412 	u8 i;
5413 
5414 	if (!lkups_cnt)
5415 		return -EINVAL;
5416 
5417 	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
5418 	if (!lkup_exts)
5419 		return -ENOMEM;
5420 
5421 	/* Determine the number of words to be matched and if it exceeds a
5422 	 * recipe's restrictions
5423 	 */
5424 	for (i = 0; i < lkups_cnt; i++) {
5425 		u16 count;
5426 
5427 		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5428 			status = -EIO;
5429 			goto err_free_lkup_exts;
5430 		}
5431 
5432 		count = ice_fill_valid_words(&lkups[i], lkup_exts);
5433 		if (!count) {
5434 			status = -EIO;
5435 			goto err_free_lkup_exts;
5436 		}
5437 	}
5438 
5439 	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
5440 	if (!rm) {
5441 		status = -ENOMEM;
5442 		goto err_free_lkup_exts;
5443 	}
5444 
5445 	/* Get field vectors that contain fields extracted from all the protocol
5446 	 * headers being programmed.
5447 	 */
5448 	INIT_LIST_HEAD(&rm->fv_list);
5449 	INIT_LIST_HEAD(&rm->rg_list);
5450 
5451 	/* Get bitmap of field vectors (profiles) that are compatible with the
5452 	 * rule request; only these will be searched in the subsequent call to
5453 	 * ice_get_sw_fv_list.
5454 	 */
5455 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5456 
5457 	status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
5458 	if (status)
5459 		goto err_unroll;
5460 
5461 	/* Create any special protocol/offset pairs, such as looking at tunnel
5462 	 * bits by extracting metadata
5463 	 */
5464 	status = ice_add_special_words(rinfo, lkup_exts);
5465 	if (status)
5466 		goto err_free_lkup_exts;
5467 
5468 	/* Group match words into recipes using preferred recipe grouping
5469 	 * criteria.
5470 	 */
5471 	status = ice_create_recipe_group(hw, rm, lkup_exts);
5472 	if (status)
5473 		goto err_unroll;
5474 
5475 	/* set the recipe priority if specified */
5476 	rm->priority = (u8)rinfo->priority;
5477 
5478 	/* Find offsets from the field vector. Pick the first one for all the
5479 	 * recipes.
5480 	 */
5481 	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5482 	if (status)
5483 		goto err_unroll;
5484 
5485 	/* get bitmap of all profiles the recipe will be associated with */
5486 	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
5487 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5488 		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5489 		set_bit((u16)fvit->profile_id, profiles);
5490 	}
5491 
5492 	/* Look for a recipe which matches our requested fv / mask list */
5493 	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
5494 	if (*rid < ICE_MAX_NUM_RECIPES)
5495 		/* Success if found a recipe that match the existing criteria */
5496 		goto err_unroll;
5497 
5498 	rm->tun_type = rinfo->tun_type;
5499 	/* Recipe we need does not exist, add a recipe */
5500 	status = ice_add_sw_recipe(hw, rm, profiles);
5501 	if (status)
5502 		goto err_unroll;
5503 
5504 	/* Associate all the recipes created with all the profiles in the
5505 	 * common field vector.
5506 	 */
5507 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5508 		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
5509 		u16 j;
5510 
5511 		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5512 						      (u8 *)r_bitmap, NULL);
5513 		if (status)
5514 			goto err_unroll;
5515 
5516 		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
5517 			  ICE_MAX_NUM_RECIPES);
5518 		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5519 		if (status)
5520 			goto err_unroll;
5521 
5522 		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5523 						      (u8 *)r_bitmap,
5524 						      NULL);
5525 		ice_release_change_lock(hw);
5526 
5527 		if (status)
5528 			goto err_unroll;
5529 
5530 		/* Update profile to recipe bitmap array */
5531 		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
5532 			    ICE_MAX_NUM_RECIPES);
5533 
5534 		/* Update recipe to profile bitmap array */
5535 		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
5536 			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
5537 	}
5538 
5539 	*rid = rm->root_rid;
5540 	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
5541 	       sizeof(*lkup_exts));
5542 err_unroll:
5543 	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
5544 		list_del(&r_entry->l_entry);
5545 		devm_kfree(ice_hw_to_dev(hw), r_entry);
5546 	}
5547 
5548 	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
5549 		list_del(&fvit->list_entry);
5550 		devm_kfree(ice_hw_to_dev(hw), fvit);
5551 	}
5552 
5553 	if (rm->root_buf)
5554 		devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
5555 
5556 	kfree(rm);
5557 
5558 err_free_lkup_exts:
5559 	kfree(lkup_exts);
5560 
5561 	return status;
5562 }
5563 
5564 /**
5565  * ice_find_dummy_packet - find dummy packet
5566  *
5567  * @lkups: lookup elements or match criteria for the advanced recipe, one
5568  *	   structure per protocol header
5569  * @lkups_cnt: number of protocols
5570  * @tun_type: tunnel type
5571  *
5572  * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5573  */
5574 static const struct ice_dummy_pkt_profile *
5575 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5576 		      enum ice_sw_tunnel_type tun_type)
5577 {
5578 	const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5579 	u32 match = 0;
5580 	u16 i;
5581 
5582 	switch (tun_type) {
5583 	case ICE_SW_TUN_GTPC:
5584 		match |= ICE_PKT_TUN_GTPC;
5585 		break;
5586 	case ICE_SW_TUN_GTPU:
5587 		match |= ICE_PKT_TUN_GTPU;
5588 		break;
5589 	case ICE_SW_TUN_NVGRE:
5590 		match |= ICE_PKT_TUN_NVGRE;
5591 		break;
5592 	case ICE_SW_TUN_GENEVE:
5593 	case ICE_SW_TUN_VXLAN:
5594 		match |= ICE_PKT_TUN_UDP;
5595 		break;
5596 	default:
5597 		break;
5598 	}
5599 
5600 	for (i = 0; i < lkups_cnt; i++) {
5601 		if (lkups[i].type == ICE_UDP_ILOS)
5602 			match |= ICE_PKT_INNER_UDP;
5603 		else if (lkups[i].type == ICE_TCP_IL)
5604 			match |= ICE_PKT_INNER_TCP;
5605 		else if (lkups[i].type == ICE_IPV6_OFOS)
5606 			match |= ICE_PKT_OUTER_IPV6;
5607 		else if (lkups[i].type == ICE_VLAN_OFOS)
5608 			match |= ICE_PKT_VLAN;
5609 		else if (lkups[i].type == ICE_ETYPE_OL &&
5610 			 lkups[i].h_u.ethertype.ethtype_id ==
5611 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5612 			 lkups[i].m_u.ethertype.ethtype_id ==
5613 				cpu_to_be16(0xFFFF))
5614 			match |= ICE_PKT_OUTER_IPV6;
5615 		else if (lkups[i].type == ICE_ETYPE_IL &&
5616 			 lkups[i].h_u.ethertype.ethtype_id ==
5617 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5618 			 lkups[i].m_u.ethertype.ethtype_id ==
5619 				cpu_to_be16(0xFFFF))
5620 			match |= ICE_PKT_INNER_IPV6;
5621 		else if (lkups[i].type == ICE_IPV6_IL)
5622 			match |= ICE_PKT_INNER_IPV6;
5623 		else if (lkups[i].type == ICE_GTP_NO_PAY)
5624 			match |= ICE_PKT_GTP_NOPAY;
5625 	}
5626 
5627 	while (ret->match && (match & ret->match) != ret->match)
5628 		ret++;
5629 
5630 	return ret;
5631 }
5632 
5633 /**
5634  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5635  *
5636  * @lkups: lookup elements or match criteria for the advanced recipe, one
5637  *	   structure per protocol header
5638  * @lkups_cnt: number of protocols
5639  * @s_rule: stores rule information from the match criteria
5640  * @profile: dummy packet profile (the template, its size and header offsets)
5641  */
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const struct ice_dummy_pkt_profile *profile)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	memcpy(pkt, profile->pkt, profile->pkt_len);

	for (i = 0; i < lkups_cnt; i++) {
		const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when searching for the dummy packet
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return -EINVAL;

		/* determine the header length to merge for this layer type */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
		case ICE_ETYPE_IL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		case ICE_NVGRE:
			len = sizeof(struct ice_nvgre_hdr);
			break;
		case ICE_VXLAN:
		case ICE_GENEVE:
			len = sizeof(struct ice_udp_tnl_hdr);
			break;
		case ICE_GTP_NO_PAY:
		case ICE_GTP:
			len = sizeof(struct ice_udp_gtp_hdr);
			break;
		default:
			/* unsupported lookup type for a dummy packet */
			return -EINVAL;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return -EIO;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		for (j = 0; j < len / sizeof(u16); j++) {
			u16 *ptr = (u16 *)(pkt + offset);
			u16 mask = lkups[i].m_raw[j];

			/* unmasked words keep the dummy packet content */
			if (!mask)
				continue;

			/* merge: keep unmasked bits, take masked bits from
			 * the caller's header value
			 */
			ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
		}
	}

	s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(profile->pkt_len);

	return 0;
}
5747 
5748 /**
5749  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5750  * @hw: pointer to the hardware structure
5751  * @tun_type: tunnel type
5752  * @pkt: dummy packet to fill in
5753  * @offsets: offset info for the dummy packet
5754  */
5755 static int
5756 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5757 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5758 {
5759 	u16 open_port, i;
5760 
5761 	switch (tun_type) {
5762 	case ICE_SW_TUN_VXLAN:
5763 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5764 			return -EIO;
5765 		break;
5766 	case ICE_SW_TUN_GENEVE:
5767 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5768 			return -EIO;
5769 		break;
5770 	default:
5771 		/* Nothing needs to be done for this tunnel type */
5772 		return 0;
5773 	}
5774 
5775 	/* Find the outer UDP protocol header and insert the port number */
5776 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5777 		if (offsets[i].type == ICE_UDP_OF) {
5778 			struct ice_l4_hdr *hdr;
5779 			u16 offset;
5780 
5781 			offset = offsets[i].offset;
5782 			hdr = (struct ice_l4_hdr *)&pkt[offset];
5783 			hdr->dst_port = cpu_to_be16(open_port);
5784 
5785 			return 0;
5786 		}
5787 	}
5788 
5789 	return -EIO;
5790 }
5791 
5792 /**
5793  * ice_find_adv_rule_entry - Search a rule entry
5794  * @hw: pointer to the hardware structure
5795  * @lkups: lookup elements or match criteria for the advanced recipe, one
5796  *	   structure per protocol header
5797  * @lkups_cnt: number of protocols
5798  * @recp_id: recipe ID for which we are finding the rule
5799  * @rinfo: other information regarding the rule e.g. priority and action info
5800  *
5801  * Helper function to search for a given advance rule entry
5802  * Returns pointer to entry storing the rule if found
5803  */
5804 static struct ice_adv_fltr_mgmt_list_entry *
5805 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5806 			u16 lkups_cnt, u16 recp_id,
5807 			struct ice_adv_rule_info *rinfo)
5808 {
5809 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
5810 	struct ice_switch_info *sw = hw->switch_info;
5811 	int i;
5812 
5813 	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5814 			    list_entry) {
5815 		bool lkups_matched = true;
5816 
5817 		if (lkups_cnt != list_itr->lkups_cnt)
5818 			continue;
5819 		for (i = 0; i < list_itr->lkups_cnt; i++)
5820 			if (memcmp(&list_itr->lkups[i], &lkups[i],
5821 				   sizeof(*lkups))) {
5822 				lkups_matched = false;
5823 				break;
5824 			}
5825 		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5826 		    rinfo->tun_type == list_itr->rule_info.tun_type &&
5827 		    lkups_matched)
5828 			return list_itr;
5829 	}
5830 	return NULL;
5831 }
5832 
5833 /**
5834  * ice_adv_add_update_vsi_list
5835  * @hw: pointer to the hardware structure
5836  * @m_entry: pointer to current adv filter management list entry
5837  * @cur_fltr: filter information from the book keeping entry
5838  * @new_fltr: filter information with the new VSI to be added
5839  *
5840  * Call AQ command to add or update previously created VSI list with new VSI.
5841  *
5842  * Helper function to do book keeping associated with adding filter information
5843  * The algorithm to do the booking keeping is described below :
5844  * When a VSI needs to subscribe to a given advanced filter
5845  *	if only one VSI has been added till now
5846  *		Allocate a new VSI list and add two VSIs
5847  *		to this list using switch rule command
5848  *		Update the previously created switch rule with the
5849  *		newly created VSI list ID
5850  *	if a VSI list was previously created
5851  *		Add the new VSI to the previously created VSI list set
5852  *		using the update switch rule command
5853  */
5854 static int
5855 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5856 			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
5857 			    struct ice_adv_rule_info *cur_fltr,
5858 			    struct ice_adv_rule_info *new_fltr)
5859 {
5860 	u16 vsi_list_id = 0;
5861 	int status;
5862 
5863 	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5864 	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5865 	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5866 		return -EOPNOTSUPP;
5867 
5868 	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5869 	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5870 	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5871 	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5872 		return -EOPNOTSUPP;
5873 
5874 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5875 		 /* Only one entry existed in the mapping and it was not already
5876 		  * a part of a VSI list. So, create a VSI list with the old and
5877 		  * new VSIs.
5878 		  */
5879 		struct ice_fltr_info tmp_fltr;
5880 		u16 vsi_handle_arr[2];
5881 
5882 		/* A rule already exists with the new VSI being added */
5883 		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5884 		    new_fltr->sw_act.fwd_id.hw_vsi_id)
5885 			return -EEXIST;
5886 
5887 		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5888 		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5889 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5890 						  &vsi_list_id,
5891 						  ICE_SW_LKUP_LAST);
5892 		if (status)
5893 			return status;
5894 
5895 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5896 		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
5897 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5898 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5899 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5900 		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
5901 
5902 		/* Update the previous switch rule of "forward to VSI" to
5903 		 * "fwd to VSI list"
5904 		 */
5905 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5906 		if (status)
5907 			return status;
5908 
5909 		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5910 		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5911 		m_entry->vsi_list_info =
5912 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5913 						vsi_list_id);
5914 	} else {
5915 		u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5916 
5917 		if (!m_entry->vsi_list_info)
5918 			return -EIO;
5919 
5920 		/* A rule already exists with the new VSI being added */
5921 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
5922 			return 0;
5923 
5924 		/* Update the previously created VSI list set with
5925 		 * the new VSI ID passed in
5926 		 */
5927 		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5928 
5929 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5930 						  vsi_list_id, false,
5931 						  ice_aqc_opc_update_sw_rules,
5932 						  ICE_SW_LKUP_LAST);
5933 		/* update VSI list mapping info with new VSI ID */
5934 		if (!status)
5935 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
5936 	}
5937 	if (!status)
5938 		m_entry->vsi_count++;
5939 	return status;
5940 }
5941 
/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 * together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
 *               ignored in case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe the all the words that forms the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 */
int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	const struct ice_dummy_pkt_profile *profile;
	u16 rid = 0, i, rule_buf_sz, vsi_handle;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	u16 word_cnt;
	u32 act = 0;
	int status;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	if (!lkups_cnt)
		return -EINVAL;

	/* get # of words we need to match; only words with a non-zero mask
	 * participate in the match
	 */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j;

		for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
			if (lkups[i].m_raw[j])
				word_cnt++;
	}

	if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
		return -EINVAL;

	/* locate a dummy packet */
	profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);

	/* Only forward-to-VSI/queue/queue-group and drop actions are
	 * supported for advanced rules
	 */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return -EIO;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Resolve the driver VSI handle into the HW VSI number the switch
	 * hardware operates on
	 */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	/* Rule buffer = fixed AQ element header + the dummy packet payload */
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + profile->pkt_len;
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;
	if (!rinfo->flags_info.act_valid) {
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	} else {
		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
						ICE_SINGLE_ACT_LB_ENABLE);
	}

	/* Encode the caller's action into the rule's 32-bit action word */
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is expressed as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = -EIO;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'Rx'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'Rx' set the source to be the port number
	 * for 'Tx' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			cpu_to_le16(hw->port_info->lport);
	} else {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	/* Build the dummy packet (match template) into the rule buffer */
	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
	if (status)
		goto err_ice_add_adv_rule;

	/* For tunneled traffic, patch the open tunnel port into the packet */
	if (rinfo->tun_type != ICE_NON_TUN &&
	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->pdata.lkup_tx_rx.hdr,
						 profile->offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	/* Program the rule into HW via the admin queue */
	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
				sizeof(struct ice_adv_fltr_mgmt_list_entry),
				GFP_KERNEL);
	if (!adv_fltr) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
	if (!adv_fltr->lkups) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	/* Record the HW-assigned rule index for later lookup/removal */
	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	list_add(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* on failure, release any partially constructed bookkeeping entry;
	 * the temporary AQ rule buffer is freed on all paths
	 */
	if (status && adv_fltr) {
		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
	}

	kfree(s_rule);

	return status;
}
6161 
6162 /**
6163  * ice_replay_vsi_fltr - Replay filters for requested VSI
6164  * @hw: pointer to the hardware structure
6165  * @vsi_handle: driver VSI handle
6166  * @recp_id: Recipe ID for which rules need to be replayed
6167  * @list_head: list for which filters need to be replayed
6168  *
6169  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6170  * It is required to pass valid VSI handle.
6171  */
6172 static int
6173 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6174 		    struct list_head *list_head)
6175 {
6176 	struct ice_fltr_mgmt_list_entry *itr;
6177 	int status = 0;
6178 	u16 hw_vsi_id;
6179 
6180 	if (list_empty(list_head))
6181 		return status;
6182 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6183 
6184 	list_for_each_entry(itr, list_head, list_entry) {
6185 		struct ice_fltr_list_entry f_entry;
6186 
6187 		f_entry.fltr_info = itr->fltr_info;
6188 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6189 		    itr->fltr_info.vsi_handle == vsi_handle) {
6190 			/* update the src in case it is VSI num */
6191 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6192 				f_entry.fltr_info.src = hw_vsi_id;
6193 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6194 			if (status)
6195 				goto end;
6196 			continue;
6197 		}
6198 		if (!itr->vsi_list_info ||
6199 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6200 			continue;
6201 		/* Clearing it so that the logic can add it back */
6202 		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6203 		f_entry.fltr_info.vsi_handle = vsi_handle;
6204 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6205 		/* update the src in case it is VSI num */
6206 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6207 			f_entry.fltr_info.src = hw_vsi_id;
6208 		if (recp_id == ICE_SW_LKUP_VLAN)
6209 			status = ice_add_vlan_internal(hw, &f_entry);
6210 		else
6211 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6212 		if (status)
6213 			goto end;
6214 	}
6215 end:
6216 	return status;
6217 }
6218 
/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 *
 * Remove @vsi_handle from the VSI list of an advanced rule. When only one
 * VSI remains afterwards, the rule is converted back to a direct
 * "forward to VSI" rule and the now-unneeded VSI list is deleted.
 */
static int
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status;

	/* Only rules that actually forward to a VSI list can be updated */
	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* Remove this VSI from the HW VSI list (remove_vsi = true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	/* Mirror the HW change in the driver's bookkeeping */
	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		/* Identify the single VSI left on the list */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "MAC fwd to VSI list"
		 * to "MAC fwd to VSI" for the remaining VSI
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
6308 
6309 /**
6310  * ice_rem_adv_rule - removes existing advanced switch rule
6311  * @hw: pointer to the hardware structure
6312  * @lkups: information on the words that needs to be looked up. All words
6313  *         together makes one recipe
6314  * @lkups_cnt: num of entries in the lkups array
6315  * @rinfo: Its the pointer to the rule information for the rule
6316  *
6317  * This function can be used to remove 1 rule at a time. The lkups is
6318  * used to describe all the words that forms the "lookup" portion of the
6319  * rule. These words can span multiple protocols. Callers to this function
6320  * need to pass in a list of protocol headers with lookup information along
6321  * and mask that determines which words are valid from the given protocol
6322  * header. rinfo describes other information related to this rule such as
6323  * forwarding IDs, priority of this rule, etc.
6324  */
6325 static int
6326 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6327 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6328 {
6329 	struct ice_adv_fltr_mgmt_list_entry *list_elem;
6330 	struct ice_prot_lkup_ext lkup_exts;
6331 	bool remove_rule = false;
6332 	struct mutex *rule_lock; /* Lock to protect filter rule list */
6333 	u16 i, rid, vsi_handle;
6334 	int status = 0;
6335 
6336 	memset(&lkup_exts, 0, sizeof(lkup_exts));
6337 	for (i = 0; i < lkups_cnt; i++) {
6338 		u16 count;
6339 
6340 		if (lkups[i].type >= ICE_PROTOCOL_LAST)
6341 			return -EIO;
6342 
6343 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6344 		if (!count)
6345 			return -EIO;
6346 	}
6347 
6348 	/* Create any special protocol/offset pairs, such as looking at tunnel
6349 	 * bits by extracting metadata
6350 	 */
6351 	status = ice_add_special_words(rinfo, &lkup_exts);
6352 	if (status)
6353 		return status;
6354 
6355 	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
6356 	/* If did not find a recipe that match the existing criteria */
6357 	if (rid == ICE_MAX_NUM_RECIPES)
6358 		return -EINVAL;
6359 
6360 	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6361 	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6362 	/* the rule is already removed */
6363 	if (!list_elem)
6364 		return 0;
6365 	mutex_lock(rule_lock);
6366 	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6367 		remove_rule = true;
6368 	} else if (list_elem->vsi_count > 1) {
6369 		remove_rule = false;
6370 		vsi_handle = rinfo->sw_act.vsi_handle;
6371 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6372 	} else {
6373 		vsi_handle = rinfo->sw_act.vsi_handle;
6374 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6375 		if (status) {
6376 			mutex_unlock(rule_lock);
6377 			return status;
6378 		}
6379 		if (list_elem->vsi_count == 0)
6380 			remove_rule = true;
6381 	}
6382 	mutex_unlock(rule_lock);
6383 	if (remove_rule) {
6384 		struct ice_aqc_sw_rules_elem *s_rule;
6385 		u16 rule_buf_sz;
6386 
6387 		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6388 		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6389 		if (!s_rule)
6390 			return -ENOMEM;
6391 		s_rule->pdata.lkup_tx_rx.act = 0;
6392 		s_rule->pdata.lkup_tx_rx.index =
6393 			cpu_to_le16(list_elem->rule_info.fltr_rule_id);
6394 		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6395 		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6396 					 rule_buf_sz, 1,
6397 					 ice_aqc_opc_remove_sw_rules, NULL);
6398 		if (!status || status == -ENOENT) {
6399 			struct ice_switch_info *sw = hw->switch_info;
6400 
6401 			mutex_lock(rule_lock);
6402 			list_del(&list_elem->list_entry);
6403 			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
6404 			devm_kfree(ice_hw_to_dev(hw), list_elem);
6405 			mutex_unlock(rule_lock);
6406 			if (list_empty(&sw->recp_list[rid].filt_rules))
6407 				sw->recp_list[rid].adv_rule = false;
6408 		}
6409 		kfree(s_rule);
6410 	}
6411 	return status;
6412 }
6413 
6414 /**
6415  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6416  * @hw: pointer to the hardware structure
6417  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6418  *
6419  * This function is used to remove 1 rule at a time. The removal is based on
6420  * the remove_entry parameter. This function will remove rule for a given
6421  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6422  */
6423 int
6424 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6425 		       struct ice_rule_query_data *remove_entry)
6426 {
6427 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
6428 	struct list_head *list_head;
6429 	struct ice_adv_rule_info rinfo;
6430 	struct ice_switch_info *sw;
6431 
6432 	sw = hw->switch_info;
6433 	if (!sw->recp_list[remove_entry->rid].recp_created)
6434 		return -EINVAL;
6435 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6436 	list_for_each_entry(list_itr, list_head, list_entry) {
6437 		if (list_itr->rule_info.fltr_rule_id ==
6438 		    remove_entry->rule_id) {
6439 			rinfo = list_itr->rule_info;
6440 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6441 			return ice_rem_adv_rule(hw, list_itr->lkups,
6442 						list_itr->lkups_cnt, &rinfo);
6443 		}
6444 	}
6445 	/* either list is empty or unable to find rule */
6446 	return -ENOENT;
6447 }
6448 
6449 /**
6450  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6451  *                            given VSI handle
6452  * @hw: pointer to the hardware structure
6453  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6454  *
6455  * This function is used to remove all the rules for a given VSI and as soon
6456  * as removing a rule fails, it will return immediately with the error code,
6457  * else it will return success.
6458  */
6459 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6460 {
6461 	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
6462 	struct ice_vsi_list_map_info *map_info;
6463 	struct ice_adv_rule_info rinfo;
6464 	struct list_head *list_head;
6465 	struct ice_switch_info *sw;
6466 	int status;
6467 	u8 rid;
6468 
6469 	sw = hw->switch_info;
6470 	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6471 		if (!sw->recp_list[rid].recp_created)
6472 			continue;
6473 		if (!sw->recp_list[rid].adv_rule)
6474 			continue;
6475 
6476 		list_head = &sw->recp_list[rid].filt_rules;
6477 		list_for_each_entry_safe(list_itr, tmp_entry, list_head,
6478 					 list_entry) {
6479 			rinfo = list_itr->rule_info;
6480 
6481 			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
6482 				map_info = list_itr->vsi_list_info;
6483 				if (!map_info)
6484 					continue;
6485 
6486 				if (!test_bit(vsi_handle, map_info->vsi_map))
6487 					continue;
6488 			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {
6489 				continue;
6490 			}
6491 
6492 			rinfo.sw_act.vsi_handle = vsi_handle;
6493 			status = ice_rem_adv_rule(hw, list_itr->lkups,
6494 						  list_itr->lkups_cnt, &rinfo);
6495 			if (status)
6496 				return status;
6497 		}
6498 	}
6499 	return 0;
6500 }
6501 
6502 /**
6503  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6504  * @hw: pointer to the hardware structure
6505  * @vsi_handle: driver VSI handle
6506  * @list_head: list for which filters need to be replayed
6507  *
6508  * Replay the advanced rule for the given VSI.
6509  */
6510 static int
6511 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6512 			struct list_head *list_head)
6513 {
6514 	struct ice_rule_query_data added_entry = { 0 };
6515 	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6516 	int status = 0;
6517 
6518 	if (list_empty(list_head))
6519 		return status;
6520 	list_for_each_entry(adv_fltr, list_head, list_entry) {
6521 		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6522 		u16 lk_cnt = adv_fltr->lkups_cnt;
6523 
6524 		if (vsi_handle != rinfo->sw_act.vsi_handle)
6525 			continue;
6526 		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6527 					  &added_entry);
6528 		if (status)
6529 			break;
6530 	}
6531 	return status;
6532 }
6533 
6534 /**
6535  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6536  * @hw: pointer to the hardware structure
6537  * @vsi_handle: driver VSI handle
6538  *
6539  * Replays filters for requested VSI via vsi_handle.
6540  */
6541 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6542 {
6543 	struct ice_switch_info *sw = hw->switch_info;
6544 	int status;
6545 	u8 i;
6546 
6547 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6548 		struct list_head *head;
6549 
6550 		head = &sw->recp_list[i].filt_replay_rules;
6551 		if (!sw->recp_list[i].adv_rule)
6552 			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6553 		else
6554 			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6555 		if (status)
6556 			return status;
6557 	}
6558 	return status;
6559 }
6560 
6561 /**
6562  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6563  * @hw: pointer to the HW struct
6564  *
6565  * Deletes the filter replay rules.
6566  */
6567 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6568 {
6569 	struct ice_switch_info *sw = hw->switch_info;
6570 	u8 i;
6571 
6572 	if (!sw)
6573 		return;
6574 
6575 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6576 		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6577 			struct list_head *l_head;
6578 
6579 			l_head = &sw->recp_list[i].filt_replay_rules;
6580 			if (!sw->recp_list[i].adv_rule)
6581 				ice_rem_sw_rule_info(hw, l_head);
6582 			else
6583 				ice_rem_adv_rule_info(hw, l_head);
6584 		}
6585 	}
6586 }
6587