xref: /linux/drivers/net/ethernet/intel/ice/ice_switch.c (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6 
/* Byte offsets into the dummy Ethernet header below, plus related limits */
#define ICE_ETH_DA_OFFSET		0	/* destination MAC address */
#define ICE_ETH_ETHTYPE_OFFSET		12	/* EtherType field */
#define ICE_ETH_VLAN_TCI_OFFSET		14	/* VLAN TCI when 802.1Q tagged */
#define ICE_MAX_VLAN_ID			0xFFF	/* VLAN ID is a 12-bit field */
#define ICE_IPV6_ETHER_ID		0x86DD	/* EtherType for IPv6 */
12 
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter first two bytes defines ether type (0x8100)
 *	and remaining two bytes are placeholder for programming a given VLAN ID
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, /* DA */
							0x2, 0, 0, 0, 0, 0, /* SA */
							0x81, 0, 0, 0}; /* 0x8100 tag / EtherType */
32 
/* Feature flags describing the header layout of a dummy packet template.
 * ICE_PKT_PROFILE() below stores a combination of these in
 * ice_dummy_pkt_profile::match so a template can be selected by the
 * headers it carries.  NOTE(review): the profile table using these masks
 * is outside this chunk — confirm individual flag semantics against it.
 */
enum {
	ICE_PKT_VLAN		= BIT(0),
	ICE_PKT_OUTER_IPV6	= BIT(1),
	ICE_PKT_TUN_GTPC	= BIT(2),
	ICE_PKT_TUN_GTPU	= BIT(3),
	ICE_PKT_TUN_NVGRE	= BIT(4),
	ICE_PKT_TUN_UDP		= BIT(5),
	ICE_PKT_INNER_IPV6	= BIT(6),
	ICE_PKT_INNER_TCP	= BIT(7),
	ICE_PKT_INNER_UDP	= BIT(8),
	ICE_PKT_GTP_NOPAY	= BIT(9),
};
45 
/* One (protocol, byte-offset) entry locating a header inside a dummy
 * packet template; tables of these are terminated by ICE_PROTOCOL_LAST.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
50 
/* Ties a dummy packet template to its offsets table and the ICE_PKT_*
 * flag mask ('match') used to select it; built by ICE_PKT_PROFILE().
 */
struct ice_dummy_pkt_profile {
	const struct ice_dummy_pkt_offsets *offsets;	/* header locations */
	const u8 *pkt;					/* raw template bytes */
	u32 match;					/* ICE_PKT_* flags */
	u16 pkt_len;					/* template length in bytes */
};
57 
/* Declare the offsets table for dummy packet <type>; expands to the
 * identifier ice_dummy_<type>_packet_offsets.
 */
#define ICE_DECLARE_PKT_OFFSETS(type)				\
	static const struct ice_dummy_pkt_offsets		\
	ice_dummy_##type##_packet_offsets[]
61 
/* Declare the raw byte template for dummy packet <type>; expands to the
 * identifier ice_dummy_<type>_packet.
 */
#define ICE_DECLARE_PKT_TEMPLATE(type)				\
	static const u8 ice_dummy_##type##_packet[]
64 
/* Build one ice_dummy_pkt_profile initializer pairing the <type> template
 * and offsets table (declared via the macros above) with match mask 'm'.
 */
#define ICE_PKT_PROFILE(type, m) {				\
	.match		= (m),					\
	.pkt		= ice_dummy_##type##_packet,		\
	.pkt_len	= sizeof(ice_dummy_##type##_packet),	\
	.offsets	= ice_dummy_##type##_packet_offsets,	\
}
71 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
118 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};
162 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * MAC + IPv4 + TCP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
215 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * MAC + IPv4 + UDP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};
265 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_TCP_IL,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x06, 0x40,	/* next header 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
317 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_UDP_ILOS,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x11, 0x40,	/* next header 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
	0x00, 0x08, 0x00, 0x00,
};
366 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * MAC + IPv6 + TCP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_TCP_IL,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x5a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x06, 0x40,	/* next header 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
424 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * MAC + IPv6 + UDP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_UDP_ILOS,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x4e, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x11, 0x00,	/* next header 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
	0x00, 0x08, 0x00, 0x00,
};
479 
/* offset info for MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
508 
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(vlan_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_UDP_ILOS,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
540 
/* offset info for MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
572 
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_TCP_IL,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:TCP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
607 
/* offset info for MAC + IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
642 
/* C-tag (802.1Q): IPv6 + TCP */
ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_TCP_IL,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
682 
/* IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
719 
/* C-tag (802.1Q): IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_UDP_ILOS,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
756 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_TCP_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x58, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 0x0868 = 2152 (GTP-U) */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
804 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_UDP_ILOS,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
849 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
902 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x60, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
951 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x44, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1003 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x38, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1052 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_TCP_IL,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x58, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1109 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_UDP_ILOS,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x4c, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1163 
/* Header offsets for the no-payload GTP-U over IPv4 dummy packet below.
 * Matching stops at the GTP header (ICE_GTP_NO_PAY), so the inner IPv4
 * header present in the template is intentionally not listed here.
 */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1171 
/* Dummy IPv4 / UDP (port 2152) / GTP-U packet with PDU session extension
 * header and a minimal inner IPv4 header; used for the GTP no-payload case.
 */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,
};
1201 
/* Header offsets for the no-payload GTP over IPv6 dummy packet below;
 * matching stops at the GTP header (ICE_GTP_NO_PAY).
 */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1209 
/* Dummy IPv6 / UDP (port 2152) / GTP packet. Unlike the GTP-U templates
 * above, the GTP header here has flags 0x30 (no extension header) and no
 * inner payload.
 */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,
};
1235 
/* Table mapping ICE_PKT_* match-flag combinations to dummy packet templates.
 * Entries are laid out most-specific first (tunnel type + inner flags before
 * plainer variants), ending with the bare TCP template as a catch-all.
 * NOTE(review): the lookup code consuming this table is outside this chunk;
 * the first-match/ordering assumption is inferred from the layout — confirm
 * against the consumer before relying on it.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
				  ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
				      ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
					  ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP |
				       ICE_PKT_VLAN),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | ICE_PKT_VLAN),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN),
	ICE_PKT_PROFILE(tcp, 0),
};
1284 
/* Buffer-size helpers for variable-length switch rule AQ elements; all wrap
 * struct_size() over the element's trailing flexible array member.
 */
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l)	struct_size((s), hdr_data, (l))
/* rule carrying the DUMMY_ETH_HDR_LEN-byte dummy Ethernet header */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
/* rule with no packet header data attached */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
/* large-action rule with n actions */
#define ICE_SW_RULE_LG_ACT_SIZE(s, n)		struct_size((s), act, (n))
/* VSI list rule covering n VSIs */
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n)		struct_size((s), vsi, (n))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
1300 
1301 /**
1302  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1303  * @hw: pointer to the HW struct
1304  *
1305  * Allocate memory for the entire recipe table and initialize the structures/
1306  * entries corresponding to basic recipes.
1307  */
1308 int ice_init_def_sw_recp(struct ice_hw *hw)
1309 {
1310 	struct ice_sw_recipe *recps;
1311 	u8 i;
1312 
1313 	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1314 			     sizeof(*recps), GFP_KERNEL);
1315 	if (!recps)
1316 		return -ENOMEM;
1317 
1318 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1319 		recps[i].root_rid = i;
1320 		INIT_LIST_HEAD(&recps[i].filt_rules);
1321 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1322 		INIT_LIST_HEAD(&recps[i].rg_list);
1323 		mutex_init(&recps[i].filt_rule_lock);
1324 	}
1325 
1326 	hw->switch_info->recp_list = recps;
1327 
1328 	return 0;
1329 }
1330 
1331 /**
1332  * ice_aq_get_sw_cfg - get switch configuration
1333  * @hw: pointer to the hardware structure
1334  * @buf: pointer to the result buffer
1335  * @buf_size: length of the buffer available for response
1336  * @req_desc: pointer to requested descriptor
1337  * @num_elems: pointer to number of elements
1338  * @cd: pointer to command details structure or NULL
1339  *
1340  * Get switch configuration (0x0200) to be placed in buf.
1341  * This admin command returns information such as initial VSI/port number
1342  * and switch ID it belongs to.
1343  *
1344  * NOTE: *req_desc is both an input/output parameter.
1345  * The caller of this function first calls this function with *request_desc set
1346  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1347  * configuration information has been returned; if non-zero (meaning not all
1348  * the information was returned), the caller should call this function again
1349  * with *req_desc set to the previous value returned by f/w to get the
1350  * next block of switch configuration information.
1351  *
1352  * *num_elems is output only parameter. This reflects the number of elements
1353  * in response buffer. The caller of this function to use *num_elems while
1354  * parsing the response buffer.
1355  */
1356 static int
1357 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1358 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
1359 		  struct ice_sq_cd *cd)
1360 {
1361 	struct ice_aqc_get_sw_cfg *cmd;
1362 	struct ice_aq_desc desc;
1363 	int status;
1364 
1365 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1366 	cmd = &desc.params.get_sw_conf;
1367 	cmd->element = cpu_to_le16(*req_desc);
1368 
1369 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1370 	if (!status) {
1371 		*req_desc = le16_to_cpu(cmd->element);
1372 		*num_elems = le16_to_cpu(cmd->num_elems);
1373 	}
1374 
1375 	return status;
1376 }
1377 
1378 /**
1379  * ice_aq_add_vsi
1380  * @hw: pointer to the HW struct
1381  * @vsi_ctx: pointer to a VSI context struct
1382  * @cd: pointer to command details structure or NULL
1383  *
1384  * Add a VSI context to the hardware (0x0210)
1385  */
1386 static int
1387 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1388 	       struct ice_sq_cd *cd)
1389 {
1390 	struct ice_aqc_add_update_free_vsi_resp *res;
1391 	struct ice_aqc_add_get_update_free_vsi *cmd;
1392 	struct ice_aq_desc desc;
1393 	int status;
1394 
1395 	cmd = &desc.params.vsi_cmd;
1396 	res = &desc.params.add_update_free_vsi_res;
1397 
1398 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1399 
1400 	if (!vsi_ctx->alloc_from_pool)
1401 		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
1402 					   ICE_AQ_VSI_IS_VALID);
1403 	cmd->vf_id = vsi_ctx->vf_num;
1404 
1405 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1406 
1407 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1408 
1409 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1410 				 sizeof(vsi_ctx->info), cd);
1411 
1412 	if (!status) {
1413 		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1414 		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
1415 		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
1416 	}
1417 
1418 	return status;
1419 }
1420 
1421 /**
1422  * ice_aq_free_vsi
1423  * @hw: pointer to the HW struct
1424  * @vsi_ctx: pointer to a VSI context struct
1425  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1426  * @cd: pointer to command details structure or NULL
1427  *
1428  * Free VSI context info from hardware (0x0213)
1429  */
1430 static int
1431 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1432 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
1433 {
1434 	struct ice_aqc_add_update_free_vsi_resp *resp;
1435 	struct ice_aqc_add_get_update_free_vsi *cmd;
1436 	struct ice_aq_desc desc;
1437 	int status;
1438 
1439 	cmd = &desc.params.vsi_cmd;
1440 	resp = &desc.params.add_update_free_vsi_res;
1441 
1442 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1443 
1444 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1445 	if (keep_vsi_alloc)
1446 		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
1447 
1448 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1449 	if (!status) {
1450 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1451 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1452 	}
1453 
1454 	return status;
1455 }
1456 
1457 /**
1458  * ice_aq_update_vsi
1459  * @hw: pointer to the HW struct
1460  * @vsi_ctx: pointer to a VSI context struct
1461  * @cd: pointer to command details structure or NULL
1462  *
1463  * Update VSI context in the hardware (0x0211)
1464  */
1465 static int
1466 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1467 		  struct ice_sq_cd *cd)
1468 {
1469 	struct ice_aqc_add_update_free_vsi_resp *resp;
1470 	struct ice_aqc_add_get_update_free_vsi *cmd;
1471 	struct ice_aq_desc desc;
1472 	int status;
1473 
1474 	cmd = &desc.params.vsi_cmd;
1475 	resp = &desc.params.add_update_free_vsi_res;
1476 
1477 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1478 
1479 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1480 
1481 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1482 
1483 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1484 				 sizeof(vsi_ctx->info), cd);
1485 
1486 	if (!status) {
1487 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1488 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1489 	}
1490 
1491 	return status;
1492 }
1493 
1494 /**
1495  * ice_is_vsi_valid - check whether the VSI is valid or not
1496  * @hw: pointer to the HW struct
1497  * @vsi_handle: VSI handle
1498  *
1499  * check whether the VSI is valid or not
1500  */
1501 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1502 {
1503 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1504 }
1505 
1506 /**
1507  * ice_get_hw_vsi_num - return the HW VSI number
1508  * @hw: pointer to the HW struct
1509  * @vsi_handle: VSI handle
1510  *
1511  * return the HW VSI number
1512  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1513  */
1514 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1515 {
1516 	return hw->vsi_ctx[vsi_handle]->vsi_num;
1517 }
1518 
1519 /**
1520  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1521  * @hw: pointer to the HW struct
1522  * @vsi_handle: VSI handle
1523  *
1524  * return the VSI context entry for a given VSI handle
1525  */
1526 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1527 {
1528 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1529 }
1530 
1531 /**
1532  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1533  * @hw: pointer to the HW struct
1534  * @vsi_handle: VSI handle
1535  * @vsi: VSI context pointer
1536  *
1537  * save the VSI context entry for a given VSI handle
1538  */
1539 static void
1540 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1541 {
1542 	hw->vsi_ctx[vsi_handle] = vsi;
1543 }
1544 
1545 /**
1546  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1547  * @hw: pointer to the HW struct
1548  * @vsi_handle: VSI handle
1549  */
1550 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1551 {
1552 	struct ice_vsi_ctx *vsi;
1553 	u8 i;
1554 
1555 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1556 	if (!vsi)
1557 		return;
1558 	ice_for_each_traffic_class(i) {
1559 		if (vsi->lan_q_ctx[i]) {
1560 			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1561 			vsi->lan_q_ctx[i] = NULL;
1562 		}
1563 		if (vsi->rdma_q_ctx[i]) {
1564 			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1565 			vsi->rdma_q_ctx[i] = NULL;
1566 		}
1567 	}
1568 }
1569 
1570 /**
1571  * ice_clear_vsi_ctx - clear the VSI context entry
1572  * @hw: pointer to the HW struct
1573  * @vsi_handle: VSI handle
1574  *
1575  * clear the VSI context entry
1576  */
1577 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1578 {
1579 	struct ice_vsi_ctx *vsi;
1580 
1581 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1582 	if (vsi) {
1583 		ice_clear_vsi_q_ctx(hw, vsi_handle);
1584 		devm_kfree(ice_hw_to_dev(hw), vsi);
1585 		hw->vsi_ctx[vsi_handle] = NULL;
1586 	}
1587 }
1588 
1589 /**
1590  * ice_clear_all_vsi_ctx - clear all the VSI context entries
1591  * @hw: pointer to the HW struct
1592  */
1593 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1594 {
1595 	u16 i;
1596 
1597 	for (i = 0; i < ICE_MAX_VSI; i++)
1598 		ice_clear_vsi_ctx(hw, i);
1599 }
1600 
1601 /**
1602  * ice_add_vsi - add VSI context to the hardware and VSI handle list
1603  * @hw: pointer to the HW struct
1604  * @vsi_handle: unique VSI handle provided by drivers
1605  * @vsi_ctx: pointer to a VSI context struct
1606  * @cd: pointer to command details structure or NULL
1607  *
1608  * Add a VSI context to the hardware also add it into the VSI handle list.
1609  * If this function gets called after reset for existing VSIs then update
1610  * with the new HW VSI number in the corresponding VSI handle list entry.
1611  */
1612 int
1613 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1614 	    struct ice_sq_cd *cd)
1615 {
1616 	struct ice_vsi_ctx *tmp_vsi_ctx;
1617 	int status;
1618 
1619 	if (vsi_handle >= ICE_MAX_VSI)
1620 		return -EINVAL;
1621 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1622 	if (status)
1623 		return status;
1624 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1625 	if (!tmp_vsi_ctx) {
1626 		/* Create a new VSI context */
1627 		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
1628 					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
1629 		if (!tmp_vsi_ctx) {
1630 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1631 			return -ENOMEM;
1632 		}
1633 		*tmp_vsi_ctx = *vsi_ctx;
1634 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1635 	} else {
1636 		/* update with new HW VSI num */
1637 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1638 	}
1639 
1640 	return 0;
1641 }
1642 
1643 /**
1644  * ice_free_vsi- free VSI context from hardware and VSI handle list
1645  * @hw: pointer to the HW struct
1646  * @vsi_handle: unique VSI handle
1647  * @vsi_ctx: pointer to a VSI context struct
1648  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1649  * @cd: pointer to command details structure or NULL
1650  *
1651  * Free VSI context info from hardware as well as from VSI handle list
1652  */
1653 int
1654 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1655 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
1656 {
1657 	int status;
1658 
1659 	if (!ice_is_vsi_valid(hw, vsi_handle))
1660 		return -EINVAL;
1661 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1662 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1663 	if (!status)
1664 		ice_clear_vsi_ctx(hw, vsi_handle);
1665 	return status;
1666 }
1667 
1668 /**
1669  * ice_update_vsi
1670  * @hw: pointer to the HW struct
1671  * @vsi_handle: unique VSI handle
1672  * @vsi_ctx: pointer to a VSI context struct
1673  * @cd: pointer to command details structure or NULL
1674  *
1675  * Update VSI context in the hardware
1676  */
1677 int
1678 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1679 	       struct ice_sq_cd *cd)
1680 {
1681 	if (!ice_is_vsi_valid(hw, vsi_handle))
1682 		return -EINVAL;
1683 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1684 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
1685 }
1686 
1687 /**
1688  * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1689  * @hw: pointer to HW struct
1690  * @vsi_handle: VSI SW index
1691  * @enable: boolean for enable/disable
1692  */
1693 int
1694 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1695 {
1696 	struct ice_vsi_ctx *ctx;
1697 
1698 	ctx = ice_get_vsi_ctx(hw, vsi_handle);
1699 	if (!ctx)
1700 		return -EIO;
1701 
1702 	if (enable)
1703 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1704 	else
1705 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1706 
1707 	return ice_update_vsi(hw, vsi_handle, ctx, NULL);
1708 }
1709 
1710 /**
1711  * ice_aq_alloc_free_vsi_list
1712  * @hw: pointer to the HW struct
1713  * @vsi_list_id: VSI list ID returned or used for lookup
1714  * @lkup_type: switch rule filter lookup type
1715  * @opc: switch rules population command type - pass in the command opcode
1716  *
1717  * allocates or free a VSI list resource
1718  */
1719 static int
1720 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1721 			   enum ice_sw_lkup_type lkup_type,
1722 			   enum ice_adminq_opc opc)
1723 {
1724 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1725 	struct ice_aqc_res_elem *vsi_ele;
1726 	u16 buf_len;
1727 	int status;
1728 
1729 	buf_len = struct_size(sw_buf, elem, 1);
1730 	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
1731 	if (!sw_buf)
1732 		return -ENOMEM;
1733 	sw_buf->num_elems = cpu_to_le16(1);
1734 
1735 	if (lkup_type == ICE_SW_LKUP_MAC ||
1736 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1737 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1738 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1739 	    lkup_type == ICE_SW_LKUP_PROMISC ||
1740 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
1741 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1742 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
1743 		sw_buf->res_type =
1744 			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1745 	} else {
1746 		status = -EINVAL;
1747 		goto ice_aq_alloc_free_vsi_list_exit;
1748 	}
1749 
1750 	if (opc == ice_aqc_opc_free_res)
1751 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1752 
1753 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1754 	if (status)
1755 		goto ice_aq_alloc_free_vsi_list_exit;
1756 
1757 	if (opc == ice_aqc_opc_alloc_res) {
1758 		vsi_ele = &sw_buf->elem[0];
1759 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1760 	}
1761 
1762 ice_aq_alloc_free_vsi_list_exit:
1763 	devm_kfree(ice_hw_to_dev(hw), sw_buf);
1764 	return status;
1765 }
1766 
1767 /**
1768  * ice_aq_sw_rules - add/update/remove switch rules
1769  * @hw: pointer to the HW struct
1770  * @rule_list: pointer to switch rule population list
1771  * @rule_list_sz: total size of the rule list in bytes
1772  * @num_rules: number of switch rules in the rule_list
1773  * @opc: switch rules population command type - pass in the command opcode
1774  * @cd: pointer to command details structure or NULL
1775  *
1776  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1777  */
1778 int
1779 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1780 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1781 {
1782 	struct ice_aq_desc desc;
1783 	int status;
1784 
1785 	if (opc != ice_aqc_opc_add_sw_rules &&
1786 	    opc != ice_aqc_opc_update_sw_rules &&
1787 	    opc != ice_aqc_opc_remove_sw_rules)
1788 		return -EINVAL;
1789 
1790 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1791 
1792 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1793 	desc.params.sw_rules.num_rules_fltr_entry_index =
1794 		cpu_to_le16(num_rules);
1795 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1796 	if (opc != ice_aqc_opc_add_sw_rules &&
1797 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1798 		status = -ENOENT;
1799 
1800 	return status;
1801 }
1802 
1803 /**
1804  * ice_aq_add_recipe - add switch recipe
1805  * @hw: pointer to the HW struct
1806  * @s_recipe_list: pointer to switch rule population list
1807  * @num_recipes: number of switch recipes in the list
1808  * @cd: pointer to command details structure or NULL
1809  *
1810  * Add(0x0290)
1811  */
1812 static int
1813 ice_aq_add_recipe(struct ice_hw *hw,
1814 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1815 		  u16 num_recipes, struct ice_sq_cd *cd)
1816 {
1817 	struct ice_aqc_add_get_recipe *cmd;
1818 	struct ice_aq_desc desc;
1819 	u16 buf_size;
1820 
1821 	cmd = &desc.params.add_get_recipe;
1822 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1823 
1824 	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1825 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1826 
1827 	buf_size = num_recipes * sizeof(*s_recipe_list);
1828 
1829 	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1830 }
1831 
1832 /**
1833  * ice_aq_get_recipe - get switch recipe
1834  * @hw: pointer to the HW struct
1835  * @s_recipe_list: pointer to switch rule population list
1836  * @num_recipes: pointer to the number of recipes (input and output)
1837  * @recipe_root: root recipe number of recipe(s) to retrieve
1838  * @cd: pointer to command details structure or NULL
1839  *
1840  * Get(0x0292)
1841  *
1842  * On input, *num_recipes should equal the number of entries in s_recipe_list.
1843  * On output, *num_recipes will equal the number of entries returned in
1844  * s_recipe_list.
1845  *
1846  * The caller must supply enough space in s_recipe_list to hold all possible
1847  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1848  */
1849 static int
1850 ice_aq_get_recipe(struct ice_hw *hw,
1851 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1852 		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1853 {
1854 	struct ice_aqc_add_get_recipe *cmd;
1855 	struct ice_aq_desc desc;
1856 	u16 buf_size;
1857 	int status;
1858 
1859 	if (*num_recipes != ICE_MAX_NUM_RECIPES)
1860 		return -EINVAL;
1861 
1862 	cmd = &desc.params.add_get_recipe;
1863 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1864 
1865 	cmd->return_index = cpu_to_le16(recipe_root);
1866 	cmd->num_sub_recipes = 0;
1867 
1868 	buf_size = *num_recipes * sizeof(*s_recipe_list);
1869 
1870 	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1871 	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1872 
1873 	return status;
1874 }
1875 
1876 /**
1877  * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
1878  * @hw: pointer to the HW struct
1879  * @params: parameters used to update the default recipe
1880  *
1881  * This function only supports updating default recipes and it only supports
1882  * updating a single recipe based on the lkup_idx at a time.
1883  *
1884  * This is done as a read-modify-write operation. First, get the current recipe
1885  * contents based on the recipe's ID. Then modify the field vector index and
1886  * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
1887  * the pre-existing recipe with the modifications.
1888  */
1889 int
1890 ice_update_recipe_lkup_idx(struct ice_hw *hw,
1891 			   struct ice_update_recipe_lkup_idx_params *params)
1892 {
1893 	struct ice_aqc_recipe_data_elem *rcp_list;
1894 	u16 num_recps = ICE_MAX_NUM_RECIPES;
1895 	int status;
1896 
1897 	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
1898 	if (!rcp_list)
1899 		return -ENOMEM;
1900 
1901 	/* read current recipe list from firmware */
1902 	rcp_list->recipe_indx = params->rid;
1903 	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
1904 	if (status) {
1905 		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
1906 			  params->rid, status);
1907 		goto error_out;
1908 	}
1909 
1910 	/* only modify existing recipe's lkup_idx and mask if valid, while
1911 	 * leaving all other fields the same, then update the recipe firmware
1912 	 */
1913 	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
1914 	if (params->mask_valid)
1915 		rcp_list->content.mask[params->lkup_idx] =
1916 			cpu_to_le16(params->mask);
1917 
1918 	if (params->ignore_valid)
1919 		rcp_list->content.lkup_indx[params->lkup_idx] |=
1920 			ICE_AQ_RECIPE_LKUP_IGNORE;
1921 
1922 	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
1923 	if (status)
1924 		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
1925 			  params->rid, params->lkup_idx, params->fv_idx,
1926 			  params->mask, params->mask_valid ? "true" : "false",
1927 			  status);
1928 
1929 error_out:
1930 	kfree(rcp_list);
1931 	return status;
1932 }
1933 
1934 /**
1935  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1936  * @hw: pointer to the HW struct
1937  * @profile_id: package profile ID to associate the recipe with
1938  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1939  * @cd: pointer to command details structure or NULL
1940  * Recipe to profile association (0x0291)
1941  */
1942 static int
1943 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1944 			     struct ice_sq_cd *cd)
1945 {
1946 	struct ice_aqc_recipe_to_profile *cmd;
1947 	struct ice_aq_desc desc;
1948 
1949 	cmd = &desc.params.recipe_to_profile;
1950 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1951 	cmd->profile_id = cpu_to_le16(profile_id);
1952 	/* Set the recipe ID bit in the bitmask to let the device know which
1953 	 * profile we are associating the recipe to
1954 	 */
1955 	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
1956 
1957 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1958 }
1959 
1960 /**
1961  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1962  * @hw: pointer to the HW struct
1963  * @profile_id: package profile ID to associate the recipe with
1964  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1965  * @cd: pointer to command details structure or NULL
1966  * Associate profile ID with given recipe (0x0293)
1967  */
1968 static int
1969 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1970 			     struct ice_sq_cd *cd)
1971 {
1972 	struct ice_aqc_recipe_to_profile *cmd;
1973 	struct ice_aq_desc desc;
1974 	int status;
1975 
1976 	cmd = &desc.params.recipe_to_profile;
1977 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1978 	cmd->profile_id = cpu_to_le16(profile_id);
1979 
1980 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1981 	if (!status)
1982 		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
1983 
1984 	return status;
1985 }
1986 
1987 /**
1988  * ice_alloc_recipe - add recipe resource
1989  * @hw: pointer to the hardware structure
1990  * @rid: recipe ID returned as response to AQ call
1991  */
1992 static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1993 {
1994 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1995 	u16 buf_len;
1996 	int status;
1997 
1998 	buf_len = struct_size(sw_buf, elem, 1);
1999 	sw_buf = kzalloc(buf_len, GFP_KERNEL);
2000 	if (!sw_buf)
2001 		return -ENOMEM;
2002 
2003 	sw_buf->num_elems = cpu_to_le16(1);
2004 	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
2005 					ICE_AQC_RES_TYPE_S) |
2006 					ICE_AQC_RES_TYPE_FLAG_SHARED);
2007 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2008 				       ice_aqc_opc_alloc_res, NULL);
2009 	if (!status)
2010 		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
2011 	kfree(sw_buf);
2012 
2013 	return status;
2014 }
2015 
2016 /**
2017  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2018  * @hw: pointer to hardware structure
2019  *
2020  * This function is used to populate recipe_to_profile matrix where index to
2021  * this array is the recipe ID and the element is the mapping of which profiles
2022  * is this recipe mapped to.
2023  */
2024 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2025 {
2026 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
2027 	u16 i;
2028 
2029 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2030 		u16 j;
2031 
2032 		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2033 		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
2034 		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2035 			continue;
2036 		bitmap_copy(profile_to_recipe[i], r_bitmap,
2037 			    ICE_MAX_NUM_RECIPES);
2038 		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2039 			set_bit(i, recipe_to_profile[j]);
2040 	}
2041 }
2042 
2043 /**
2044  * ice_collect_result_idx - copy result index values
2045  * @buf: buffer that contains the result index
2046  * @recp: the recipe struct to copy data into
2047  */
2048 static void
2049 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2050 		       struct ice_sw_recipe *recp)
2051 {
2052 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2053 		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2054 			recp->res_idxs);
2055 }
2056 
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the status of the
 * get-recipe AQ call if the recipe could not be read from firmware.
 */
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	/* accumulates result indices seen across the whole recipe chain */
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	/* in: capacity of tmp; out: number of entries FW returned
	 * (presumably updated by ice_aq_get_recipe — confirm there)
	 */
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u8 fv_word_idx = 0;
	u16 sub_recps;
	int status;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	if (status)
		goto err_unroll;

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	}

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	/* walk every sub-recipe (chain element) returned for this recipe */
	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		bool is_root;
		u16 off = 0;

		/* devres-managed: freed automatically on driver detach */
		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
					GFP_KERNEL);
		if (!rg_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
				result_bm);

		/* get the first profile that is associated with rid */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		/* lkup_indx[0] is skipped; only words 1..ICE_NUM_WORDS_RECIPE
		 * carry field-vector lookups
		 */
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
				continue;

			/* translate the FV index into protocol ID + offset */
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			fv_word_idx++;
		}
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		list_add(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
		} else {
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		}

		if (!is_root)
			continue;

		/* Only do the following for root recipes entries */
		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
		       sizeof(recps[idx].r_bitmap));
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	}

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	/* keep a devres-managed copy of the raw FW recipe data */
	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
					   GFP_KERNEL);
	if (!recps[rid].root_buf) {
		status = -ENOMEM;
		goto err_unroll;
	}

	/* Copy result indexes */
	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;

err_unroll:
	/* tmp is freed on both the success and error paths */
	kfree(tmp);
	return status;
}
2214 
2215 /* ice_init_port_info - Initialize port_info with switch configuration data
2216  * @pi: pointer to port_info
2217  * @vsi_port_num: VSI number or port number
2218  * @type: Type of switch element (port or VSI)
2219  * @swid: switch ID of the switch the element is attached to
2220  * @pf_vf_num: PF or VF number
2221  * @is_vf: true if the element is a VF, false otherwise
2222  */
2223 static void
2224 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2225 		   u16 swid, u16 pf_vf_num, bool is_vf)
2226 {
2227 	switch (type) {
2228 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2229 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2230 		pi->sw_id = swid;
2231 		pi->pf_vf_num = pf_vf_num;
2232 		pi->is_vf = is_vf;
2233 		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2234 		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2235 		break;
2236 	default:
2237 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2238 		break;
2239 	}
2240 }
2241 
2242 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2243  * @hw: pointer to the hardware structure
2244  */
2245 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2246 {
2247 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2248 	u16 req_desc = 0;
2249 	u16 num_elems;
2250 	int status;
2251 	u16 i;
2252 
2253 	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
2254 			    GFP_KERNEL);
2255 
2256 	if (!rbuf)
2257 		return -ENOMEM;
2258 
2259 	/* Multiple calls to ice_aq_get_sw_cfg may be required
2260 	 * to get all the switch configuration information. The need
2261 	 * for additional calls is indicated by ice_aq_get_sw_cfg
2262 	 * writing a non-zero value in req_desc
2263 	 */
2264 	do {
2265 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
2266 
2267 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2268 					   &req_desc, &num_elems, NULL);
2269 
2270 		if (status)
2271 			break;
2272 
2273 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2274 			u16 pf_vf_num, swid, vsi_port_num;
2275 			bool is_vf = false;
2276 			u8 res_type;
2277 
2278 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2279 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2280 
2281 			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2282 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2283 
2284 			swid = le16_to_cpu(ele->swid);
2285 
2286 			if (le16_to_cpu(ele->pf_vf_num) &
2287 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2288 				is_vf = true;
2289 
2290 			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2291 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2292 
2293 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2294 				/* FW VSI is not needed. Just continue. */
2295 				continue;
2296 			}
2297 
2298 			ice_init_port_info(hw->port_info, vsi_port_num,
2299 					   res_type, swid, pf_vf_num, is_vf);
2300 		}
2301 	} while (req_desc && !status);
2302 
2303 	devm_kfree(ice_hw_to_dev(hw), rbuf);
2304 	return status;
2305 }
2306 
2307 /**
2308  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2309  * @hw: pointer to the hardware structure
2310  * @fi: filter info structure to fill/update
2311  *
2312  * This helper function populates the lb_en and lan_en elements of the provided
2313  * ice_fltr_info struct using the switch's type and characteristics of the
2314  * switch rule being configured.
2315  */
2316 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2317 {
2318 	fi->lb_en = false;
2319 	fi->lan_en = false;
2320 	if ((fi->flag & ICE_FLTR_TX) &&
2321 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
2322 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2323 	     fi->fltr_act == ICE_FWD_TO_Q ||
2324 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2325 		/* Setting LB for prune actions will result in replicated
2326 		 * packets to the internal switch that will be dropped.
2327 		 */
2328 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2329 			fi->lb_en = true;
2330 
2331 		/* Set lan_en to TRUE if
2332 		 * 1. The switch is a VEB AND
2333 		 * 2
2334 		 * 2.1 The lookup is a directional lookup like ethertype,
2335 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
2336 		 * and default-port OR
2337 		 * 2.2 The lookup is VLAN, OR
2338 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2339 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2340 		 *
2341 		 * OR
2342 		 *
2343 		 * The switch is a VEPA.
2344 		 *
2345 		 * In all other cases, the LAN enable has to be set to false.
2346 		 */
2347 		if (hw->evb_veb) {
2348 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2349 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2350 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2351 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2352 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
2353 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
2354 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
2355 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2356 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2357 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2358 				fi->lan_en = true;
2359 		} else {
2360 			fi->lan_en = true;
2361 		}
2362 	}
2363 }
2364 
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the action word and dummy Ethernet header for the lookup rule.
 * For a remove opcode only the rule index is needed, so the function
 * short-circuits in that case.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_sw_rule_lkup_rx_tx *s_rule,
		 enum ice_adminq_opc opc)
{
	/* ICE_MAX_VLAN_ID + 1 acts as "no VLAN to program" sentinel;
	 * checked against ICE_MAX_VLAN_ID before writing the TCI below
	 */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u16 vlan_tpid = ETH_P_8021Q;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* removal only needs the rule index; no header or action required */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->act = 0;
		s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
		s_rule->hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->hdr_data;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	/* build the action word from the forwarding action */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue group size is encoded as a power-of-two region */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unknown action: leave s_rule untouched */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* pick up the MAC/VLAN/ethertype match data per lookup type */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		/* ethertype goes at bytes 12-13 of the dummy header */
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->src = cpu_to_le16(f_info->src);
	s_rule->act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* program VLAN TCI + TPID into the dummy header when a valid VLAN ID
	 * was set above (see header layout comment at top of file)
	 */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(vlan_tpid);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
}
2502 
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 *
 * Return: 0 on success, -EINVAL for non-MAC lookups, -ENOMEM on allocation
 * failure, or the AQ status.
 */
static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_sw_rule_lkup_rx_tx *rx_tx;
	struct ice_sw_rule_lg_act *lg_act;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	u16 lg_act_size;
	u16 rules_size;
	int status;
	u32 act;
	u16 id;

	/* markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return -EINVAL;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return -ENOMEM;

	/* the lookup rule starts right after the large action in the buffer */
	rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->index = cpu_to_le16(l_id);
	lg_act->size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->act[1] = cpu_to_le32(act);

	/* marker value lands at the Rx descriptor profile index offset */
	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
				 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
				  ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	/* submit both rules (count of 2) in one AQ call */
	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
2605 
2606 /**
2607  * ice_create_vsi_list_map
2608  * @hw: pointer to the hardware structure
2609  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2610  * @num_vsi: number of VSI handles in the array
2611  * @vsi_list_id: VSI list ID generated as part of allocate resource
2612  *
2613  * Helper function to create a new entry of VSI list ID to VSI mapping
2614  * using the given VSI list ID
2615  */
2616 static struct ice_vsi_list_map_info *
2617 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2618 			u16 vsi_list_id)
2619 {
2620 	struct ice_switch_info *sw = hw->switch_info;
2621 	struct ice_vsi_list_map_info *v_map;
2622 	int i;
2623 
2624 	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2625 	if (!v_map)
2626 		return NULL;
2627 
2628 	v_map->vsi_list_id = vsi_list_id;
2629 	v_map->ref_cnt = 1;
2630 	for (i = 0; i < num_vsi; i++)
2631 		set_bit(vsi_handle_arr[i], v_map->vsi_map);
2632 
2633 	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2634 	return v_map;
2635 }
2636 
2637 /**
2638  * ice_update_vsi_list_rule
2639  * @hw: pointer to the hardware structure
2640  * @vsi_handle_arr: array of VSI handles to form a VSI list
2641  * @num_vsi: number of VSI handles in the array
2642  * @vsi_list_id: VSI list ID generated as part of allocate resource
2643  * @remove: Boolean value to indicate if this is a remove action
2644  * @opc: switch rules population command type - pass in the command opcode
2645  * @lkup_type: lookup type of the filter
2646  *
2647  * Call AQ command to add a new switch rule or update existing switch rule
2648  * using the given VSI list ID
2649  */
2650 static int
2651 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2652 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2653 			 enum ice_sw_lkup_type lkup_type)
2654 {
2655 	struct ice_sw_rule_vsi_list *s_rule;
2656 	u16 s_rule_size;
2657 	u16 rule_type;
2658 	int status;
2659 	int i;
2660 
2661 	if (!num_vsi)
2662 		return -EINVAL;
2663 
2664 	if (lkup_type == ICE_SW_LKUP_MAC ||
2665 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2666 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2667 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2668 	    lkup_type == ICE_SW_LKUP_PROMISC ||
2669 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
2670 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2671 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2672 	else if (lkup_type == ICE_SW_LKUP_VLAN)
2673 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2674 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2675 	else
2676 		return -EINVAL;
2677 
2678 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2679 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2680 	if (!s_rule)
2681 		return -ENOMEM;
2682 	for (i = 0; i < num_vsi; i++) {
2683 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2684 			status = -EINVAL;
2685 			goto exit;
2686 		}
2687 		/* AQ call requires hw_vsi_id(s) */
2688 		s_rule->vsi[i] =
2689 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2690 	}
2691 
2692 	s_rule->hdr.type = cpu_to_le16(rule_type);
2693 	s_rule->number_vsi = cpu_to_le16(num_vsi);
2694 	s_rule->index = cpu_to_le16(vsi_list_id);
2695 
2696 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2697 
2698 exit:
2699 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2700 	return status;
2701 }
2702 
2703 /**
2704  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2705  * @hw: pointer to the HW struct
2706  * @vsi_handle_arr: array of VSI handles to form a VSI list
2707  * @num_vsi: number of VSI handles in the array
2708  * @vsi_list_id: stores the ID of the VSI list to be created
2709  * @lkup_type: switch rule filter's lookup type
2710  */
2711 static int
2712 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2713 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2714 {
2715 	int status;
2716 
2717 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2718 					    ice_aqc_opc_alloc_res);
2719 	if (status)
2720 		return status;
2721 
2722 	/* Update the newly created VSI list to include the specified VSIs */
2723 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2724 					*vsi_list_id, false,
2725 					ice_aqc_opc_add_sw_rules, lkup_type);
2726 }
2727 
2728 /**
2729  * ice_create_pkt_fwd_rule
2730  * @hw: pointer to the hardware structure
2731  * @f_entry: entry containing packet forwarding information
2732  *
2733  * Create switch rule with given filter information and add an entry
2734  * to the corresponding filter management list to track this switch rule
2735  * and VSI mapping
2736  */
2737 static int
2738 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2739 			struct ice_fltr_list_entry *f_entry)
2740 {
2741 	struct ice_fltr_mgmt_list_entry *fm_entry;
2742 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2743 	enum ice_sw_lkup_type l_type;
2744 	struct ice_sw_recipe *recp;
2745 	int status;
2746 
2747 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2748 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2749 			      GFP_KERNEL);
2750 	if (!s_rule)
2751 		return -ENOMEM;
2752 	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2753 				GFP_KERNEL);
2754 	if (!fm_entry) {
2755 		status = -ENOMEM;
2756 		goto ice_create_pkt_fwd_rule_exit;
2757 	}
2758 
2759 	fm_entry->fltr_info = f_entry->fltr_info;
2760 
2761 	/* Initialize all the fields for the management entry */
2762 	fm_entry->vsi_count = 1;
2763 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2764 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2765 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2766 
2767 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2768 			 ice_aqc_opc_add_sw_rules);
2769 
2770 	status = ice_aq_sw_rules(hw, s_rule,
2771 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2772 				 ice_aqc_opc_add_sw_rules, NULL);
2773 	if (status) {
2774 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
2775 		goto ice_create_pkt_fwd_rule_exit;
2776 	}
2777 
2778 	f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2779 	fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2780 
2781 	/* The book keeping entries will get removed when base driver
2782 	 * calls remove filter AQ command
2783 	 */
2784 	l_type = fm_entry->fltr_info.lkup_type;
2785 	recp = &hw->switch_info->recp_list[l_type];
2786 	list_add(&fm_entry->list_entry, &recp->filt_rules);
2787 
2788 ice_create_pkt_fwd_rule_exit:
2789 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2790 	return status;
2791 }
2792 
2793 /**
2794  * ice_update_pkt_fwd_rule
2795  * @hw: pointer to the hardware structure
2796  * @f_info: filter information for switch rule
2797  *
2798  * Call AQ command to update a previously created switch rule with a
2799  * VSI list ID
2800  */
2801 static int
2802 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2803 {
2804 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2805 	int status;
2806 
2807 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2808 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2809 			      GFP_KERNEL);
2810 	if (!s_rule)
2811 		return -ENOMEM;
2812 
2813 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2814 
2815 	s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2816 
2817 	/* Update switch rule with new rule set to forward VSI list */
2818 	status = ice_aq_sw_rules(hw, s_rule,
2819 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2820 				 ice_aqc_opc_update_sw_rules, NULL);
2821 
2822 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2823 	return status;
2824 }
2825 
2826 /**
2827  * ice_update_sw_rule_bridge_mode
2828  * @hw: pointer to the HW struct
2829  *
2830  * Updates unicast switch filter rules based on VEB/VEPA mode
2831  */
2832 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2833 {
2834 	struct ice_switch_info *sw = hw->switch_info;
2835 	struct ice_fltr_mgmt_list_entry *fm_entry;
2836 	struct list_head *rule_head;
2837 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2838 	int status = 0;
2839 
2840 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2841 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2842 
2843 	mutex_lock(rule_lock);
2844 	list_for_each_entry(fm_entry, rule_head, list_entry) {
2845 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
2846 		u8 *addr = fi->l_data.mac.mac_addr;
2847 
2848 		/* Update unicast Tx rules to reflect the selected
2849 		 * VEB/VEPA mode
2850 		 */
2851 		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2852 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
2853 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2854 		     fi->fltr_act == ICE_FWD_TO_Q ||
2855 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2856 			status = ice_update_pkt_fwd_rule(hw, fi);
2857 			if (status)
2858 				break;
2859 		}
2860 	}
2861 
2862 	mutex_unlock(rule_lock);
2863 
2864 	return status;
2865 }
2866 
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 *
 * Return: 0 on success (including when the VSI is already in the list),
 * -EOPNOTSUPP for unsupported action combinations, -EEXIST if a rule for
 * the same VSI already exists, -EIO/-ENOMEM or AQ status on failure.
 */
static int
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status = 0;

	/* queue/queue-group destinations cannot be shared via a VSI list */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return -EOPNOTSUPP;

	/* nor can a queue action be mixed with an existing VSI action */
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		/* reuse the existing rule's ID so FW updates it in place */
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return -ENOMEM;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		/* vsi_count >= 2 implies a list map must already exist */
		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
2983 
2984 /**
2985  * ice_find_rule_entry - Search a rule entry
2986  * @hw: pointer to the hardware structure
2987  * @recp_id: lookup type for which the specified rule needs to be searched
2988  * @f_info: rule information
2989  *
2990  * Helper function to search for a given rule entry
2991  * Returns pointer to entry storing the rule if found
2992  */
2993 static struct ice_fltr_mgmt_list_entry *
2994 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2995 {
2996 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2997 	struct ice_switch_info *sw = hw->switch_info;
2998 	struct list_head *list_head;
2999 
3000 	list_head = &sw->recp_list[recp_id].filt_rules;
3001 	list_for_each_entry(list_itr, list_head, list_entry) {
3002 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3003 			    sizeof(f_info->l_data)) &&
3004 		    f_info->flag == list_itr->fltr_info.flag) {
3005 			ret = list_itr;
3006 			break;
3007 		}
3008 	}
3009 	return ret;
3010 }
3011 
3012 /**
3013  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3014  * @hw: pointer to the hardware structure
3015  * @recp_id: lookup type for which VSI lists needs to be searched
3016  * @vsi_handle: VSI handle to be found in VSI list
3017  * @vsi_list_id: VSI list ID found containing vsi_handle
3018  *
3019  * Helper function to search a VSI list with single entry containing given VSI
3020  * handle element. This can be extended further to search VSI list with more
3021  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3022  */
3023 static struct ice_vsi_list_map_info *
3024 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3025 			u16 *vsi_list_id)
3026 {
3027 	struct ice_vsi_list_map_info *map_info = NULL;
3028 	struct ice_switch_info *sw = hw->switch_info;
3029 	struct ice_fltr_mgmt_list_entry *list_itr;
3030 	struct list_head *list_head;
3031 
3032 	list_head = &sw->recp_list[recp_id].filt_rules;
3033 	list_for_each_entry(list_itr, list_head, list_entry) {
3034 		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
3035 			map_info = list_itr->vsi_list_info;
3036 			if (test_bit(vsi_handle, map_info->vsi_map)) {
3037 				*vsi_list_id = map_info->vsi_list_id;
3038 				return map_info;
3039 			}
3040 		}
3041 	}
3042 	return NULL;
3043 }
3044 
3045 /**
3046  * ice_add_rule_internal - add rule for a given lookup type
3047  * @hw: pointer to the hardware structure
3048  * @recp_id: lookup type (recipe ID) for which rule has to be added
3049  * @f_entry: structure containing MAC forwarding information
3050  *
3051  * Adds or updates the rule lists for a given recipe
3052  */
3053 static int
3054 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3055 		      struct ice_fltr_list_entry *f_entry)
3056 {
3057 	struct ice_switch_info *sw = hw->switch_info;
3058 	struct ice_fltr_info *new_fltr, *cur_fltr;
3059 	struct ice_fltr_mgmt_list_entry *m_entry;
3060 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3061 	int status = 0;
3062 
3063 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3064 		return -EINVAL;
3065 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3066 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3067 
3068 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3069 
3070 	mutex_lock(rule_lock);
3071 	new_fltr = &f_entry->fltr_info;
3072 	if (new_fltr->flag & ICE_FLTR_RX)
3073 		new_fltr->src = hw->port_info->lport;
3074 	else if (new_fltr->flag & ICE_FLTR_TX)
3075 		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
3076 
3077 	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
3078 	if (!m_entry) {
3079 		mutex_unlock(rule_lock);
3080 		return ice_create_pkt_fwd_rule(hw, f_entry);
3081 	}
3082 
3083 	cur_fltr = &m_entry->fltr_info;
3084 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3085 	mutex_unlock(rule_lock);
3086 
3087 	return status;
3088 }
3089 
3090 /**
3091  * ice_remove_vsi_list_rule
3092  * @hw: pointer to the hardware structure
3093  * @vsi_list_id: VSI list ID generated as part of allocate resource
3094  * @lkup_type: switch rule filter lookup type
3095  *
3096  * The VSI list should be emptied before this function is called to remove the
3097  * VSI list.
3098  */
3099 static int
3100 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3101 			 enum ice_sw_lkup_type lkup_type)
3102 {
3103 	struct ice_sw_rule_vsi_list *s_rule;
3104 	u16 s_rule_size;
3105 	int status;
3106 
3107 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
3108 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3109 	if (!s_rule)
3110 		return -ENOMEM;
3111 
3112 	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3113 	s_rule->index = cpu_to_le16(vsi_list_id);
3114 
3115 	/* Free the vsi_list resource that we allocated. It is assumed that the
3116 	 * list is empty at this point.
3117 	 */
3118 	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3119 					    ice_aqc_opc_free_res);
3120 
3121 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3122 	return status;
3123 }
3124 
3125 /**
3126  * ice_rem_update_vsi_list
3127  * @hw: pointer to the hardware structure
3128  * @vsi_handle: VSI handle of the VSI to remove
3129  * @fm_list: filter management entry for which the VSI list management needs to
3130  *           be done
3131  */
3132 static int
3133 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3134 			struct ice_fltr_mgmt_list_entry *fm_list)
3135 {
3136 	enum ice_sw_lkup_type lkup_type;
3137 	u16 vsi_list_id;
3138 	int status = 0;
3139 
3140 	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3141 	    fm_list->vsi_count == 0)
3142 		return -EINVAL;
3143 
3144 	/* A rule with the VSI being removed does not exist */
3145 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
3146 		return -ENOENT;
3147 
3148 	lkup_type = fm_list->fltr_info.lkup_type;
3149 	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3150 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3151 					  ice_aqc_opc_update_sw_rules,
3152 					  lkup_type);
3153 	if (status)
3154 		return status;
3155 
3156 	fm_list->vsi_count--;
3157 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3158 
3159 	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3160 		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3161 		struct ice_vsi_list_map_info *vsi_list_info =
3162 			fm_list->vsi_list_info;
3163 		u16 rem_vsi_handle;
3164 
3165 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
3166 						ICE_MAX_VSI);
3167 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3168 			return -EIO;
3169 
3170 		/* Make sure VSI list is empty before removing it below */
3171 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3172 						  vsi_list_id, true,
3173 						  ice_aqc_opc_update_sw_rules,
3174 						  lkup_type);
3175 		if (status)
3176 			return status;
3177 
3178 		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3179 		tmp_fltr_info.fwd_id.hw_vsi_id =
3180 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
3181 		tmp_fltr_info.vsi_handle = rem_vsi_handle;
3182 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3183 		if (status) {
3184 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3185 				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
3186 			return status;
3187 		}
3188 
3189 		fm_list->fltr_info = tmp_fltr_info;
3190 	}
3191 
3192 	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3193 	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3194 		struct ice_vsi_list_map_info *vsi_list_info =
3195 			fm_list->vsi_list_info;
3196 
3197 		/* Remove the VSI list since it is no longer used */
3198 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3199 		if (status) {
3200 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3201 				  vsi_list_id, status);
3202 			return status;
3203 		}
3204 
3205 		list_del(&vsi_list_info->list_entry);
3206 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
3207 		fm_list->vsi_list_info = NULL;
3208 	}
3209 
3210 	return status;
3211 }
3212 
3213 /**
3214  * ice_remove_rule_internal - Remove a filter rule of a given type
3215  * @hw: pointer to the hardware structure
3216  * @recp_id: recipe ID for which the rule needs to removed
3217  * @f_entry: rule entry containing filter information
3218  */
3219 static int
3220 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
3221 			 struct ice_fltr_list_entry *f_entry)
3222 {
3223 	struct ice_switch_info *sw = hw->switch_info;
3224 	struct ice_fltr_mgmt_list_entry *list_elem;
3225 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3226 	bool remove_rule = false;
3227 	u16 vsi_handle;
3228 	int status = 0;
3229 
3230 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3231 		return -EINVAL;
3232 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3233 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3234 
3235 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3236 	mutex_lock(rule_lock);
3237 	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
3238 	if (!list_elem) {
3239 		status = -ENOENT;
3240 		goto exit;
3241 	}
3242 
3243 	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3244 		remove_rule = true;
3245 	} else if (!list_elem->vsi_list_info) {
3246 		status = -ENOENT;
3247 		goto exit;
3248 	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
3249 		/* a ref_cnt > 1 indicates that the vsi_list is being
3250 		 * shared by multiple rules. Decrement the ref_cnt and
3251 		 * remove this rule, but do not modify the list, as it
3252 		 * is in-use by other rules.
3253 		 */
3254 		list_elem->vsi_list_info->ref_cnt--;
3255 		remove_rule = true;
3256 	} else {
3257 		/* a ref_cnt of 1 indicates the vsi_list is only used
3258 		 * by one rule. However, the original removal request is only
3259 		 * for a single VSI. Update the vsi_list first, and only
3260 		 * remove the rule if there are no further VSIs in this list.
3261 		 */
3262 		vsi_handle = f_entry->fltr_info.vsi_handle;
3263 		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3264 		if (status)
3265 			goto exit;
3266 		/* if VSI count goes to zero after updating the VSI list */
3267 		if (list_elem->vsi_count == 0)
3268 			remove_rule = true;
3269 	}
3270 
3271 	if (remove_rule) {
3272 		/* Remove the lookup rule */
3273 		struct ice_sw_rule_lkup_rx_tx *s_rule;
3274 
3275 		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
3276 				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3277 				      GFP_KERNEL);
3278 		if (!s_rule) {
3279 			status = -ENOMEM;
3280 			goto exit;
3281 		}
3282 
3283 		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3284 				 ice_aqc_opc_remove_sw_rules);
3285 
3286 		status = ice_aq_sw_rules(hw, s_rule,
3287 					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3288 					 1, ice_aqc_opc_remove_sw_rules, NULL);
3289 
3290 		/* Remove a book keeping from the list */
3291 		devm_kfree(ice_hw_to_dev(hw), s_rule);
3292 
3293 		if (status)
3294 			goto exit;
3295 
3296 		list_del(&list_elem->list_entry);
3297 		devm_kfree(ice_hw_to_dev(hw), list_elem);
3298 	}
3299 exit:
3300 	mutex_unlock(rule_lock);
3301 	return status;
3302 }
3303 
3304 /**
3305  * ice_mac_fltr_exist - does this MAC filter exist for given VSI
3306  * @hw: pointer to the hardware structure
3307  * @mac: MAC address to be checked (for MAC filter)
3308  * @vsi_handle: check MAC filter for this VSI
3309  */
3310 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
3311 {
3312 	struct ice_fltr_mgmt_list_entry *entry;
3313 	struct list_head *rule_head;
3314 	struct ice_switch_info *sw;
3315 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3316 	u16 hw_vsi_id;
3317 
3318 	if (!ice_is_vsi_valid(hw, vsi_handle))
3319 		return false;
3320 
3321 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3322 	sw = hw->switch_info;
3323 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3324 	if (!rule_head)
3325 		return false;
3326 
3327 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3328 	mutex_lock(rule_lock);
3329 	list_for_each_entry(entry, rule_head, list_entry) {
3330 		struct ice_fltr_info *f_info = &entry->fltr_info;
3331 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3332 
3333 		if (is_zero_ether_addr(mac_addr))
3334 			continue;
3335 
3336 		if (f_info->flag != ICE_FLTR_TX ||
3337 		    f_info->src_id != ICE_SRC_ID_VSI ||
3338 		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
3339 		    f_info->fltr_act != ICE_FWD_TO_VSI ||
3340 		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3341 			continue;
3342 
3343 		if (ether_addr_equal(mac, mac_addr)) {
3344 			mutex_unlock(rule_lock);
3345 			return true;
3346 		}
3347 	}
3348 	mutex_unlock(rule_lock);
3349 	return false;
3350 }
3351 
3352 /**
3353  * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
3354  * @hw: pointer to the hardware structure
3355  * @vlan_id: VLAN ID
3356  * @vsi_handle: check MAC filter for this VSI
3357  */
3358 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
3359 {
3360 	struct ice_fltr_mgmt_list_entry *entry;
3361 	struct list_head *rule_head;
3362 	struct ice_switch_info *sw;
3363 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3364 	u16 hw_vsi_id;
3365 
3366 	if (vlan_id > ICE_MAX_VLAN_ID)
3367 		return false;
3368 
3369 	if (!ice_is_vsi_valid(hw, vsi_handle))
3370 		return false;
3371 
3372 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3373 	sw = hw->switch_info;
3374 	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3375 	if (!rule_head)
3376 		return false;
3377 
3378 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3379 	mutex_lock(rule_lock);
3380 	list_for_each_entry(entry, rule_head, list_entry) {
3381 		struct ice_fltr_info *f_info = &entry->fltr_info;
3382 		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
3383 		struct ice_vsi_list_map_info *map_info;
3384 
3385 		if (entry_vlan_id > ICE_MAX_VLAN_ID)
3386 			continue;
3387 
3388 		if (f_info->flag != ICE_FLTR_TX ||
3389 		    f_info->src_id != ICE_SRC_ID_VSI ||
3390 		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
3391 			continue;
3392 
3393 		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
3394 		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
3395 		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
3396 			continue;
3397 
3398 		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
3399 			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3400 				continue;
3401 		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3402 			/* If filter_action is FWD_TO_VSI_LIST, make sure
3403 			 * that VSI being checked is part of VSI list
3404 			 */
3405 			if (entry->vsi_count == 1 &&
3406 			    entry->vsi_list_info) {
3407 				map_info = entry->vsi_list_info;
3408 				if (!test_bit(vsi_handle, map_info->vsi_map))
3409 					continue;
3410 			}
3411 		}
3412 
3413 		if (vlan_id == entry_vlan_id) {
3414 			mutex_unlock(rule_lock);
3415 			return true;
3416 		}
3417 	}
3418 	mutex_unlock(rule_lock);
3419 
3420 	return false;
3421 }
3422 
3423 /**
3424  * ice_add_mac - Add a MAC address based filter rule
3425  * @hw: pointer to the hardware structure
3426  * @m_list: list of MAC addresses and forwarding information
3427  *
3428  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3429  * multiple unicast addresses, the function assumes that all the
3430  * addresses are unique in a given add_mac call. It doesn't
3431  * check for duplicates in this case, removing duplicates from a given
3432  * list should be taken care of in the caller of this function.
3433  */
3434 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3435 {
3436 	struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter;
3437 	struct ice_fltr_list_entry *m_list_itr;
3438 	struct list_head *rule_head;
3439 	u16 total_elem_left, s_rule_size;
3440 	struct ice_switch_info *sw;
3441 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3442 	u16 num_unicast = 0;
3443 	int status = 0;
3444 	u8 elem_sent;
3445 
3446 	if (!m_list || !hw)
3447 		return -EINVAL;
3448 
3449 	s_rule = NULL;
3450 	sw = hw->switch_info;
3451 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3452 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3453 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3454 		u16 vsi_handle;
3455 		u16 hw_vsi_id;
3456 
3457 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3458 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
3459 		if (!ice_is_vsi_valid(hw, vsi_handle))
3460 			return -EINVAL;
3461 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3462 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3463 		/* update the src in case it is VSI num */
3464 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3465 			return -EINVAL;
3466 		m_list_itr->fltr_info.src = hw_vsi_id;
3467 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3468 		    is_zero_ether_addr(add))
3469 			return -EINVAL;
3470 		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
3471 			/* Don't overwrite the unicast address */
3472 			mutex_lock(rule_lock);
3473 			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3474 						&m_list_itr->fltr_info)) {
3475 				mutex_unlock(rule_lock);
3476 				return -EEXIST;
3477 			}
3478 			mutex_unlock(rule_lock);
3479 			num_unicast++;
3480 		} else if (is_multicast_ether_addr(add) ||
3481 			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
3482 			m_list_itr->status =
3483 				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3484 						      m_list_itr);
3485 			if (m_list_itr->status)
3486 				return m_list_itr->status;
3487 		}
3488 	}
3489 
3490 	mutex_lock(rule_lock);
3491 	/* Exit if no suitable entries were found for adding bulk switch rule */
3492 	if (!num_unicast) {
3493 		status = 0;
3494 		goto ice_add_mac_exit;
3495 	}
3496 
3497 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3498 
3499 	/* Allocate switch rule buffer for the bulk update for unicast */
3500 	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
3501 	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
3502 			      GFP_KERNEL);
3503 	if (!s_rule) {
3504 		status = -ENOMEM;
3505 		goto ice_add_mac_exit;
3506 	}
3507 
3508 	r_iter = s_rule;
3509 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3510 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3511 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3512 
3513 		if (is_unicast_ether_addr(mac_addr)) {
3514 			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3515 					 ice_aqc_opc_add_sw_rules);
3516 			r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
3517 		}
3518 	}
3519 
3520 	/* Call AQ bulk switch rule update for all unicast addresses */
3521 	r_iter = s_rule;
3522 	/* Call AQ switch rule in AQ_MAX chunk */
3523 	for (total_elem_left = num_unicast; total_elem_left > 0;
3524 	     total_elem_left -= elem_sent) {
3525 		struct ice_sw_rule_lkup_rx_tx *entry = r_iter;
3526 
3527 		elem_sent = min_t(u8, total_elem_left,
3528 				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3529 		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3530 					 elem_sent, ice_aqc_opc_add_sw_rules,
3531 					 NULL);
3532 		if (status)
3533 			goto ice_add_mac_exit;
3534 		r_iter = (typeof(s_rule))
3535 			((u8 *)r_iter + (elem_sent * s_rule_size));
3536 	}
3537 
3538 	/* Fill up rule ID based on the value returned from FW */
3539 	r_iter = s_rule;
3540 	list_for_each_entry(m_list_itr, m_list, list_entry) {
3541 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3542 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3543 		struct ice_fltr_mgmt_list_entry *fm_entry;
3544 
3545 		if (is_unicast_ether_addr(mac_addr)) {
3546 			f_info->fltr_rule_id = le16_to_cpu(r_iter->index);
3547 			f_info->fltr_act = ICE_FWD_TO_VSI;
3548 			/* Create an entry to track this MAC address */
3549 			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
3550 						sizeof(*fm_entry), GFP_KERNEL);
3551 			if (!fm_entry) {
3552 				status = -ENOMEM;
3553 				goto ice_add_mac_exit;
3554 			}
3555 			fm_entry->fltr_info = *f_info;
3556 			fm_entry->vsi_count = 1;
3557 			/* The book keeping entries will get removed when
3558 			 * base driver calls remove filter AQ command
3559 			 */
3560 
3561 			list_add(&fm_entry->list_entry, rule_head);
3562 			r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
3563 		}
3564 	}
3565 
3566 ice_add_mac_exit:
3567 	mutex_unlock(rule_lock);
3568 	if (s_rule)
3569 		devm_kfree(ice_hw_to_dev(hw), s_rule);
3570 	return status;
3571 }
3572 
3573 /**
3574  * ice_add_vlan_internal - Add one VLAN based filter rule
3575  * @hw: pointer to the hardware structure
3576  * @f_entry: filter entry containing one VLAN information
3577  */
3578 static int
3579 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3580 {
3581 	struct ice_switch_info *sw = hw->switch_info;
3582 	struct ice_fltr_mgmt_list_entry *v_list_itr;
3583 	struct ice_fltr_info *new_fltr, *cur_fltr;
3584 	enum ice_sw_lkup_type lkup_type;
3585 	u16 vsi_list_id = 0, vsi_handle;
3586 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3587 	int status = 0;
3588 
3589 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3590 		return -EINVAL;
3591 
3592 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3593 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3594 	new_fltr = &f_entry->fltr_info;
3595 
3596 	/* VLAN ID should only be 12 bits */
3597 	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3598 		return -EINVAL;
3599 
3600 	if (new_fltr->src_id != ICE_SRC_ID_VSI)
3601 		return -EINVAL;
3602 
3603 	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3604 	lkup_type = new_fltr->lkup_type;
3605 	vsi_handle = new_fltr->vsi_handle;
3606 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3607 	mutex_lock(rule_lock);
3608 	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3609 	if (!v_list_itr) {
3610 		struct ice_vsi_list_map_info *map_info = NULL;
3611 
3612 		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3613 			/* All VLAN pruning rules use a VSI list. Check if
3614 			 * there is already a VSI list containing VSI that we
3615 			 * want to add. If found, use the same vsi_list_id for
3616 			 * this new VLAN rule or else create a new list.
3617 			 */
3618 			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3619 							   vsi_handle,
3620 							   &vsi_list_id);
3621 			if (!map_info) {
3622 				status = ice_create_vsi_list_rule(hw,
3623 								  &vsi_handle,
3624 								  1,
3625 								  &vsi_list_id,
3626 								  lkup_type);
3627 				if (status)
3628 					goto exit;
3629 			}
3630 			/* Convert the action to forwarding to a VSI list. */
3631 			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3632 			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3633 		}
3634 
3635 		status = ice_create_pkt_fwd_rule(hw, f_entry);
3636 		if (!status) {
3637 			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3638 							 new_fltr);
3639 			if (!v_list_itr) {
3640 				status = -ENOENT;
3641 				goto exit;
3642 			}
3643 			/* reuse VSI list for new rule and increment ref_cnt */
3644 			if (map_info) {
3645 				v_list_itr->vsi_list_info = map_info;
3646 				map_info->ref_cnt++;
3647 			} else {
3648 				v_list_itr->vsi_list_info =
3649 					ice_create_vsi_list_map(hw, &vsi_handle,
3650 								1, vsi_list_id);
3651 			}
3652 		}
3653 	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3654 		/* Update existing VSI list to add new VSI ID only if it used
3655 		 * by one VLAN rule.
3656 		 */
3657 		cur_fltr = &v_list_itr->fltr_info;
3658 		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3659 						 new_fltr);
3660 	} else {
3661 		/* If VLAN rule exists and VSI list being used by this rule is
3662 		 * referenced by more than 1 VLAN rule. Then create a new VSI
3663 		 * list appending previous VSI with new VSI and update existing
3664 		 * VLAN rule to point to new VSI list ID
3665 		 */
3666 		struct ice_fltr_info tmp_fltr;
3667 		u16 vsi_handle_arr[2];
3668 		u16 cur_handle;
3669 
3670 		/* Current implementation only supports reusing VSI list with
3671 		 * one VSI count. We should never hit below condition
3672 		 */
3673 		if (v_list_itr->vsi_count > 1 &&
3674 		    v_list_itr->vsi_list_info->ref_cnt > 1) {
3675 			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3676 			status = -EIO;
3677 			goto exit;
3678 		}
3679 
3680 		cur_handle =
3681 			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3682 				       ICE_MAX_VSI);
3683 
3684 		/* A rule already exists with the new VSI being added */
3685 		if (cur_handle == vsi_handle) {
3686 			status = -EEXIST;
3687 			goto exit;
3688 		}
3689 
3690 		vsi_handle_arr[0] = cur_handle;
3691 		vsi_handle_arr[1] = vsi_handle;
3692 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3693 						  &vsi_list_id, lkup_type);
3694 		if (status)
3695 			goto exit;
3696 
3697 		tmp_fltr = v_list_itr->fltr_info;
3698 		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3699 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3700 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3701 		/* Update the previous switch rule to a new VSI list which
3702 		 * includes current VSI that is requested
3703 		 */
3704 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3705 		if (status)
3706 			goto exit;
3707 
3708 		/* before overriding VSI list map info. decrement ref_cnt of
3709 		 * previous VSI list
3710 		 */
3711 		v_list_itr->vsi_list_info->ref_cnt--;
3712 
3713 		/* now update to newly created list */
3714 		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3715 		v_list_itr->vsi_list_info =
3716 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3717 						vsi_list_id);
3718 		v_list_itr->vsi_count++;
3719 	}
3720 
3721 exit:
3722 	mutex_unlock(rule_lock);
3723 	return status;
3724 }
3725 
3726 /**
3727  * ice_add_vlan - Add VLAN based filter rule
3728  * @hw: pointer to the hardware structure
3729  * @v_list: list of VLAN entries and forwarding information
3730  */
3731 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3732 {
3733 	struct ice_fltr_list_entry *v_list_itr;
3734 
3735 	if (!v_list || !hw)
3736 		return -EINVAL;
3737 
3738 	list_for_each_entry(v_list_itr, v_list, list_entry) {
3739 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3740 			return -EINVAL;
3741 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3742 		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3743 		if (v_list_itr->status)
3744 			return v_list_itr->status;
3745 	}
3746 	return 0;
3747 }
3748 
3749 /**
3750  * ice_add_eth_mac - Add ethertype and MAC based filter rule
3751  * @hw: pointer to the hardware structure
3752  * @em_list: list of ether type MAC filter, MAC is optional
3753  *
3754  * This function requires the caller to populate the entries in
3755  * the filter list with the necessary fields (including flags to
3756  * indicate Tx or Rx rules).
3757  */
3758 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3759 {
3760 	struct ice_fltr_list_entry *em_list_itr;
3761 
3762 	if (!em_list || !hw)
3763 		return -EINVAL;
3764 
3765 	list_for_each_entry(em_list_itr, em_list, list_entry) {
3766 		enum ice_sw_lkup_type l_type =
3767 			em_list_itr->fltr_info.lkup_type;
3768 
3769 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3770 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3771 			return -EINVAL;
3772 
3773 		em_list_itr->status = ice_add_rule_internal(hw, l_type,
3774 							    em_list_itr);
3775 		if (em_list_itr->status)
3776 			return em_list_itr->status;
3777 	}
3778 	return 0;
3779 }
3780 
3781 /**
3782  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3783  * @hw: pointer to the hardware structure
3784  * @em_list: list of ethertype or ethertype MAC entries
3785  */
3786 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3787 {
3788 	struct ice_fltr_list_entry *em_list_itr, *tmp;
3789 
3790 	if (!em_list || !hw)
3791 		return -EINVAL;
3792 
3793 	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3794 		enum ice_sw_lkup_type l_type =
3795 			em_list_itr->fltr_info.lkup_type;
3796 
3797 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3798 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3799 			return -EINVAL;
3800 
3801 		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3802 							       em_list_itr);
3803 		if (em_list_itr->status)
3804 			return em_list_itr->status;
3805 	}
3806 	return 0;
3807 }
3808 
3809 /**
3810  * ice_rem_sw_rule_info
3811  * @hw: pointer to the hardware structure
3812  * @rule_head: pointer to the switch list structure that we want to delete
3813  */
3814 static void
3815 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3816 {
3817 	if (!list_empty(rule_head)) {
3818 		struct ice_fltr_mgmt_list_entry *entry;
3819 		struct ice_fltr_mgmt_list_entry *tmp;
3820 
3821 		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3822 			list_del(&entry->list_entry);
3823 			devm_kfree(ice_hw_to_dev(hw), entry);
3824 		}
3825 	}
3826 }
3827 
3828 /**
3829  * ice_rem_adv_rule_info
3830  * @hw: pointer to the hardware structure
3831  * @rule_head: pointer to the switch list structure that we want to delete
3832  */
3833 static void
3834 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3835 {
3836 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3837 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3838 
3839 	if (list_empty(rule_head))
3840 		return;
3841 
3842 	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3843 		list_del(&lst_itr->list_entry);
3844 		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3845 		devm_kfree(ice_hw_to_dev(hw), lst_itr);
3846 	}
3847 }
3848 
3849 /**
3850  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3851  * @hw: pointer to the hardware structure
3852  * @vsi_handle: VSI handle to set as default
3853  * @set: true to add the above mentioned switch rule, false to remove it
3854  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3855  *
3856  * add filter rule to set/unset given VSI as default VSI for the switch
3857  * (represented by swid)
3858  */
3859 int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
3860 {
3861 	struct ice_sw_rule_lkup_rx_tx *s_rule;
3862 	struct ice_fltr_info f_info;
3863 	enum ice_adminq_opc opcode;
3864 	u16 s_rule_size;
3865 	u16 hw_vsi_id;
3866 	int status;
3867 
3868 	if (!ice_is_vsi_valid(hw, vsi_handle))
3869 		return -EINVAL;
3870 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3871 
3872 	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule) :
3873 			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
3874 
3875 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3876 	if (!s_rule)
3877 		return -ENOMEM;
3878 
3879 	memset(&f_info, 0, sizeof(f_info));
3880 
3881 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
3882 	f_info.flag = direction;
3883 	f_info.fltr_act = ICE_FWD_TO_VSI;
3884 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3885 
3886 	if (f_info.flag & ICE_FLTR_RX) {
3887 		f_info.src = hw->port_info->lport;
3888 		f_info.src_id = ICE_SRC_ID_LPORT;
3889 		if (!set)
3890 			f_info.fltr_rule_id =
3891 				hw->port_info->dflt_rx_vsi_rule_id;
3892 	} else if (f_info.flag & ICE_FLTR_TX) {
3893 		f_info.src_id = ICE_SRC_ID_VSI;
3894 		f_info.src = hw_vsi_id;
3895 		if (!set)
3896 			f_info.fltr_rule_id =
3897 				hw->port_info->dflt_tx_vsi_rule_id;
3898 	}
3899 
3900 	if (set)
3901 		opcode = ice_aqc_opc_add_sw_rules;
3902 	else
3903 		opcode = ice_aqc_opc_remove_sw_rules;
3904 
3905 	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3906 
3907 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3908 	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3909 		goto out;
3910 	if (set) {
3911 		u16 index = le16_to_cpu(s_rule->index);
3912 
3913 		if (f_info.flag & ICE_FLTR_TX) {
3914 			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
3915 			hw->port_info->dflt_tx_vsi_rule_id = index;
3916 		} else if (f_info.flag & ICE_FLTR_RX) {
3917 			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
3918 			hw->port_info->dflt_rx_vsi_rule_id = index;
3919 		}
3920 	} else {
3921 		if (f_info.flag & ICE_FLTR_TX) {
3922 			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3923 			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3924 		} else if (f_info.flag & ICE_FLTR_RX) {
3925 			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3926 			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3927 		}
3928 	}
3929 
3930 out:
3931 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3932 	return status;
3933 }
3934 
3935 /**
3936  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3937  * @hw: pointer to the hardware structure
3938  * @recp_id: lookup type for which the specified rule needs to be searched
3939  * @f_info: rule information
3940  *
3941  * Helper function to search for a unicast rule entry - this is to be used
3942  * to remove unicast MAC filter that is not shared with other VSIs on the
3943  * PF switch.
3944  *
3945  * Returns pointer to entry storing the rule if found
3946  */
3947 static struct ice_fltr_mgmt_list_entry *
3948 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3949 			  struct ice_fltr_info *f_info)
3950 {
3951 	struct ice_switch_info *sw = hw->switch_info;
3952 	struct ice_fltr_mgmt_list_entry *list_itr;
3953 	struct list_head *list_head;
3954 
3955 	list_head = &sw->recp_list[recp_id].filt_rules;
3956 	list_for_each_entry(list_itr, list_head, list_entry) {
3957 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3958 			    sizeof(f_info->l_data)) &&
3959 		    f_info->fwd_id.hw_vsi_id ==
3960 		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
3961 		    f_info->flag == list_itr->fltr_info.flag)
3962 			return list_itr;
3963 	}
3964 	return NULL;
3965 }
3966 
3967 /**
3968  * ice_remove_mac - remove a MAC address based filter rule
3969  * @hw: pointer to the hardware structure
3970  * @m_list: list of MAC addresses and forwarding information
3971  *
3972  * This function removes either a MAC filter rule or a specific VSI from a
3973  * VSI list for a multicast MAC address.
3974  *
3975  * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3976  * be aware that this call will only work if all the entries passed into m_list
3977  * were added previously. It will not attempt to do a partial remove of entries
3978  * that were found.
3979  */
int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	if (!m_list)
		return -EINVAL;

	rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;

		/* Only plain MAC lookups are legal on this list */
		if (l_type != ICE_SW_LKUP_MAC)
			return -EINVAL;

		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return -EINVAL;

		list_itr->fltr_info.fwd_id.hw_vsi_id =
					ice_get_hw_vsi_num(hw, vsi_handle);
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't remove the unicast address that belongs to
			 * another VSI on the switch, since it is not being
			 * shared...
			 */
			mutex_lock(rule_lock);
			if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
						       &list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return -ENOENT;
			}
			mutex_unlock(rule_lock);
		}
		/* Any failure aborts the walk immediately; entries removed on
		 * earlier iterations are NOT restored (see function header).
		 */
		list_itr->status = ice_remove_rule_internal(hw,
							    ICE_SW_LKUP_MAC,
							    list_itr);
		if (list_itr->status)
			return list_itr->status;
	}
	return 0;
}
4024 
4025 /**
4026  * ice_remove_vlan - Remove VLAN based filter rule
4027  * @hw: pointer to the hardware structure
4028  * @v_list: list of VLAN entries and forwarding information
4029  */
4030 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
4031 {
4032 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4033 
4034 	if (!v_list || !hw)
4035 		return -EINVAL;
4036 
4037 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4038 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4039 
4040 		if (l_type != ICE_SW_LKUP_VLAN)
4041 			return -EINVAL;
4042 		v_list_itr->status = ice_remove_rule_internal(hw,
4043 							      ICE_SW_LKUP_VLAN,
4044 							      v_list_itr);
4045 		if (v_list_itr->status)
4046 			return v_list_itr->status;
4047 	}
4048 	return 0;
4049 }
4050 
4051 /**
4052  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4053  * @fm_entry: filter entry to inspect
4054  * @vsi_handle: VSI handle to compare with filter info
4055  */
4056 static bool
4057 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4058 {
4059 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4060 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4061 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4062 		 fm_entry->vsi_list_info &&
4063 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
4064 }
4065 
4066 /**
4067  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4068  * @hw: pointer to the hardware structure
4069  * @vsi_handle: VSI handle to remove filters from
4070  * @vsi_list_head: pointer to the list to add entry to
4071  * @fi: pointer to fltr_info of filter entry to copy & add
4072  *
4073  * Helper function, used when creating a list of filters to remove from
4074  * a specific VSI. The entry added to vsi_list_head is a COPY of the
4075  * original filter entry, with the exception of fltr_info.fltr_act and
4076  * fltr_info.fwd_id fields. These are set such that later logic can
4077  * extract which VSI to remove the fltr from, and pass on that information.
4078  */
4079 static int
4080 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4081 			       struct list_head *vsi_list_head,
4082 			       struct ice_fltr_info *fi)
4083 {
4084 	struct ice_fltr_list_entry *tmp;
4085 
4086 	/* this memory is freed up in the caller function
4087 	 * once filters for this VSI are removed
4088 	 */
4089 	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
4090 	if (!tmp)
4091 		return -ENOMEM;
4092 
4093 	tmp->fltr_info = *fi;
4094 
4095 	/* Overwrite these fields to indicate which VSI to remove filter from,
4096 	 * so find and remove logic can extract the information from the
4097 	 * list entries. Note that original entries will still have proper
4098 	 * values.
4099 	 */
4100 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4101 	tmp->fltr_info.vsi_handle = vsi_handle;
4102 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4103 
4104 	list_add(&tmp->list_entry, vsi_list_head);
4105 
4106 	return 0;
4107 }
4108 
4109 /**
4110  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4111  * @hw: pointer to the hardware structure
4112  * @vsi_handle: VSI handle to remove filters from
4113  * @lkup_list_head: pointer to the list that has certain lookup type filters
4114  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4115  *
4116  * Locates all filters in lkup_list_head that are used by the given VSI,
4117  * and adds COPIES of those entries to vsi_list_head (intended to be used
4118  * to remove the listed filters).
4119  * Note that this means all entries in vsi_list_head must be explicitly
4120  * deallocated by the caller when done with list.
4121  */
4122 static int
4123 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4124 			 struct list_head *lkup_list_head,
4125 			 struct list_head *vsi_list_head)
4126 {
4127 	struct ice_fltr_mgmt_list_entry *fm_entry;
4128 	int status = 0;
4129 
4130 	/* check to make sure VSI ID is valid and within boundary */
4131 	if (!ice_is_vsi_valid(hw, vsi_handle))
4132 		return -EINVAL;
4133 
4134 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4135 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4136 			continue;
4137 
4138 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4139 							vsi_list_head,
4140 							&fm_entry->fltr_info);
4141 		if (status)
4142 			return status;
4143 	}
4144 	return status;
4145 }
4146 
4147 /**
4148  * ice_determine_promisc_mask
4149  * @fi: filter info to parse
4150  *
4151  * Helper function to determine which ICE_PROMISC_ mask corresponds
4152  * to given filter into.
4153  */
4154 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4155 {
4156 	u16 vid = fi->l_data.mac_vlan.vlan_id;
4157 	u8 *macaddr = fi->l_data.mac.mac_addr;
4158 	bool is_tx_fltr = false;
4159 	u8 promisc_mask = 0;
4160 
4161 	if (fi->flag == ICE_FLTR_TX)
4162 		is_tx_fltr = true;
4163 
4164 	if (is_broadcast_ether_addr(macaddr))
4165 		promisc_mask |= is_tx_fltr ?
4166 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4167 	else if (is_multicast_ether_addr(macaddr))
4168 		promisc_mask |= is_tx_fltr ?
4169 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4170 	else if (is_unicast_ether_addr(macaddr))
4171 		promisc_mask |= is_tx_fltr ?
4172 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4173 	if (vid)
4174 		promisc_mask |= is_tx_fltr ?
4175 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4176 
4177 	return promisc_mask;
4178 }
4179 
4180 /**
4181  * ice_remove_promisc - Remove promisc based filter rules
4182  * @hw: pointer to the hardware structure
4183  * @recp_id: recipe ID for which the rule needs to removed
4184  * @v_list: list of promisc entries
4185  */
4186 static int
4187 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4188 {
4189 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4190 
4191 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4192 		v_list_itr->status =
4193 			ice_remove_rule_internal(hw, recp_id, v_list_itr);
4194 		if (v_list_itr->status)
4195 			return v_list_itr->status;
4196 	}
4197 	return 0;
4198 }
4199 
4200 /**
4201  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4202  * @hw: pointer to the hardware structure
4203  * @vsi_handle: VSI handle to clear mode
4204  * @promisc_mask: mask of promiscuous config bits to clear
4205  * @vid: VLAN ID to clear VLAN promiscuous
4206  */
int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	int status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* VLAN promiscuous bits live on a separate recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* Pass 1 (under the lock): copy every matching rule onto a private
	 * removal list.  Pass 2 (after dropping the lock): remove them.
	 */
	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		/* for VLAN promiscuous, only rules for this VLAN ID match */
		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* free the pass-1 copies regardless of removal success */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}
4272 
4273 /**
4274  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4275  * @hw: pointer to the hardware structure
4276  * @vsi_handle: VSI handle to configure
4277  * @promisc_mask: mask of promiscuous config bits
4278  * @vid: VLAN ID to set VLAN promiscuous
4279  */
int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	bool is_tx_fltr;
	int status = 0;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	/* VLAN promiscuous uses its own recipe and carries the VLAN ID */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* consume exactly one ucast/mcast/bcast bit per iteration */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		/* one switch rule per direction/packet-type combination */
		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
4384 
4385 /**
4386  * ice_set_vlan_vsi_promisc
4387  * @hw: pointer to the hardware structure
4388  * @vsi_handle: VSI handle to configure
4389  * @promisc_mask: mask of promiscuous config bits
4390  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4391  *
4392  * Configure VSI with all associated VLANs to given promiscuous mode(s)
4393  */
4394 int
4395 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4396 			 bool rm_vlan_promisc)
4397 {
4398 	struct ice_switch_info *sw = hw->switch_info;
4399 	struct ice_fltr_list_entry *list_itr, *tmp;
4400 	struct list_head vsi_list_head;
4401 	struct list_head *vlan_head;
4402 	struct mutex *vlan_lock; /* Lock to protect filter rule list */
4403 	u16 vlan_id;
4404 	int status;
4405 
4406 	INIT_LIST_HEAD(&vsi_list_head);
4407 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4408 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4409 	mutex_lock(vlan_lock);
4410 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4411 					  &vsi_list_head);
4412 	mutex_unlock(vlan_lock);
4413 	if (status)
4414 		goto free_fltr_list;
4415 
4416 	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
4417 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4418 		if (rm_vlan_promisc)
4419 			status = ice_clear_vsi_promisc(hw, vsi_handle,
4420 						       promisc_mask, vlan_id);
4421 		else
4422 			status = ice_set_vsi_promisc(hw, vsi_handle,
4423 						     promisc_mask, vlan_id);
4424 		if (status)
4425 			break;
4426 	}
4427 
4428 free_fltr_list:
4429 	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
4430 		list_del(&list_itr->list_entry);
4431 		devm_kfree(ice_hw_to_dev(hw), list_itr);
4432 	}
4433 	return status;
4434 }
4435 
4436 /**
4437  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4438  * @hw: pointer to the hardware structure
4439  * @vsi_handle: VSI handle to remove filters from
4440  * @lkup: switch rule filter lookup type
4441  */
4442 static void
4443 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4444 			 enum ice_sw_lkup_type lkup)
4445 {
4446 	struct ice_switch_info *sw = hw->switch_info;
4447 	struct ice_fltr_list_entry *fm_entry;
4448 	struct list_head remove_list_head;
4449 	struct list_head *rule_head;
4450 	struct ice_fltr_list_entry *tmp;
4451 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4452 	int status;
4453 
4454 	INIT_LIST_HEAD(&remove_list_head);
4455 	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4456 	rule_head = &sw->recp_list[lkup].filt_rules;
4457 	mutex_lock(rule_lock);
4458 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4459 					  &remove_list_head);
4460 	mutex_unlock(rule_lock);
4461 	if (status)
4462 		goto free_fltr_list;
4463 
4464 	switch (lkup) {
4465 	case ICE_SW_LKUP_MAC:
4466 		ice_remove_mac(hw, &remove_list_head);
4467 		break;
4468 	case ICE_SW_LKUP_VLAN:
4469 		ice_remove_vlan(hw, &remove_list_head);
4470 		break;
4471 	case ICE_SW_LKUP_PROMISC:
4472 	case ICE_SW_LKUP_PROMISC_VLAN:
4473 		ice_remove_promisc(hw, lkup, &remove_list_head);
4474 		break;
4475 	case ICE_SW_LKUP_MAC_VLAN:
4476 	case ICE_SW_LKUP_ETHERTYPE:
4477 	case ICE_SW_LKUP_ETHERTYPE_MAC:
4478 	case ICE_SW_LKUP_DFLT:
4479 	case ICE_SW_LKUP_LAST:
4480 	default:
4481 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
4482 		break;
4483 	}
4484 
4485 free_fltr_list:
4486 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4487 		list_del(&fm_entry->list_entry);
4488 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4489 	}
4490 }
4491 
4492 /**
4493  * ice_remove_vsi_fltr - Remove all filters for a VSI
4494  * @hw: pointer to the hardware structure
4495  * @vsi_handle: VSI handle to remove filters from
4496  */
4497 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4498 {
4499 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4500 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4501 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4502 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4503 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4504 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4505 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4506 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4507 }
4508 
4509 /**
4510  * ice_alloc_res_cntr - allocating resource counter
4511  * @hw: pointer to the hardware structure
4512  * @type: type of resource
4513  * @alloc_shared: if set it is shared else dedicated
4514  * @num_items: number of entries requested for FD resource type
4515  * @counter_id: counter index returned by AQ call
4516  */
4517 int
4518 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4519 		   u16 *counter_id)
4520 {
4521 	struct ice_aqc_alloc_free_res_elem *buf;
4522 	u16 buf_len;
4523 	int status;
4524 
4525 	/* Allocate resource */
4526 	buf_len = struct_size(buf, elem, 1);
4527 	buf = kzalloc(buf_len, GFP_KERNEL);
4528 	if (!buf)
4529 		return -ENOMEM;
4530 
4531 	buf->num_elems = cpu_to_le16(num_items);
4532 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4533 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4534 
4535 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4536 				       ice_aqc_opc_alloc_res, NULL);
4537 	if (status)
4538 		goto exit;
4539 
4540 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4541 
4542 exit:
4543 	kfree(buf);
4544 	return status;
4545 }
4546 
4547 /**
4548  * ice_free_res_cntr - free resource counter
4549  * @hw: pointer to the hardware structure
4550  * @type: type of resource
4551  * @alloc_shared: if set it is shared else dedicated
4552  * @num_items: number of entries to be freed for FD resource type
4553  * @counter_id: counter ID resource which needs to be freed
4554  */
4555 int
4556 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4557 		  u16 counter_id)
4558 {
4559 	struct ice_aqc_alloc_free_res_elem *buf;
4560 	u16 buf_len;
4561 	int status;
4562 
4563 	/* Free resource */
4564 	buf_len = struct_size(buf, elem, 1);
4565 	buf = kzalloc(buf_len, GFP_KERNEL);
4566 	if (!buf)
4567 		return -ENOMEM;
4568 
4569 	buf->num_elems = cpu_to_le16(num_items);
4570 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4571 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4572 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4573 
4574 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4575 				       ice_aqc_opc_free_res, NULL);
4576 	if (status)
4577 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4578 
4579 	kfree(buf);
4580 	return status;
4581 }
4582 
4583 /* This is mapping table entry that maps every word within a given protocol
4584  * structure to the real byte offset as per the specification of that
4585  * protocol header.
4586  * for example dst address is 3 words in ethertype header and corresponding
4587  * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4588  * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4589  * matching entry describing its field. This needs to be updated if new
4590  * structure is added to that union.
4591  */
/* Rows are indexed by enum ice_protocol_type; offs[] lists the byte offset
 * of each extractable 16-bit word within that protocol header (see the
 * mapping-table comment above).
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_ETYPE_IL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
};
4613 
/* Maps each software protocol type to its HW protocol ID.  Deliberately
 * not const: ice_change_proto_id_to_dvm() rewrites the ICE_VLAN_OFOS
 * entry to ICE_VLAN_OF_HW when double VLAN mode is supported.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
};
4633 
4634 /**
4635  * ice_find_recp - find a recipe
4636  * @hw: pointer to the hardware structure
4637  * @lkup_exts: extension sequence to match
4638  * @tun_type: type of recipe tunnel
4639  *
4640  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4641  */
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
	      enum ice_sw_tunnel_type tun_type)
{
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				/* Order-insensitive match: every lookup word
				 * (prot_id, off, mask) must appear somewhere
				 * in the recipe's word list.
				 */
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 * recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 * Also tun type of recipe needs to be checked
			 */
			if (found && recp[i].tun_type == tun_type)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}
4713 
4714 /**
4715  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4716  *
4717  * As protocol id for outer vlan is different in dvm and svm, if dvm is
4718  * supported protocol array record for outer vlan has to be modified to
4719  * reflect the value proper for DVM.
4720  */
4721 void ice_change_proto_id_to_dvm(void)
4722 {
4723 	u8 i;
4724 
4725 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4726 		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4727 		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4728 			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4729 }
4730 
4731 /**
4732  * ice_prot_type_to_id - get protocol ID from protocol type
4733  * @type: protocol type
4734  * @id: pointer to variable that will receive the ID
4735  *
4736  * Returns true if found, false otherwise
4737  */
4738 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4739 {
4740 	u8 i;
4741 
4742 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4743 		if (ice_prot_id_tbl[i].type == type) {
4744 			*id = ice_prot_id_tbl[i].protocol_id;
4745 			return true;
4746 		}
4747 	return false;
4748 }
4749 
4750 /**
4751  * ice_fill_valid_words - count valid words
4752  * @rule: advanced rule with lookup information
4753  * @lkup_exts: byte offset extractions of the words that are valid
4754  *
4755  * calculate valid words in a lookup rule using mask value
4756  */
4757 static u8
4758 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4759 		     struct ice_prot_lkup_ext *lkup_exts)
4760 {
4761 	u8 j, word, prot_id, ret_val;
4762 
4763 	if (!ice_prot_type_to_id(rule->type, &prot_id))
4764 		return 0;
4765 
4766 	word = lkup_exts->n_val_words;
4767 
4768 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4769 		if (((u16 *)&rule->m_u)[j] &&
4770 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
4771 			/* No more space to accommodate */
4772 			if (word >= ICE_MAX_CHAIN_WORDS)
4773 				return 0;
4774 			lkup_exts->fv_words[word].off =
4775 				ice_prot_ext[rule->type].offs[j];
4776 			lkup_exts->fv_words[word].prot_id =
4777 				ice_prot_id_tbl[rule->type].protocol_id;
4778 			lkup_exts->field_mask[word] =
4779 				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
4780 			word++;
4781 		}
4782 
4783 	ret_val = word - lkup_exts->n_val_words;
4784 	lkup_exts->n_val_words = word;
4785 
4786 	return ret_val;
4787 }
4788 
4789 /**
4790  * ice_create_first_fit_recp_def - Create a recipe grouping
4791  * @hw: pointer to the hardware structure
4792  * @lkup_exts: an array of protocol header extractions
4793  * @rg_list: pointer to a list that stores new recipe groups
4794  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4795  *
4796  * Using first fit algorithm, take all the words that are still not done
4797  * and start grouping them in 4-word groups. Each group makes up one
4798  * recipe.
4799  */
4800 static int
4801 ice_create_first_fit_recp_def(struct ice_hw *hw,
4802 			      struct ice_prot_lkup_ext *lkup_exts,
4803 			      struct list_head *rg_list,
4804 			      u8 *recp_cnt)
4805 {
4806 	struct ice_pref_recipe_group *grp = NULL;
4807 	u8 j;
4808 
4809 	*recp_cnt = 0;
4810 
4811 	/* Walk through every word in the rule to check if it is not done. If so
4812 	 * then this word needs to be part of a new recipe.
4813 	 */
4814 	for (j = 0; j < lkup_exts->n_val_words; j++)
4815 		if (!test_bit(j, lkup_exts->done)) {
4816 			if (!grp ||
4817 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4818 				struct ice_recp_grp_entry *entry;
4819 
4820 				entry = devm_kzalloc(ice_hw_to_dev(hw),
4821 						     sizeof(*entry),
4822 						     GFP_KERNEL);
4823 				if (!entry)
4824 					return -ENOMEM;
4825 				list_add(&entry->l_entry, rg_list);
4826 				grp = &entry->r_group;
4827 				(*recp_cnt)++;
4828 			}
4829 
4830 			grp->pairs[grp->n_val_pairs].prot_id =
4831 				lkup_exts->fv_words[j].prot_id;
4832 			grp->pairs[grp->n_val_pairs].off =
4833 				lkup_exts->fv_words[j].off;
4834 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4835 			grp->n_val_pairs++;
4836 		}
4837 
4838 	return 0;
4839 }
4840 
4841 /**
4842  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4843  * @hw: pointer to the hardware structure
4844  * @fv_list: field vector with the extraction sequence information
4845  * @rg_list: recipe groupings with protocol-offset pairs
4846  *
4847  * Helper function to fill in the field vector indices for protocol-offset
4848  * pairs. These indexes are then ultimately programmed into a recipe.
4849  */
4850 static int
4851 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4852 		       struct list_head *rg_list)
4853 {
4854 	struct ice_sw_fv_list_entry *fv;
4855 	struct ice_recp_grp_entry *rg;
4856 	struct ice_fv_word *fv_ext;
4857 
4858 	if (list_empty(fv_list))
4859 		return 0;
4860 
4861 	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4862 			      list_entry);
4863 	fv_ext = fv->fv_ptr->ew;
4864 
4865 	list_for_each_entry(rg, rg_list, l_entry) {
4866 		u8 i;
4867 
4868 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4869 			struct ice_fv_word *pr;
4870 			bool found = false;
4871 			u16 mask;
4872 			u8 j;
4873 
4874 			pr = &rg->r_group.pairs[i];
4875 			mask = rg->r_group.mask[i];
4876 
4877 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4878 				if (fv_ext[j].prot_id == pr->prot_id &&
4879 				    fv_ext[j].off == pr->off) {
4880 					found = true;
4881 
4882 					/* Store index of field vector */
4883 					rg->fv_idx[i] = j;
4884 					rg->fv_mask[i] = mask;
4885 					break;
4886 				}
4887 
4888 			/* Protocol/offset could not be found, caller gave an
4889 			 * invalid pair
4890 			 */
4891 			if (!found)
4892 				return -EINVAL;
4893 		}
4894 	}
4895 
4896 	return 0;
4897 }
4898 
4899 /**
4900  * ice_find_free_recp_res_idx - find free result indexes for recipe
4901  * @hw: pointer to hardware structure
4902  * @profiles: bitmap of profiles that will be associated with the new recipe
4903  * @free_idx: pointer to variable to receive the free index bitmap
4904  *
4905  * The algorithm used here is:
4906  *	1. When creating a new recipe, create a set P which contains all
4907  *	   Profiles that will be associated with our new recipe
4908  *
4909  *	2. For each Profile p in set P:
4910  *	    a. Add all recipes associated with Profile p into set R
4911  *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4912  *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4913  *		i. Or just assume they all have the same possible indexes:
4914  *			44, 45, 46, 47
4915  *			i.e., PossibleIndexes = 0x0000F00000000000
4916  *
4917  *	3. For each Recipe r in set R:
4918  *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4919  *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4920  *
4921  *	FreeIndexes will contain the bits indicating the indexes free for use,
4922  *      then the code needs to update the recipe[r].used_result_idx_bits to
4923  *      indicate which indexes were selected for use by this recipe.
4924  */
4925 static u16
4926 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4927 			   unsigned long *free_idx)
4928 {
4929 	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4930 	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4931 	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4932 	u16 bit;
4933 
4934 	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4935 	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4936 
4937 	bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
4938 
4939 	/* For each profile we are going to associate the recipe with, add the
4940 	 * recipes that are associated with that profile. This will give us
4941 	 * the set of recipes that our recipe may collide with. Also, determine
4942 	 * what possible result indexes are usable given this set of profiles.
4943 	 */
4944 	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4945 		bitmap_or(recipes, recipes, profile_to_recipe[bit],
4946 			  ICE_MAX_NUM_RECIPES);
4947 		bitmap_and(possible_idx, possible_idx,
4948 			   hw->switch_info->prof_res_bm[bit],
4949 			   ICE_MAX_FV_WORDS);
4950 	}
4951 
4952 	/* For each recipe that our new recipe may collide with, determine
4953 	 * which indexes have been used.
4954 	 */
4955 	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4956 		bitmap_or(used_idx, used_idx,
4957 			  hw->switch_info->recp_list[bit].res_idxs,
4958 			  ICE_MAX_FV_WORDS);
4959 
4960 	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4961 
4962 	/* return number of free indexes */
4963 	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4964 }
4965 
4966 /**
4967  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4968  * @hw: pointer to hardware structure
4969  * @rm: recipe management list entry
4970  * @profiles: bitmap of profiles that will be associated.
4971  */
static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  unsigned long *profiles)
{
	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;
	int status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	 /* check number of free result indices */
	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		/* each non-root recipe needs one result index to chain from */
		if (rm->n_grp_count > free_res_idx)
			return -ENOSPC;

		/* account for the extra chaining (root) recipe built below */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return -ENOSPC;

	/* scratch buffer for ice_aq_get_recipe(); freed on all exit paths */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* AQ buffer describing every recipe to be programmed; on success its
	 * ownership transfers to rm->root_buf at the end of this function
	 */
	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
			   GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err_mem;
	}

	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	/* tmp[0] is used below as a template for each new recipe element */
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		/* overwrite the ignore entries with this group's real
		 * protocol/offset word indices and masks
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				cpu_to_le16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = -ENOSPC;
				goto err_unroll;
			}

			/* consume one free result index for this recipe and
			 * advance to the next free one
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			clear_bit(chain_idx, result_idx_bm);
			chain_idx = find_first_bit(result_idx_bm,
						   ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
			    ICE_MAX_NUM_RECIPES);
		set_bit(buf[recps].recipe_indx,
			(unsigned long *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	/* A single group means the lone recipe doubles as the root recipe;
	 * multiple groups need a dedicated chaining (root) recipe, built in
	 * the else branch below.
	 */
	if (rm->n_grp_count == 1) {
		rm->root_rid = buf[0].recipe_indx;
		set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[0].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*last_chain_entry),
						GFP_KERNEL);
		if (!last_chain_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		/* index 0 matches the switch ID (set above); the chained
		 * result indexes start at lookup index 1
		 */
		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		list_for_each_entry(entry, &rm->rg_list, l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
			set_bit(entry->rid, rm->r_bitmap);
		}
		list_add(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[recps].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	/* program the assembled recipe(s) into hardware under the change lock */
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = -EIO;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		memcpy(&recp->ext_words, entry->r_group.pairs,
		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));

		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
		       sizeof(recp->r_bitmap));

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
	}
	/* ownership of buf transfers to rm->root_buf; not freed here */
	rm->root_buf = buf;
	kfree(tmp);
	return status;

err_unroll:
err_mem:
	kfree(tmp);
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
5257 
5258 /**
5259  * ice_create_recipe_group - creates recipe group
5260  * @hw: pointer to hardware structure
5261  * @rm: recipe management list entry
5262  * @lkup_exts: lookup elements
5263  */
5264 static int
5265 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5266 			struct ice_prot_lkup_ext *lkup_exts)
5267 {
5268 	u8 recp_count = 0;
5269 	int status;
5270 
5271 	rm->n_grp_count = 0;
5272 
5273 	/* Create recipes for words that are marked not done by packing them
5274 	 * as best fit.
5275 	 */
5276 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
5277 					       &rm->rg_list, &recp_count);
5278 	if (!status) {
5279 		rm->n_grp_count += recp_count;
5280 		rm->n_ext_words = lkup_exts->n_val_words;
5281 		memcpy(&rm->ext_words, lkup_exts->fv_words,
5282 		       sizeof(rm->ext_words));
5283 		memcpy(rm->word_masks, lkup_exts->field_mask,
5284 		       sizeof(rm->word_masks));
5285 	}
5286 
5287 	return status;
5288 }
5289 
5290 /**
5291  * ice_tun_type_match_word - determine if tun type needs a match mask
5292  * @tun_type: tunnel type
5293  * @mask: mask to be used for the tunnel
5294  */
5295 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
5296 {
5297 	switch (tun_type) {
5298 	case ICE_SW_TUN_GENEVE:
5299 	case ICE_SW_TUN_VXLAN:
5300 	case ICE_SW_TUN_NVGRE:
5301 	case ICE_SW_TUN_GTPU:
5302 	case ICE_SW_TUN_GTPC:
5303 		*mask = ICE_TUN_FLAG_MASK;
5304 		return true;
5305 
5306 	default:
5307 		*mask = 0;
5308 		return false;
5309 	}
5310 }
5311 
5312 /**
5313  * ice_add_special_words - Add words that are not protocols, such as metadata
5314  * @rinfo: other information regarding the rule e.g. priority and action info
5315  * @lkup_exts: lookup word structure
5316  */
5317 static int
5318 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5319 		      struct ice_prot_lkup_ext *lkup_exts)
5320 {
5321 	u16 mask;
5322 
5323 	/* If this is a tunneled packet, then add recipe index to match the
5324 	 * tunnel bit in the packet metadata flags.
5325 	 */
5326 	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
5327 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5328 			u8 word = lkup_exts->n_val_words++;
5329 
5330 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5331 			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
5332 			lkup_exts->field_mask[word] = mask;
5333 		} else {
5334 			return -ENOSPC;
5335 		}
5336 	}
5337 
5338 	return 0;
5339 }
5340 
5341 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5342  * @hw: pointer to hardware structure
5343  * @rinfo: other information regarding the rule e.g. priority and action info
5344  * @bm: pointer to memory for returning the bitmap of field vectors
5345  */
5346 static void
5347 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5348 			 unsigned long *bm)
5349 {
5350 	enum ice_prof_type prof_type;
5351 
5352 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5353 
5354 	switch (rinfo->tun_type) {
5355 	case ICE_NON_TUN:
5356 		prof_type = ICE_PROF_NON_TUN;
5357 		break;
5358 	case ICE_ALL_TUNNELS:
5359 		prof_type = ICE_PROF_TUN_ALL;
5360 		break;
5361 	case ICE_SW_TUN_GENEVE:
5362 	case ICE_SW_TUN_VXLAN:
5363 		prof_type = ICE_PROF_TUN_UDP;
5364 		break;
5365 	case ICE_SW_TUN_NVGRE:
5366 		prof_type = ICE_PROF_TUN_GRE;
5367 		break;
5368 	case ICE_SW_TUN_GTPU:
5369 		prof_type = ICE_PROF_TUN_GTPU;
5370 		break;
5371 	case ICE_SW_TUN_GTPC:
5372 		prof_type = ICE_PROF_TUN_GTPC;
5373 		break;
5374 	case ICE_SW_TUN_AND_NON_TUN:
5375 	default:
5376 		prof_type = ICE_PROF_ALL;
5377 		break;
5378 	}
5379 
5380 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
5381 }
5382 
5383 /**
5384  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5385  * @hw: pointer to hardware structure
5386  * @lkups: lookup elements or match criteria for the advanced recipe, one
5387  *  structure per protocol header
5388  * @lkups_cnt: number of protocols
5389  * @rinfo: other information regarding the rule e.g. priority and action info
5390  * @rid: return the recipe ID of the recipe created
5391  */
5392 static int
5393 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5394 		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5395 {
5396 	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
5397 	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
5398 	struct ice_prot_lkup_ext *lkup_exts;
5399 	struct ice_recp_grp_entry *r_entry;
5400 	struct ice_sw_fv_list_entry *fvit;
5401 	struct ice_recp_grp_entry *r_tmp;
5402 	struct ice_sw_fv_list_entry *tmp;
5403 	struct ice_sw_recipe *rm;
5404 	int status = 0;
5405 	u8 i;
5406 
5407 	if (!lkups_cnt)
5408 		return -EINVAL;
5409 
5410 	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
5411 	if (!lkup_exts)
5412 		return -ENOMEM;
5413 
5414 	/* Determine the number of words to be matched and if it exceeds a
5415 	 * recipe's restrictions
5416 	 */
5417 	for (i = 0; i < lkups_cnt; i++) {
5418 		u16 count;
5419 
5420 		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5421 			status = -EIO;
5422 			goto err_free_lkup_exts;
5423 		}
5424 
5425 		count = ice_fill_valid_words(&lkups[i], lkup_exts);
5426 		if (!count) {
5427 			status = -EIO;
5428 			goto err_free_lkup_exts;
5429 		}
5430 	}
5431 
5432 	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
5433 	if (!rm) {
5434 		status = -ENOMEM;
5435 		goto err_free_lkup_exts;
5436 	}
5437 
5438 	/* Get field vectors that contain fields extracted from all the protocol
5439 	 * headers being programmed.
5440 	 */
5441 	INIT_LIST_HEAD(&rm->fv_list);
5442 	INIT_LIST_HEAD(&rm->rg_list);
5443 
5444 	/* Get bitmap of field vectors (profiles) that are compatible with the
5445 	 * rule request; only these will be searched in the subsequent call to
5446 	 * ice_get_sw_fv_list.
5447 	 */
5448 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5449 
5450 	status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
5451 	if (status)
5452 		goto err_unroll;
5453 
5454 	/* Create any special protocol/offset pairs, such as looking at tunnel
5455 	 * bits by extracting metadata
5456 	 */
5457 	status = ice_add_special_words(rinfo, lkup_exts);
5458 	if (status)
5459 		goto err_free_lkup_exts;
5460 
5461 	/* Group match words into recipes using preferred recipe grouping
5462 	 * criteria.
5463 	 */
5464 	status = ice_create_recipe_group(hw, rm, lkup_exts);
5465 	if (status)
5466 		goto err_unroll;
5467 
5468 	/* set the recipe priority if specified */
5469 	rm->priority = (u8)rinfo->priority;
5470 
5471 	/* Find offsets from the field vector. Pick the first one for all the
5472 	 * recipes.
5473 	 */
5474 	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5475 	if (status)
5476 		goto err_unroll;
5477 
5478 	/* get bitmap of all profiles the recipe will be associated with */
5479 	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
5480 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5481 		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5482 		set_bit((u16)fvit->profile_id, profiles);
5483 	}
5484 
5485 	/* Look for a recipe which matches our requested fv / mask list */
5486 	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
5487 	if (*rid < ICE_MAX_NUM_RECIPES)
5488 		/* Success if found a recipe that match the existing criteria */
5489 		goto err_unroll;
5490 
5491 	rm->tun_type = rinfo->tun_type;
5492 	/* Recipe we need does not exist, add a recipe */
5493 	status = ice_add_sw_recipe(hw, rm, profiles);
5494 	if (status)
5495 		goto err_unroll;
5496 
5497 	/* Associate all the recipes created with all the profiles in the
5498 	 * common field vector.
5499 	 */
5500 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5501 		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
5502 		u16 j;
5503 
5504 		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5505 						      (u8 *)r_bitmap, NULL);
5506 		if (status)
5507 			goto err_unroll;
5508 
5509 		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
5510 			  ICE_MAX_NUM_RECIPES);
5511 		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5512 		if (status)
5513 			goto err_unroll;
5514 
5515 		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5516 						      (u8 *)r_bitmap,
5517 						      NULL);
5518 		ice_release_change_lock(hw);
5519 
5520 		if (status)
5521 			goto err_unroll;
5522 
5523 		/* Update profile to recipe bitmap array */
5524 		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
5525 			    ICE_MAX_NUM_RECIPES);
5526 
5527 		/* Update recipe to profile bitmap array */
5528 		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
5529 			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
5530 	}
5531 
5532 	*rid = rm->root_rid;
5533 	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
5534 	       sizeof(*lkup_exts));
5535 err_unroll:
5536 	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
5537 		list_del(&r_entry->l_entry);
5538 		devm_kfree(ice_hw_to_dev(hw), r_entry);
5539 	}
5540 
5541 	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
5542 		list_del(&fvit->list_entry);
5543 		devm_kfree(ice_hw_to_dev(hw), fvit);
5544 	}
5545 
5546 	if (rm->root_buf)
5547 		devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
5548 
5549 	kfree(rm);
5550 
5551 err_free_lkup_exts:
5552 	kfree(lkup_exts);
5553 
5554 	return status;
5555 }
5556 
5557 /**
5558  * ice_find_dummy_packet - find dummy packet
5559  *
5560  * @lkups: lookup elements or match criteria for the advanced recipe, one
5561  *	   structure per protocol header
5562  * @lkups_cnt: number of protocols
5563  * @tun_type: tunnel type
5564  *
5565  * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5566  */
5567 static const struct ice_dummy_pkt_profile *
5568 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5569 		      enum ice_sw_tunnel_type tun_type)
5570 {
5571 	const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5572 	u32 match = 0;
5573 	u16 i;
5574 
5575 	switch (tun_type) {
5576 	case ICE_SW_TUN_GTPC:
5577 		match |= ICE_PKT_TUN_GTPC;
5578 		break;
5579 	case ICE_SW_TUN_GTPU:
5580 		match |= ICE_PKT_TUN_GTPU;
5581 		break;
5582 	case ICE_SW_TUN_NVGRE:
5583 		match |= ICE_PKT_TUN_NVGRE;
5584 		break;
5585 	case ICE_SW_TUN_GENEVE:
5586 	case ICE_SW_TUN_VXLAN:
5587 		match |= ICE_PKT_TUN_UDP;
5588 		break;
5589 	default:
5590 		break;
5591 	}
5592 
5593 	for (i = 0; i < lkups_cnt; i++) {
5594 		if (lkups[i].type == ICE_UDP_ILOS)
5595 			match |= ICE_PKT_INNER_UDP;
5596 		else if (lkups[i].type == ICE_TCP_IL)
5597 			match |= ICE_PKT_INNER_TCP;
5598 		else if (lkups[i].type == ICE_IPV6_OFOS)
5599 			match |= ICE_PKT_OUTER_IPV6;
5600 		else if (lkups[i].type == ICE_VLAN_OFOS)
5601 			match |= ICE_PKT_VLAN;
5602 		else if (lkups[i].type == ICE_ETYPE_OL &&
5603 			 lkups[i].h_u.ethertype.ethtype_id ==
5604 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5605 			 lkups[i].m_u.ethertype.ethtype_id ==
5606 				cpu_to_be16(0xFFFF))
5607 			match |= ICE_PKT_OUTER_IPV6;
5608 		else if (lkups[i].type == ICE_ETYPE_IL &&
5609 			 lkups[i].h_u.ethertype.ethtype_id ==
5610 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5611 			 lkups[i].m_u.ethertype.ethtype_id ==
5612 				cpu_to_be16(0xFFFF))
5613 			match |= ICE_PKT_INNER_IPV6;
5614 		else if (lkups[i].type == ICE_IPV6_IL)
5615 			match |= ICE_PKT_INNER_IPV6;
5616 		else if (lkups[i].type == ICE_GTP_NO_PAY)
5617 			match |= ICE_PKT_GTP_NOPAY;
5618 	}
5619 
5620 	while (ret->match && (match & ret->match) != ret->match)
5621 		ret++;
5622 
5623 	return ret;
5624 }
5625 
5626 /**
5627  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5628  *
5629  * @lkups: lookup elements or match criteria for the advanced recipe, one
5630  *	   structure per protocol header
5631  * @lkups_cnt: number of protocols
5632  * @s_rule: stores rule information from the match criteria
5633  * @profile: dummy packet profile (the template, its size and header offsets)
5634  */
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_sw_rule_lkup_rx_tx *s_rule,
			  const struct ice_dummy_pkt_profile *profile)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->hdr_data;

	memcpy(pkt, profile->pkt, profile->pkt_len);

	for (i = 0; i < lkups_cnt; i++) {
		const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when search for the dummy packet
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return -EINVAL;

		/* Map the protocol type to its header length so the masked
		 * copy below stays within this header.
		 */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
		case ICE_ETYPE_IL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		case ICE_NVGRE:
			len = sizeof(struct ice_nvgre_hdr);
			break;
		case ICE_VXLAN:
		case ICE_GENEVE:
			len = sizeof(struct ice_udp_tnl_hdr);
			break;
		case ICE_GTP_NO_PAY:
		case ICE_GTP:
			len = sizeof(struct ice_udp_gtp_hdr);
			break;
		default:
			/* unsupported protocol type for dummy packet fill */
			return -EINVAL;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return -EIO;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		for (j = 0; j < len / sizeof(u16); j++) {
			/* header is patched u16-at-a-time; words with an
			 * all-zero mask are left as the dummy template
			 */
			u16 *ptr = (u16 *)(pkt + offset);
			u16 mask = lkups[i].m_raw[j];

			if (!mask)
				continue;

			ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
		}
	}

	s_rule->hdr_len = cpu_to_le16(profile->pkt_len);

	return 0;
}
5740 
5741 /**
5742  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5743  * @hw: pointer to the hardware structure
5744  * @tun_type: tunnel type
5745  * @pkt: dummy packet to fill in
5746  * @offsets: offset info for the dummy packet
5747  */
5748 static int
5749 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5750 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5751 {
5752 	u16 open_port, i;
5753 
5754 	switch (tun_type) {
5755 	case ICE_SW_TUN_VXLAN:
5756 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5757 			return -EIO;
5758 		break;
5759 	case ICE_SW_TUN_GENEVE:
5760 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5761 			return -EIO;
5762 		break;
5763 	default:
5764 		/* Nothing needs to be done for this tunnel type */
5765 		return 0;
5766 	}
5767 
5768 	/* Find the outer UDP protocol header and insert the port number */
5769 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5770 		if (offsets[i].type == ICE_UDP_OF) {
5771 			struct ice_l4_hdr *hdr;
5772 			u16 offset;
5773 
5774 			offset = offsets[i].offset;
5775 			hdr = (struct ice_l4_hdr *)&pkt[offset];
5776 			hdr->dst_port = cpu_to_be16(open_port);
5777 
5778 			return 0;
5779 		}
5780 	}
5781 
5782 	return -EIO;
5783 }
5784 
5785 /**
5786  * ice_find_adv_rule_entry - Search a rule entry
5787  * @hw: pointer to the hardware structure
5788  * @lkups: lookup elements or match criteria for the advanced recipe, one
5789  *	   structure per protocol header
5790  * @lkups_cnt: number of protocols
5791  * @recp_id: recipe ID for which we are finding the rule
5792  * @rinfo: other information regarding the rule e.g. priority and action info
5793  *
5794  * Helper function to search for a given advance rule entry
5795  * Returns pointer to entry storing the rule if found
5796  */
5797 static struct ice_adv_fltr_mgmt_list_entry *
5798 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5799 			u16 lkups_cnt, u16 recp_id,
5800 			struct ice_adv_rule_info *rinfo)
5801 {
5802 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
5803 	struct ice_switch_info *sw = hw->switch_info;
5804 	int i;
5805 
5806 	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5807 			    list_entry) {
5808 		bool lkups_matched = true;
5809 
5810 		if (lkups_cnt != list_itr->lkups_cnt)
5811 			continue;
5812 		for (i = 0; i < list_itr->lkups_cnt; i++)
5813 			if (memcmp(&list_itr->lkups[i], &lkups[i],
5814 				   sizeof(*lkups))) {
5815 				lkups_matched = false;
5816 				break;
5817 			}
5818 		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5819 		    rinfo->tun_type == list_itr->rule_info.tun_type &&
5820 		    lkups_matched)
5821 			return list_itr;
5822 	}
5823 	return NULL;
5824 }
5825 
5826 /**
5827  * ice_adv_add_update_vsi_list
5828  * @hw: pointer to the hardware structure
5829  * @m_entry: pointer to current adv filter management list entry
5830  * @cur_fltr: filter information from the book keeping entry
5831  * @new_fltr: filter information with the new VSI to be added
5832  *
5833  * Call AQ command to add or update previously created VSI list with new VSI.
5834  *
5835  * Helper function to do book keeping associated with adding filter information
5836  * The algorithm to do the booking keeping is described below :
5837  * When a VSI needs to subscribe to a given advanced filter
5838  *	if only one VSI has been added till now
5839  *		Allocate a new VSI list and add two VSIs
5840  *		to this list using switch rule command
5841  *		Update the previously created switch rule with the
5842  *		newly created VSI list ID
5843  *	if a VSI list was previously created
5844  *		Add the new VSI to the previously created VSI list set
5845  *		using the update switch rule command
5846  */
5847 static int
5848 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5849 			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
5850 			    struct ice_adv_rule_info *cur_fltr,
5851 			    struct ice_adv_rule_info *new_fltr)
5852 {
5853 	u16 vsi_list_id = 0;
5854 	int status;
5855 
5856 	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5857 	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5858 	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5859 		return -EOPNOTSUPP;
5860 
5861 	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5862 	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5863 	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5864 	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5865 		return -EOPNOTSUPP;
5866 
5867 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5868 		 /* Only one entry existed in the mapping and it was not already
5869 		  * a part of a VSI list. So, create a VSI list with the old and
5870 		  * new VSIs.
5871 		  */
5872 		struct ice_fltr_info tmp_fltr;
5873 		u16 vsi_handle_arr[2];
5874 
5875 		/* A rule already exists with the new VSI being added */
5876 		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5877 		    new_fltr->sw_act.fwd_id.hw_vsi_id)
5878 			return -EEXIST;
5879 
5880 		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5881 		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5882 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5883 						  &vsi_list_id,
5884 						  ICE_SW_LKUP_LAST);
5885 		if (status)
5886 			return status;
5887 
5888 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5889 		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
5890 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5891 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5892 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5893 		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
5894 
5895 		/* Update the previous switch rule of "forward to VSI" to
5896 		 * "fwd to VSI list"
5897 		 */
5898 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5899 		if (status)
5900 			return status;
5901 
5902 		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5903 		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5904 		m_entry->vsi_list_info =
5905 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5906 						vsi_list_id);
5907 	} else {
5908 		u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5909 
5910 		if (!m_entry->vsi_list_info)
5911 			return -EIO;
5912 
5913 		/* A rule already exists with the new VSI being added */
5914 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
5915 			return 0;
5916 
5917 		/* Update the previously created VSI list set with
5918 		 * the new VSI ID passed in
5919 		 */
5920 		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5921 
5922 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5923 						  vsi_list_id, false,
5924 						  ice_aqc_opc_update_sw_rules,
5925 						  ICE_SW_LKUP_LAST);
5926 		/* update VSI list mapping info with new VSI ID */
5927 		if (!status)
5928 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
5929 	}
5930 	if (!status)
5931 		m_entry->vsi_count++;
5932 	return status;
5933 }
5934 
5935 /**
5936  * ice_add_adv_rule - helper function to create an advanced switch rule
5937  * @hw: pointer to the hardware structure
5938  * @lkups: information on the words that needs to be looked up. All words
5939  * together makes one recipe
5940  * @lkups_cnt: num of entries in the lkups array
5941  * @rinfo: other information related to the rule that needs to be programmed
5942  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5943  *               ignored is case of error.
5944  *
5945  * This function can program only 1 rule at a time. The lkups is used to
5946  * describe the all the words that forms the "lookup" portion of the recipe.
5947  * These words can span multiple protocols. Callers to this function need to
5948  * pass in a list of protocol headers with lookup information along and mask
5949  * that determines which words are valid from the given protocol header.
5950  * rinfo describes other information related to this rule such as forwarding
5951  * IDs, priority of this rule, etc.
5952  */
5953 int
5954 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5955 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5956 		 struct ice_rule_query_data *added_entry)
5957 {
5958 	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5959 	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
5960 	const struct ice_dummy_pkt_profile *profile;
5961 	u16 rid = 0, i, rule_buf_sz, vsi_handle;
5962 	struct list_head *rule_head;
5963 	struct ice_switch_info *sw;
5964 	u16 word_cnt;
5965 	u32 act = 0;
5966 	int status;
5967 	u8 q_rgn;
5968 
5969 	/* Initialize profile to result index bitmap */
5970 	if (!hw->switch_info->prof_res_bm_init) {
5971 		hw->switch_info->prof_res_bm_init = 1;
5972 		ice_init_prof_result_bm(hw);
5973 	}
5974 
5975 	if (!lkups_cnt)
5976 		return -EINVAL;
5977 
5978 	/* get # of words we need to match */
5979 	word_cnt = 0;
5980 	for (i = 0; i < lkups_cnt; i++) {
5981 		u16 j;
5982 
5983 		for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
5984 			if (lkups[i].m_raw[j])
5985 				word_cnt++;
5986 	}
5987 
5988 	if (!word_cnt)
5989 		return -EINVAL;
5990 
5991 	if (word_cnt > ICE_MAX_CHAIN_WORDS)
5992 		return -ENOSPC;
5993 
5994 	/* locate a dummy packet */
5995 	profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
5996 
5997 	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5998 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5999 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6000 	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6001 		return -EIO;
6002 
6003 	vsi_handle = rinfo->sw_act.vsi_handle;
6004 	if (!ice_is_vsi_valid(hw, vsi_handle))
6005 		return -EINVAL;
6006 
6007 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6008 		rinfo->sw_act.fwd_id.hw_vsi_id =
6009 			ice_get_hw_vsi_num(hw, vsi_handle);
6010 	if (rinfo->sw_act.flag & ICE_FLTR_TX)
6011 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6012 
6013 	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6014 	if (status)
6015 		return status;
6016 	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6017 	if (m_entry) {
6018 		/* we have to add VSI to VSI_LIST and increment vsi_count.
6019 		 * Also Update VSI list so that we can change forwarding rule
6020 		 * if the rule already exists, we will check if it exists with
6021 		 * same vsi_id, if not then add it to the VSI list if it already
6022 		 * exists if not then create a VSI list and add the existing VSI
6023 		 * ID and the new VSI ID to the list
6024 		 * We will add that VSI to the list
6025 		 */
6026 		status = ice_adv_add_update_vsi_list(hw, m_entry,
6027 						     &m_entry->rule_info,
6028 						     rinfo);
6029 		if (added_entry) {
6030 			added_entry->rid = rid;
6031 			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6032 			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6033 		}
6034 		return status;
6035 	}
6036 	rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
6037 	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6038 	if (!s_rule)
6039 		return -ENOMEM;
6040 	if (!rinfo->flags_info.act_valid) {
6041 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
6042 		act |= ICE_SINGLE_ACT_LB_ENABLE;
6043 	} else {
6044 		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
6045 						ICE_SINGLE_ACT_LB_ENABLE);
6046 	}
6047 
6048 	switch (rinfo->sw_act.fltr_act) {
6049 	case ICE_FWD_TO_VSI:
6050 		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6051 			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6052 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6053 		break;
6054 	case ICE_FWD_TO_Q:
6055 		act |= ICE_SINGLE_ACT_TO_Q;
6056 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6057 		       ICE_SINGLE_ACT_Q_INDEX_M;
6058 		break;
6059 	case ICE_FWD_TO_QGRP:
6060 		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6061 			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
6062 		act |= ICE_SINGLE_ACT_TO_Q;
6063 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6064 		       ICE_SINGLE_ACT_Q_INDEX_M;
6065 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6066 		       ICE_SINGLE_ACT_Q_REGION_M;
6067 		break;
6068 	case ICE_DROP_PACKET:
6069 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6070 		       ICE_SINGLE_ACT_VALID_BIT;
6071 		break;
6072 	default:
6073 		status = -EIO;
6074 		goto err_ice_add_adv_rule;
6075 	}
6076 
6077 	/* set the rule LOOKUP type based on caller specified 'Rx'
6078 	 * instead of hardcoding it to be either LOOKUP_TX/RX
6079 	 *
6080 	 * for 'Rx' set the source to be the port number
6081 	 * for 'Tx' set the source to be the source HW VSI number (determined
6082 	 * by caller)
6083 	 */
6084 	if (rinfo->rx) {
6085 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
6086 		s_rule->src = cpu_to_le16(hw->port_info->lport);
6087 	} else {
6088 		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
6089 		s_rule->src = cpu_to_le16(rinfo->sw_act.src);
6090 	}
6091 
6092 	s_rule->recipe_id = cpu_to_le16(rid);
6093 	s_rule->act = cpu_to_le32(act);
6094 
6095 	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
6096 	if (status)
6097 		goto err_ice_add_adv_rule;
6098 
6099 	if (rinfo->tun_type != ICE_NON_TUN &&
6100 	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6101 		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6102 						 s_rule->hdr_data,
6103 						 profile->offsets);
6104 		if (status)
6105 			goto err_ice_add_adv_rule;
6106 	}
6107 
6108 	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6109 				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6110 				 NULL);
6111 	if (status)
6112 		goto err_ice_add_adv_rule;
6113 	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
6114 				sizeof(struct ice_adv_fltr_mgmt_list_entry),
6115 				GFP_KERNEL);
6116 	if (!adv_fltr) {
6117 		status = -ENOMEM;
6118 		goto err_ice_add_adv_rule;
6119 	}
6120 
6121 	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
6122 				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
6123 	if (!adv_fltr->lkups) {
6124 		status = -ENOMEM;
6125 		goto err_ice_add_adv_rule;
6126 	}
6127 
6128 	adv_fltr->lkups_cnt = lkups_cnt;
6129 	adv_fltr->rule_info = *rinfo;
6130 	adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
6131 	sw = hw->switch_info;
6132 	sw->recp_list[rid].adv_rule = true;
6133 	rule_head = &sw->recp_list[rid].filt_rules;
6134 
6135 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6136 		adv_fltr->vsi_count = 1;
6137 
6138 	/* Add rule entry to book keeping list */
6139 	list_add(&adv_fltr->list_entry, rule_head);
6140 	if (added_entry) {
6141 		added_entry->rid = rid;
6142 		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6143 		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6144 	}
6145 err_ice_add_adv_rule:
6146 	if (status && adv_fltr) {
6147 		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
6148 		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
6149 	}
6150 
6151 	kfree(s_rule);
6152 
6153 	return status;
6154 }
6155 
6156 /**
6157  * ice_replay_vsi_fltr - Replay filters for requested VSI
6158  * @hw: pointer to the hardware structure
6159  * @vsi_handle: driver VSI handle
6160  * @recp_id: Recipe ID for which rules need to be replayed
6161  * @list_head: list for which filters need to be replayed
6162  *
6163  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6164  * It is required to pass valid VSI handle.
6165  */
6166 static int
6167 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6168 		    struct list_head *list_head)
6169 {
6170 	struct ice_fltr_mgmt_list_entry *itr;
6171 	int status = 0;
6172 	u16 hw_vsi_id;
6173 
6174 	if (list_empty(list_head))
6175 		return status;
6176 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6177 
6178 	list_for_each_entry(itr, list_head, list_entry) {
6179 		struct ice_fltr_list_entry f_entry;
6180 
6181 		f_entry.fltr_info = itr->fltr_info;
6182 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6183 		    itr->fltr_info.vsi_handle == vsi_handle) {
6184 			/* update the src in case it is VSI num */
6185 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6186 				f_entry.fltr_info.src = hw_vsi_id;
6187 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6188 			if (status)
6189 				goto end;
6190 			continue;
6191 		}
6192 		if (!itr->vsi_list_info ||
6193 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6194 			continue;
6195 		/* Clearing it so that the logic can add it back */
6196 		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6197 		f_entry.fltr_info.vsi_handle = vsi_handle;
6198 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6199 		/* update the src in case it is VSI num */
6200 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6201 			f_entry.fltr_info.src = hw_vsi_id;
6202 		if (recp_id == ICE_SW_LKUP_VLAN)
6203 			status = ice_add_vlan_internal(hw, &f_entry);
6204 		else
6205 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6206 		if (status)
6207 			goto end;
6208 	}
6209 end:
6210 	return status;
6211 }
6212 
6213 /**
6214  * ice_adv_rem_update_vsi_list
6215  * @hw: pointer to the hardware structure
6216  * @vsi_handle: VSI handle of the VSI to remove
6217  * @fm_list: filter management entry for which the VSI list management needs to
6218  *	     be done
6219  */
6220 static int
6221 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6222 			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
6223 {
6224 	struct ice_vsi_list_map_info *vsi_list_info;
6225 	enum ice_sw_lkup_type lkup_type;
6226 	u16 vsi_list_id;
6227 	int status;
6228 
6229 	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6230 	    fm_list->vsi_count == 0)
6231 		return -EINVAL;
6232 
6233 	/* A rule with the VSI being removed does not exist */
6234 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
6235 		return -ENOENT;
6236 
6237 	lkup_type = ICE_SW_LKUP_LAST;
6238 	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6239 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6240 					  ice_aqc_opc_update_sw_rules,
6241 					  lkup_type);
6242 	if (status)
6243 		return status;
6244 
6245 	fm_list->vsi_count--;
6246 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6247 	vsi_list_info = fm_list->vsi_list_info;
6248 	if (fm_list->vsi_count == 1) {
6249 		struct ice_fltr_info tmp_fltr;
6250 		u16 rem_vsi_handle;
6251 
6252 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
6253 						ICE_MAX_VSI);
6254 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6255 			return -EIO;
6256 
6257 		/* Make sure VSI list is empty before removing it below */
6258 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6259 						  vsi_list_id, true,
6260 						  ice_aqc_opc_update_sw_rules,
6261 						  lkup_type);
6262 		if (status)
6263 			return status;
6264 
6265 		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6266 		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
6267 		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6268 		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6269 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6270 		tmp_fltr.fwd_id.hw_vsi_id =
6271 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6272 		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6273 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
6274 		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
6275 
6276 		/* Update the previous switch rule of "MAC forward to VSI" to
6277 		 * "MAC fwd to VSI list"
6278 		 */
6279 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6280 		if (status) {
6281 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6282 				  tmp_fltr.fwd_id.hw_vsi_id, status);
6283 			return status;
6284 		}
6285 		fm_list->vsi_list_info->ref_cnt--;
6286 
6287 		/* Remove the VSI list since it is no longer used */
6288 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6289 		if (status) {
6290 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
6291 				  vsi_list_id, status);
6292 			return status;
6293 		}
6294 
6295 		list_del(&vsi_list_info->list_entry);
6296 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
6297 		fm_list->vsi_list_info = NULL;
6298 	}
6299 
6300 	return status;
6301 }
6302 
6303 /**
6304  * ice_rem_adv_rule - removes existing advanced switch rule
6305  * @hw: pointer to the hardware structure
6306  * @lkups: information on the words that needs to be looked up. All words
6307  *         together makes one recipe
6308  * @lkups_cnt: num of entries in the lkups array
6309  * @rinfo: Its the pointer to the rule information for the rule
6310  *
6311  * This function can be used to remove 1 rule at a time. The lkups is
6312  * used to describe all the words that forms the "lookup" portion of the
6313  * rule. These words can span multiple protocols. Callers to this function
6314  * need to pass in a list of protocol headers with lookup information along
6315  * and mask that determines which words are valid from the given protocol
6316  * header. rinfo describes other information related to this rule such as
6317  * forwarding IDs, priority of this rule, etc.
6318  */
6319 static int
6320 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6321 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6322 {
6323 	struct ice_adv_fltr_mgmt_list_entry *list_elem;
6324 	struct ice_prot_lkup_ext lkup_exts;
6325 	bool remove_rule = false;
6326 	struct mutex *rule_lock; /* Lock to protect filter rule list */
6327 	u16 i, rid, vsi_handle;
6328 	int status = 0;
6329 
6330 	memset(&lkup_exts, 0, sizeof(lkup_exts));
6331 	for (i = 0; i < lkups_cnt; i++) {
6332 		u16 count;
6333 
6334 		if (lkups[i].type >= ICE_PROTOCOL_LAST)
6335 			return -EIO;
6336 
6337 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6338 		if (!count)
6339 			return -EIO;
6340 	}
6341 
6342 	/* Create any special protocol/offset pairs, such as looking at tunnel
6343 	 * bits by extracting metadata
6344 	 */
6345 	status = ice_add_special_words(rinfo, &lkup_exts);
6346 	if (status)
6347 		return status;
6348 
6349 	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
6350 	/* If did not find a recipe that match the existing criteria */
6351 	if (rid == ICE_MAX_NUM_RECIPES)
6352 		return -EINVAL;
6353 
6354 	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6355 	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6356 	/* the rule is already removed */
6357 	if (!list_elem)
6358 		return 0;
6359 	mutex_lock(rule_lock);
6360 	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6361 		remove_rule = true;
6362 	} else if (list_elem->vsi_count > 1) {
6363 		remove_rule = false;
6364 		vsi_handle = rinfo->sw_act.vsi_handle;
6365 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6366 	} else {
6367 		vsi_handle = rinfo->sw_act.vsi_handle;
6368 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6369 		if (status) {
6370 			mutex_unlock(rule_lock);
6371 			return status;
6372 		}
6373 		if (list_elem->vsi_count == 0)
6374 			remove_rule = true;
6375 	}
6376 	mutex_unlock(rule_lock);
6377 	if (remove_rule) {
6378 		struct ice_sw_rule_lkup_rx_tx *s_rule;
6379 		u16 rule_buf_sz;
6380 
6381 		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
6382 		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6383 		if (!s_rule)
6384 			return -ENOMEM;
6385 		s_rule->act = 0;
6386 		s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
6387 		s_rule->hdr_len = 0;
6388 		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6389 					 rule_buf_sz, 1,
6390 					 ice_aqc_opc_remove_sw_rules, NULL);
6391 		if (!status || status == -ENOENT) {
6392 			struct ice_switch_info *sw = hw->switch_info;
6393 
6394 			mutex_lock(rule_lock);
6395 			list_del(&list_elem->list_entry);
6396 			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
6397 			devm_kfree(ice_hw_to_dev(hw), list_elem);
6398 			mutex_unlock(rule_lock);
6399 			if (list_empty(&sw->recp_list[rid].filt_rules))
6400 				sw->recp_list[rid].adv_rule = false;
6401 		}
6402 		kfree(s_rule);
6403 	}
6404 	return status;
6405 }
6406 
6407 /**
6408  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6409  * @hw: pointer to the hardware structure
6410  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6411  *
6412  * This function is used to remove 1 rule at a time. The removal is based on
6413  * the remove_entry parameter. This function will remove rule for a given
6414  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6415  */
6416 int
6417 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6418 		       struct ice_rule_query_data *remove_entry)
6419 {
6420 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
6421 	struct list_head *list_head;
6422 	struct ice_adv_rule_info rinfo;
6423 	struct ice_switch_info *sw;
6424 
6425 	sw = hw->switch_info;
6426 	if (!sw->recp_list[remove_entry->rid].recp_created)
6427 		return -EINVAL;
6428 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6429 	list_for_each_entry(list_itr, list_head, list_entry) {
6430 		if (list_itr->rule_info.fltr_rule_id ==
6431 		    remove_entry->rule_id) {
6432 			rinfo = list_itr->rule_info;
6433 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6434 			return ice_rem_adv_rule(hw, list_itr->lkups,
6435 						list_itr->lkups_cnt, &rinfo);
6436 		}
6437 	}
6438 	/* either list is empty or unable to find rule */
6439 	return -ENOENT;
6440 }
6441 
6442 /**
6443  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6444  *                            given VSI handle
6445  * @hw: pointer to the hardware structure
6446  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6447  *
6448  * This function is used to remove all the rules for a given VSI and as soon
6449  * as removing a rule fails, it will return immediately with the error code,
6450  * else it will return success.
6451  */
6452 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6453 {
6454 	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
6455 	struct ice_vsi_list_map_info *map_info;
6456 	struct ice_adv_rule_info rinfo;
6457 	struct list_head *list_head;
6458 	struct ice_switch_info *sw;
6459 	int status;
6460 	u8 rid;
6461 
6462 	sw = hw->switch_info;
6463 	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6464 		if (!sw->recp_list[rid].recp_created)
6465 			continue;
6466 		if (!sw->recp_list[rid].adv_rule)
6467 			continue;
6468 
6469 		list_head = &sw->recp_list[rid].filt_rules;
6470 		list_for_each_entry_safe(list_itr, tmp_entry, list_head,
6471 					 list_entry) {
6472 			rinfo = list_itr->rule_info;
6473 
6474 			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
6475 				map_info = list_itr->vsi_list_info;
6476 				if (!map_info)
6477 					continue;
6478 
6479 				if (!test_bit(vsi_handle, map_info->vsi_map))
6480 					continue;
6481 			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {
6482 				continue;
6483 			}
6484 
6485 			rinfo.sw_act.vsi_handle = vsi_handle;
6486 			status = ice_rem_adv_rule(hw, list_itr->lkups,
6487 						  list_itr->lkups_cnt, &rinfo);
6488 			if (status)
6489 				return status;
6490 		}
6491 	}
6492 	return 0;
6493 }
6494 
6495 /**
6496  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6497  * @hw: pointer to the hardware structure
6498  * @vsi_handle: driver VSI handle
6499  * @list_head: list for which filters need to be replayed
6500  *
6501  * Replay the advanced rule for the given VSI.
6502  */
6503 static int
6504 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6505 			struct list_head *list_head)
6506 {
6507 	struct ice_rule_query_data added_entry = { 0 };
6508 	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6509 	int status = 0;
6510 
6511 	if (list_empty(list_head))
6512 		return status;
6513 	list_for_each_entry(adv_fltr, list_head, list_entry) {
6514 		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6515 		u16 lk_cnt = adv_fltr->lkups_cnt;
6516 
6517 		if (vsi_handle != rinfo->sw_act.vsi_handle)
6518 			continue;
6519 		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6520 					  &added_entry);
6521 		if (status)
6522 			break;
6523 	}
6524 	return status;
6525 }
6526 
6527 /**
6528  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6529  * @hw: pointer to the hardware structure
6530  * @vsi_handle: driver VSI handle
6531  *
6532  * Replays filters for requested VSI via vsi_handle.
6533  */
6534 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6535 {
6536 	struct ice_switch_info *sw = hw->switch_info;
6537 	int status;
6538 	u8 i;
6539 
6540 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6541 		struct list_head *head;
6542 
6543 		head = &sw->recp_list[i].filt_replay_rules;
6544 		if (!sw->recp_list[i].adv_rule)
6545 			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6546 		else
6547 			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6548 		if (status)
6549 			return status;
6550 	}
6551 	return status;
6552 }
6553 
6554 /**
6555  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6556  * @hw: pointer to the HW struct
6557  *
6558  * Deletes the filter replay rules.
6559  */
6560 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6561 {
6562 	struct ice_switch_info *sw = hw->switch_info;
6563 	u8 i;
6564 
6565 	if (!sw)
6566 		return;
6567 
6568 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6569 		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6570 			struct list_head *l_head;
6571 
6572 			l_head = &sw->recp_list[i].filt_replay_rules;
6573 			if (!sw->recp_list[i].adv_rule)
6574 				ice_rem_sw_rule_info(hw, l_head);
6575 			else
6576 				ice_rem_adv_rule_info(hw, l_head);
6577 		}
6578 	}
6579 }
6580