xref: /linux/drivers/net/ethernet/intel/ice/ice_tc_lib.c (revision 860a9bed265146b10311bcadbbcef59c3af4454d)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019-2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_tc_lib.h"
6 #include "ice_fltr.h"
7 #include "ice_lib.h"
8 #include "ice_protocol_type.h"
9 
10 #define ICE_TC_METADATA_LKUP_IDX 0
11 
12 /**
13  * ice_tc_count_lkups - determine lookup count for switch filter
14  * @flags: TC-flower flags
15  * @headers: Pointer to TC flower filter header structure
16  * @fltr: Pointer to TC flower filter structure
17  *
18  * Determine lookup count based on TC flower input for switch filter.
19  */
20 static int
21 ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
22 		   struct ice_tc_flower_fltr *fltr)
23 {
24 	int lkups_cnt = 1; /* 0th lookup is metadata */
25 
26 	/* Always add metadata as the 0th lookup. Included elements:
27 	 * - Direction flag (always present)
28 	 * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
29 	 * - Tunnel flag (present if tunnel)
30 	 */
31 
32 	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
33 		lkups_cnt++;
34 
35 	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
36 		lkups_cnt++;
37 
38 	if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS)
39 		lkups_cnt++;
40 
41 	if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS)
42 		lkups_cnt++;
43 
44 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
45 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
46 		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
47 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
48 		lkups_cnt++;
49 
50 	if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
51 		     ICE_TC_FLWR_FIELD_ENC_IP_TTL))
52 		lkups_cnt++;
53 
54 	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
55 		lkups_cnt++;
56 
57 	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
58 		lkups_cnt++;
59 
60 	/* are MAC fields specified? */
61 	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
62 		lkups_cnt++;
63 
64 	/* is VLAN specified? */
65 	if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
66 		lkups_cnt++;
67 
68 	/* is CVLAN specified? */
69 	if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
70 		lkups_cnt++;
71 
72 	/* are PPPoE options specified? */
73 	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
74 		     ICE_TC_FLWR_FIELD_PPP_PROTO))
75 		lkups_cnt++;
76 
77 	/* are IPv[4|6] fields specified? */
78 	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
79 		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
80 		lkups_cnt++;
81 
82 	if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))
83 		lkups_cnt++;
84 
85 	/* are L2TPv3 options specified? */
86 	if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID)
87 		lkups_cnt++;
88 
89 	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
90 	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
91 		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
92 		lkups_cnt++;
93 
94 	return lkups_cnt;
95 }
96 
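/* The following helpers map matched header layers to ice protocol types;
 * where applicable, the "inner" argument selects the inner (post-tunnel)
 * header instead of the outermost one.
 */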
97 static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
98 {
99 	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
100 }
101 
102 static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
103 {
104 	return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
105 }
106 
107 static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
108 {
109 	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
110 }
111 
112 static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
113 {
114 	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
115 }
116 
117 static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
118 {
119 	switch (ip_proto) {
120 	case IPPROTO_TCP:
121 		return ICE_TCP_IL;
122 	case IPPROTO_UDP:
123 		return ICE_UDP_ILOS;
124 	}
125 
126 	return 0;
127 }
128 
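/* Map the TC tunnel type to the ice protocol type used for the tunnel
 * header lookup
 */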
129 static enum ice_protocol_type
130 ice_proto_type_from_tunnel(enum ice_tunnel_type type)
131 {
132 	switch (type) {
133 	case TNL_VXLAN:
134 		return ICE_VXLAN;
135 	case TNL_GENEVE:
136 		return ICE_GENEVE;
137 	case TNL_GRETAP:
138 		return ICE_NVGRE;
139 	case TNL_GTPU:
140 		/* NO_PAY profiles will not work with GTP-U */
141 		return ICE_GTP;
142 	case TNL_GTPC:
143 		return ICE_GTP_NO_PAY;
144 	case TNL_PFCP:
145 		return ICE_PFCP;
146 	default:
147 		return 0;
148 	}
149 }
150 
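/* Map the TC tunnel type to the switch tunnel type stored in the rule info */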
151 static enum ice_sw_tunnel_type
152 ice_sw_type_from_tunnel(enum ice_tunnel_type type)
153 {
154 	switch (type) {
155 	case TNL_VXLAN:
156 		return ICE_SW_TUN_VXLAN;
157 	case TNL_GENEVE:
158 		return ICE_SW_TUN_GENEVE;
159 	case TNL_GRETAP:
160 		return ICE_SW_TUN_NVGRE;
161 	case TNL_GTPU:
162 		return ICE_SW_TUN_GTPU;
163 	case TNL_GTPC:
164 		return ICE_SW_TUN_GTPC;
165 	case TNL_PFCP:
166 		return ICE_SW_TUN_PFCP;
167 	default:
168 		return ICE_NON_TUN;
169 	}
170 }
171 
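/* Return @vlan_tpid if it is a supported TPID (802.1Q, 802.1ad or QinQ),
 * otherwise 0
 */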
172 static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
173 {
174 	switch (vlan_tpid) {
175 	case ETH_P_8021Q:
176 	case ETH_P_8021AD:
177 	case ETH_P_QINQ1:
178 		return vlan_tpid;
179 	default:
180 		return 0;
181 	}
182 }
183 
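/**
 * ice_tc_fill_tunnel_outer - fill tunnel (outer) lookup elements
 * @flags: TC-flower flags
 * @fltr: Pointer to TC flower filter
 * @list: list of advanced lookup elements to fill
 * @i: index of the first free element in @list
 *
 * Fill the tunnel/outer lookup elements: tenant ID (VNI/TEID), outer dest
 * MAC, GTP and PFCP options, outer IP addresses, outer IP ToS/TTL and the
 * outer UDP destination port, and mark the rule as tunneled in the metadata
 * lookup. Return the index of the next free element in @list.
 */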
184 static int
185 ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
186 			 struct ice_adv_lkup_elem *list, int i)
187 {
188 	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
189 
190 	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
191 		u32 tenant_id;
192 
193 		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
194 		switch (fltr->tunnel_type) {
195 		case TNL_VXLAN:
196 		case TNL_GENEVE:
197 			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
198 			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
199 			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
200 			i++;
201 			break;
202 		case TNL_GRETAP:
203 			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
204 			memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
205 			       "\xff\xff\xff\xff", 4);
206 			i++;
207 			break;
208 		case TNL_GTPC:
209 		case TNL_GTPU:
210 			list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
211 			memcpy(&list[i].m_u.gtp_hdr.teid,
212 			       "\xff\xff\xff\xff", 4);
213 			i++;
214 			break;
215 		default:
216 			break;
217 		}
218 	}
219 
220 	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
221 		list[i].type = ice_proto_type_from_mac(false);
222 		ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
223 				hdr->l2_key.dst_mac);
224 		ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
225 				hdr->l2_mask.dst_mac);
226 		i++;
227 	}
228 
229 	if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) {
230 		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
231 
232 		if (fltr->gtp_pdu_info_masks.pdu_type) {
233 			list[i].h_u.gtp_hdr.pdu_type =
234 				fltr->gtp_pdu_info_keys.pdu_type << 4;
235 			memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
236 		}
237 
238 		if (fltr->gtp_pdu_info_masks.qfi) {
239 			list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
240 			memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
241 		}
242 
243 		i++;
244 	}
245 
246 	if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) {
247 		struct ice_pfcp_hdr *hdr_h, *hdr_m;
248 
249 		hdr_h = &list[i].h_u.pfcp_hdr;
250 		hdr_m = &list[i].m_u.pfcp_hdr;
251 		list[i].type = ICE_PFCP;
252 
253 		hdr_h->flags = fltr->pfcp_meta_keys.type;
254 		hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01;
255 
256 		hdr_h->seid = fltr->pfcp_meta_keys.seid;
257 		hdr_m->seid = fltr->pfcp_meta_masks.seid;
258 
259 		i++;
260 	}
261 
262 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
263 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
264 		list[i].type = ice_proto_type_from_ipv4(false);
265 
266 		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
267 			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
268 			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
269 		}
270 		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
271 			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
272 			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
273 		}
274 		i++;
275 	}
276 
277 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
278 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
279 		list[i].type = ice_proto_type_from_ipv6(false);
280 
281 		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
282 			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
283 			       &hdr->l3_key.src_ipv6_addr,
284 			       sizeof(hdr->l3_key.src_ipv6_addr));
285 			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
286 			       &hdr->l3_mask.src_ipv6_addr,
287 			       sizeof(hdr->l3_mask.src_ipv6_addr));
288 		}
289 		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
290 			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
291 			       &hdr->l3_key.dst_ipv6_addr,
292 			       sizeof(hdr->l3_key.dst_ipv6_addr));
293 			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
294 			       &hdr->l3_mask.dst_ipv6_addr,
295 			       sizeof(hdr->l3_mask.dst_ipv6_addr));
296 		}
297 		i++;
298 	}
299 
300 	if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) &&
301 	    (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
302 		      ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
303 		list[i].type = ice_proto_type_from_ipv4(false);
304 
305 		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
306 			list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos;
307 			list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos;
308 		}
309 
310 		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
311 			list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl;
312 			list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl;
313 		}
314 
315 		i++;
316 	}
317 
318 	if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) &&
319 	    (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
320 		      ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
321 		struct ice_ipv6_hdr *hdr_h, *hdr_m;
322 
323 		hdr_h = &list[i].h_u.ipv6_hdr;
324 		hdr_m = &list[i].m_u.ipv6_hdr;
325 		list[i].type = ice_proto_type_from_ipv6(false);
326 
327 		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
328 			be32p_replace_bits(&hdr_h->be_ver_tc_flow,
329 					   hdr->l3_key.tos,
330 					   ICE_IPV6_HDR_TC_MASK);
331 			be32p_replace_bits(&hdr_m->be_ver_tc_flow,
332 					   hdr->l3_mask.tos,
333 					   ICE_IPV6_HDR_TC_MASK);
334 		}
335 
336 		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
337 			hdr_h->hop_limit = hdr->l3_key.ttl;
338 			hdr_m->hop_limit = hdr->l3_mask.ttl;
339 		}
340 
341 		i++;
342 	}
343 
344 	if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
345 	    hdr->l3_key.ip_proto == IPPROTO_UDP) {
346 		list[i].type = ICE_UDP_OF;
347 		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
348 		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
349 		i++;
350 	}
351 
352 	/* always add matching on tunneled packets to the metadata lookup */
353 	ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
354 
355 	return i;
356 }
357 
358 /**
359  * ice_tc_fill_rules - fill filter rules based on TC fltr
360  * @hw: pointer to HW structure
361  * @flags: tc flower field flags
362  * @tc_fltr: pointer to TC flower filter
363  * @list: list of advanced rule elements
364  * @rule_info: pointer to information about rule
365  * @l4_proto: pointer to information such as L4 proto type
366  *
367  * Fill the ice_adv_lkup_elem list based on TC flower flags and
368  * TC flower headers. This list should be used to add an
369  * advanced filter in hardware.
370  */
371 static int
372 ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
373 		  struct ice_tc_flower_fltr *tc_fltr,
374 		  struct ice_adv_lkup_elem *list,
375 		  struct ice_adv_rule_info *rule_info,
376 		  u16 *l4_proto)
377 {
378 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
379 	bool inner = false;
380 	u16 vlan_tpid = 0;
381 	int i = 1; /* 0th lookup is metadata */
382 
383 	rule_info->vlan_type = vlan_tpid;
384 
385 	/* Always add direction metadata */
386 	ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
387 
388 	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
389 	if (tc_fltr->tunnel_type != TNL_LAST) {
390 		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
391 
392 		/* PFCP is considered non-tunneled - don't swap headers. */
393 		if (tc_fltr->tunnel_type != TNL_PFCP) {
394 			headers = &tc_fltr->inner_headers;
395 			inner = true;
396 		}
397 	}
398 
399 	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
400 		list[i].type = ice_proto_type_from_etype(inner);
401 		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
402 		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
403 		i++;
404 	}
405 
406 	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
407 		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
408 		struct ice_tc_l2_hdr *l2_key, *l2_mask;
409 
410 		l2_key = &headers->l2_key;
411 		l2_mask = &headers->l2_mask;
412 
413 		list[i].type = ice_proto_type_from_mac(inner);
414 		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
415 			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
416 					l2_key->dst_mac);
417 			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
418 					l2_mask->dst_mac);
419 		}
420 		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
421 			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
422 					l2_key->src_mac);
423 			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
424 					l2_mask->src_mac);
425 		}
426 		i++;
427 	}
428 
429 	/* copy VLAN info */
430 	if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
431 		if (flags & ICE_TC_FLWR_FIELD_CVLAN)
432 			list[i].type = ICE_VLAN_EX;
433 		else
434 			list[i].type = ICE_VLAN_OFOS;
435 
436 		if (flags & ICE_TC_FLWR_FIELD_VLAN) {
437 			list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
438 			list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
439 		}
440 
441 		if (flags & ICE_TC_FLWR_FIELD_VLAN_PRIO) {
442 			if (flags & ICE_TC_FLWR_FIELD_VLAN) {
443 				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
444 			} else {
445 				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
446 				list[i].h_u.vlan_hdr.vlan = 0;
447 			}
448 			list[i].h_u.vlan_hdr.vlan |=
449 				headers->vlan_hdr.vlan_prio;
450 		}
451 
452 		i++;
453 	}
454 
455 	if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID) {
456 		vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
457 		rule_info->vlan_type =
458 				ice_check_supported_vlan_tpid(vlan_tpid);
459 
460 		ice_rule_add_vlan_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
461 	}
462 
463 	if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
464 		list[i].type = ICE_VLAN_IN;
465 
466 		if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
467 			list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
468 			list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
469 		}
470 
471 		if (flags & ICE_TC_FLWR_FIELD_CVLAN_PRIO) {
472 			if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
473 				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
474 			} else {
475 				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
476 				list[i].h_u.vlan_hdr.vlan = 0;
477 			}
478 			list[i].h_u.vlan_hdr.vlan |=
479 				headers->cvlan_hdr.vlan_prio;
480 		}
481 
482 		i++;
483 	}
484 
485 	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
486 		     ICE_TC_FLWR_FIELD_PPP_PROTO)) {
487 		struct ice_pppoe_hdr *vals, *masks;
488 
489 		vals = &list[i].h_u.pppoe_hdr;
490 		masks = &list[i].m_u.pppoe_hdr;
491 
492 		list[i].type = ICE_PPPOE;
493 
494 		if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
495 			vals->session_id = headers->pppoe_hdr.session_id;
496 			masks->session_id = cpu_to_be16(0xFFFF);
497 		}
498 
499 		if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
500 			vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
501 			masks->ppp_prot_id = cpu_to_be16(0xFFFF);
502 		}
503 
504 		i++;
505 	}
506 
507 	/* copy L3 (IPv[4|6]: src, dest) address */
508 	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
509 		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
510 		struct ice_tc_l3_hdr *l3_key, *l3_mask;
511 
512 		list[i].type = ice_proto_type_from_ipv4(inner);
513 		l3_key = &headers->l3_key;
514 		l3_mask = &headers->l3_mask;
515 		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
516 			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
517 			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
518 		}
519 		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
520 			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
521 			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
522 		}
523 		i++;
524 	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
525 			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
526 		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
527 		struct ice_tc_l3_hdr *l3_key, *l3_mask;
528 
529 		list[i].type = ice_proto_type_from_ipv6(inner);
530 		ipv6_hdr = &list[i].h_u.ipv6_hdr;
531 		ipv6_mask = &list[i].m_u.ipv6_hdr;
532 		l3_key = &headers->l3_key;
533 		l3_mask = &headers->l3_mask;
534 
535 		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
536 			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
537 			       sizeof(l3_key->dst_ipv6_addr));
538 			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
539 			       sizeof(l3_mask->dst_ipv6_addr));
540 		}
541 		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
542 			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
543 			       sizeof(l3_key->src_ipv6_addr));
544 			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
545 			       sizeof(l3_mask->src_ipv6_addr));
546 		}
547 		i++;
548 	}
549 
550 	if (headers->l2_key.n_proto == htons(ETH_P_IP) &&
551 	    (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
552 		list[i].type = ice_proto_type_from_ipv4(inner);
553 
554 		if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
555 			list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos;
556 			list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos;
557 		}
558 
559 		if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
560 			list[i].h_u.ipv4_hdr.time_to_live =
561 				headers->l3_key.ttl;
562 			list[i].m_u.ipv4_hdr.time_to_live =
563 				headers->l3_mask.ttl;
564 		}
565 
566 		i++;
567 	}
568 
569 	if (headers->l2_key.n_proto == htons(ETH_P_IPV6) &&
570 	    (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
571 		struct ice_ipv6_hdr *hdr_h, *hdr_m;
572 
573 		hdr_h = &list[i].h_u.ipv6_hdr;
574 		hdr_m = &list[i].m_u.ipv6_hdr;
575 		list[i].type = ice_proto_type_from_ipv6(inner);
576 
577 		if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
578 			be32p_replace_bits(&hdr_h->be_ver_tc_flow,
579 					   headers->l3_key.tos,
580 					   ICE_IPV6_HDR_TC_MASK);
581 			be32p_replace_bits(&hdr_m->be_ver_tc_flow,
582 					   headers->l3_mask.tos,
583 					   ICE_IPV6_HDR_TC_MASK);
584 		}
585 
586 		if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
587 			hdr_h->hop_limit = headers->l3_key.ttl;
588 			hdr_m->hop_limit = headers->l3_mask.ttl;
589 		}
590 
591 		i++;
592 	}
593 
594 	if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID) {
595 		list[i].type = ICE_L2TPV3;
596 
597 		list[i].h_u.l2tpv3_sess_hdr.session_id =
598 			headers->l2tpv3_hdr.session_id;
599 		list[i].m_u.l2tpv3_sess_hdr.session_id =
600 			cpu_to_be32(0xFFFFFFFF);
601 
602 		i++;
603 	}
604 
605 	/* copy L4 (src, dest) port */
606 	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
607 		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
608 		struct ice_tc_l4_hdr *l4_key, *l4_mask;
609 
610 		list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
611 		l4_key = &headers->l4_key;
612 		l4_mask = &headers->l4_mask;
613 
614 		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
615 			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
616 			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
617 		}
618 		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
619 			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
620 			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
621 		}
622 		i++;
623 	}
624 
625 	return i;
626 }
627 
628 /**
629  * ice_tc_tun_get_type - get the tunnel type
630  * @tunnel_dev: ptr to tunnel device
631  *
632  * This function detects the appropriate tunnel_type if the specified
633  * device is a tunnel device such as VXLAN/Geneve
634  */
635 static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
636 {
637 	if (netif_is_vxlan(tunnel_dev))
638 		return TNL_VXLAN;
639 	if (netif_is_geneve(tunnel_dev))
640 		return TNL_GENEVE;
641 	if (netif_is_gretap(tunnel_dev) ||
642 	    netif_is_ip6gretap(tunnel_dev))
643 		return TNL_GRETAP;
644 
645 	/* Assume GTP-U by default in case of GTP netdev.
646 	 * GTP-C may be selected later, based on enc_dst_port.
647 	 */
648 	if (netif_is_gtp(tunnel_dev))
649 		return TNL_GTPU;
650 	if (netif_is_pfcp(tunnel_dev))
651 		return TNL_PFCP;
652 	return TNL_LAST;
653 }
654 
655 bool ice_is_tunnel_supported(struct net_device *dev)
656 {
657 	return ice_tc_tun_get_type(dev) != TNL_LAST;
658 }
659 
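/* Treat the ice PF netdev itself, or any supported tunnel device, as the
 * eswitch uplink
 */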
660 static bool ice_tc_is_dev_uplink(struct net_device *dev)
661 {
662 	return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
663 }
664 
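/**
 * ice_tc_setup_redirect_action - set up a switchdev redirect action
 * @filter_dev: Pointer to device on which filter is being added
 * @fltr: Pointer to TC flower filter structure
 * @target_dev: Pointer to the redirect target device
 *
 * Set the action to ICE_FWD_TO_VSI and derive the destination VSI and the
 * filter direction from whether @filter_dev and @target_dev are port
 * representors or the uplink.
 */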
665 static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
666 					struct ice_tc_flower_fltr *fltr,
667 					struct net_device *target_dev)
668 {
669 	struct ice_repr *repr;
670 
671 	fltr->action.fltr_act = ICE_FWD_TO_VSI;
672 
673 	if (ice_is_port_repr_netdev(filter_dev) &&
674 	    ice_is_port_repr_netdev(target_dev)) {
675 		repr = ice_netdev_to_repr(target_dev);
676 
677 		fltr->dest_vsi = repr->src_vsi;
678 		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
679 	} else if (ice_is_port_repr_netdev(filter_dev) &&
680 		   ice_tc_is_dev_uplink(target_dev)) {
681 		repr = ice_netdev_to_repr(filter_dev);
682 
683 		fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
684 		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
685 	} else if (ice_tc_is_dev_uplink(filter_dev) &&
686 		   ice_is_port_repr_netdev(target_dev)) {
687 		repr = ice_netdev_to_repr(target_dev);
688 
689 		fltr->dest_vsi = repr->src_vsi;
690 		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
691 	} else {
692 		NL_SET_ERR_MSG_MOD(fltr->extack,
693 				   "Unsupported netdevice in switchdev mode");
694 		return -EINVAL;
695 	}
696 
697 	return 0;
698 }
699 
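/**
 * ice_tc_setup_drop_action - set up a switchdev drop action
 * @filter_dev: Pointer to device on which filter is being added
 * @fltr: Pointer to TC flower filter structure
 *
 * Set the action to ICE_DROP_PACKET and the filter direction based on
 * whether @filter_dev is a port representor or the uplink.
 */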
700 static int
701 ice_tc_setup_drop_action(struct net_device *filter_dev,
702 			 struct ice_tc_flower_fltr *fltr)
703 {
704 	fltr->action.fltr_act = ICE_DROP_PACKET;
705 
706 	if (ice_is_port_repr_netdev(filter_dev)) {
707 		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
708 	} else if (ice_tc_is_dev_uplink(filter_dev)) {
709 		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
710 	} else {
711 		NL_SET_ERR_MSG_MOD(fltr->extack,
712 				   "Unsupported netdevice in switchdev mode");
713 		return -EINVAL;
714 	}
715 
716 	return 0;
717 }
718 
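/**
 * ice_tc_setup_mirror_action - set up a switchdev mirror action
 * @filter_dev: Pointer to device on which filter is being added
 * @fltr: Pointer to TC flower filter structure
 * @target_dev: Pointer to the mirror target device
 *
 * Set the action to ICE_MIRROR_PACKET and derive the destination VSI and
 * the filter direction the same way as for the redirect action.
 */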
719 static int ice_tc_setup_mirror_action(struct net_device *filter_dev,
720 				      struct ice_tc_flower_fltr *fltr,
721 				      struct net_device *target_dev)
722 {
723 	struct ice_repr *repr;
724 
725 	fltr->action.fltr_act = ICE_MIRROR_PACKET;
726 
727 	if (ice_is_port_repr_netdev(filter_dev) &&
728 	    ice_is_port_repr_netdev(target_dev)) {
729 		repr = ice_netdev_to_repr(target_dev);
730 
731 		fltr->dest_vsi = repr->src_vsi;
732 		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
733 	} else if (ice_is_port_repr_netdev(filter_dev) &&
734 		   ice_tc_is_dev_uplink(target_dev)) {
735 		repr = ice_netdev_to_repr(filter_dev);
736 
737 		fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
738 		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
739 	} else if (ice_tc_is_dev_uplink(filter_dev) &&
740 		   ice_is_port_repr_netdev(target_dev)) {
741 		repr = ice_netdev_to_repr(target_dev);
742 
743 		fltr->dest_vsi = repr->src_vsi;
744 		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
745 	} else {
746 		NL_SET_ERR_MSG_MOD(fltr->extack,
747 				   "Unsupported netdevice in switchdev mode");
748 		return -EINVAL;
749 	}
750 
751 	return 0;
752 }
753 
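/**
 * ice_eswitch_tc_parse_action - parse a TC action in switchdev mode
 * @filter_dev: Pointer to device on which filter is being added
 * @fltr: Pointer to TC flower filter structure
 * @act: Pointer to the flow action entry to parse
 *
 * Only drop, redirect and mirred actions are supported in switchdev mode.
 */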
754 static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
755 				       struct ice_tc_flower_fltr *fltr,
756 				       struct flow_action_entry *act)
757 {
758 	int err;
759 
760 	switch (act->id) {
761 	case FLOW_ACTION_DROP:
762 		err = ice_tc_setup_drop_action(filter_dev, fltr);
763 		if (err)
764 			return err;
765 
766 		break;
767 
768 	case FLOW_ACTION_REDIRECT:
769 		err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
770 		if (err)
771 			return err;
772 
773 		break;
774 
775 	case FLOW_ACTION_MIRRED:
776 		err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev);
777 		if (err)
778 			return err;
779 		break;
780 
781 	default:
782 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
783 		return -EINVAL;
784 	}
785 
786 	return 0;
787 }
788 
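/**
 * ice_eswitch_add_tc_fltr - add a TC flower filter in switchdev mode
 * @vsi: Pointer to VSI
 * @fltr: Pointer to TC flower filter structure
 *
 * Build the advanced lookup list for the filter, fill the rule info
 * (priority, source, loopback/LAN enable flags) based on the filter
 * direction, and add the advanced rule to the switch. On success, store
 * the rule IDs needed later to remove the filter.
 */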
789 static int
790 ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
791 {
792 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
793 	struct ice_adv_rule_info rule_info = { 0 };
794 	struct ice_rule_query_data rule_added;
795 	struct ice_hw *hw = &vsi->back->hw;
796 	struct ice_adv_lkup_elem *list;
797 	u32 flags = fltr->flags;
798 	int lkups_cnt;
799 	int ret;
800 	int i;
801 
802 	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
803 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
804 		return -EOPNOTSUPP;
805 	}
806 
807 	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
808 	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
809 	if (!list)
810 		return -ENOMEM;
811 
812 	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
813 	if (i != lkups_cnt) {
814 		ret = -EINVAL;
815 		goto exit;
816 	}
817 
818 	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
819 	if (fltr->action.fltr_act != ICE_DROP_PACKET)
820 		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
821 	/* For now, make the priority the highest; it also becomes the
822 	 * priority of the recipe that gets created as a result of the new
823 	 * extraction sequence based on the input set.
824 	 * Priority '7' is the max value for a switch recipe; the higher the
825 	 * number, the earlier the rule is evaluated.
826 	 */
827 	rule_info.priority = 7;
828 	rule_info.flags_info.act_valid = true;
829 
830 	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
831 		/* Uplink to VF */
832 		rule_info.sw_act.flag |= ICE_FLTR_RX;
833 		rule_info.sw_act.src = hw->pf_id;
834 		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
835 	} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
836 		   fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
837 		/* VF to Uplink */
838 		rule_info.sw_act.flag |= ICE_FLTR_TX;
839 		rule_info.sw_act.src = vsi->idx;
840 		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
841 	} else {
842 		/* VF to VF */
843 		rule_info.sw_act.flag |= ICE_FLTR_TX;
844 		rule_info.sw_act.src = vsi->idx;
845 		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
846 	}
847 
848 	/* specify the cookie as filter_rule_id */
849 	rule_info.fltr_rule_id = fltr->cookie;
850 
851 	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
852 	if (ret == -EEXIST) {
853 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
854 		ret = -EINVAL;
855 		goto exit;
856 	} else if (ret) {
857 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
858 		goto exit;
859 	}
860 
861 	/* store the output params, which are needed later for removing
862 	 * advanced switch filter
863 	 */
864 	fltr->rid = rule_added.rid;
865 	fltr->rule_id = rule_added.rule_id;
866 	fltr->dest_vsi_handle = rule_added.vsi_handle;
867 
868 exit:
869 	kfree(list);
870 	return ret;
871 }
872 
873 /**
874  * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action)
875  * @vsi: Pointer to VSI
876  * @queue: Queue index
877  *
878  * Locate the VSI using the specified "queue". When ADQ is not enabled,
879  * always return the input VSI; otherwise locate the corresponding
880  * VSI based on the per-channel "offset" and "qcount"
881  */
882 struct ice_vsi *
883 ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue)
884 {
885 	int num_tc, tc;
886 
887 	/* if ADQ is not active, passed VSI is the candidate VSI */
888 	if (!ice_is_adq_active(vsi->back))
889 		return vsi;
890 
891 	/* Locate the VSI (it could still be main PF VSI or CHNL_VSI depending
892 	 * upon queue number)
893 	 */
894 	num_tc = vsi->mqprio_qopt.qopt.num_tc;
895 
896 	for (tc = 0; tc < num_tc; tc++) {
897 		int qcount = vsi->mqprio_qopt.qopt.count[tc];
898 		int offset = vsi->mqprio_qopt.qopt.offset[tc];
899 
900 		if (queue >= offset && queue < offset + qcount) {
901 			/* for non-ADQ TCs, passed VSI is the candidate VSI */
902 			if (tc < ICE_CHNL_START_TC)
903 				return vsi;
904 			else
905 				return vsi->tc_map_vsi[tc];
906 		}
907 	}
908 	return NULL;
909 }
910 
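/* Return the Rx ring for the filter's forward-to-queue index, or NULL if
 * the queue is out of range
 */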
911 static struct ice_rx_ring *
912 ice_locate_rx_ring_using_queue(struct ice_vsi *vsi,
913 			       struct ice_tc_flower_fltr *tc_fltr)
914 {
915 	u16 queue = tc_fltr->action.fwd.q.queue;
916 
917 	return queue < vsi->num_rxq ? vsi->rx_rings[queue] : NULL;
918 }
919 
920 /**
921  * ice_tc_forward_action - Determine destination VSI and queue for the action
922  * @vsi: Pointer to VSI
923  * @tc_fltr: Pointer to TC flower filter structure
924  *
925  * Validates the tc forward action and determines the destination VSI and queue
926  * for the forward action.
927  */
928 static struct ice_vsi *
929 ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
930 {
931 	struct ice_rx_ring *ring = NULL;
932 	struct ice_vsi *dest_vsi = NULL;
933 	struct ice_pf *pf = vsi->back;
934 	struct device *dev;
935 	u32 tc_class;
936 	int q;
937 
938 	dev = ice_pf_to_dev(pf);
939 
940 	/* Get the destination VSI and/or destination queue and validate them */
941 	switch (tc_fltr->action.fltr_act) {
942 	case ICE_FWD_TO_VSI:
943 		tc_class = tc_fltr->action.fwd.tc.tc_class;
944 		/* Select the destination VSI */
945 		if (tc_class < ICE_CHNL_START_TC) {
946 			NL_SET_ERR_MSG_MOD(tc_fltr->extack,
947 					   "Unable to add filter because of unsupported destination");
948 			return ERR_PTR(-EOPNOTSUPP);
949 		}
950 		/* Locate ADQ VSI depending on hw_tc number */
951 		dest_vsi = vsi->tc_map_vsi[tc_class];
952 		break;
953 	case ICE_FWD_TO_Q:
954 		/* Locate the Rx queue */
955 		ring = ice_locate_rx_ring_using_queue(vsi, tc_fltr);
956 		if (!ring) {
957 			dev_err(dev,
958 				"Unable to locate Rx queue for action fwd_to_queue: %u\n",
959 				tc_fltr->action.fwd.q.queue);
960 			return ERR_PTR(-EINVAL);
961 		}
962 		/* Determine destination VSI even though the action is
963 		 * FWD_TO_QUEUE, because QUEUE is associated with VSI
964 		 */
965 		q = tc_fltr->action.fwd.q.queue;
966 		dest_vsi = ice_locate_vsi_using_queue(vsi, q);
967 		break;
968 	default:
969 		dev_err(dev,
970 			"Unable to add filter because of unsupported action %u (supported actions: fwd to tc, fwd to queue)\n",
971 			tc_fltr->action.fltr_act);
972 		return ERR_PTR(-EINVAL);
973 	}
974 	/* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */
975 	if (!dest_vsi) {
976 		dev_err(dev,
977 			"Unable to add filter because specified destination VSI doesn't exist\n");
978 		return ERR_PTR(-EINVAL);
979 	}
980 	return dest_vsi;
981 }
982 
983 /**
984  * ice_add_tc_flower_adv_fltr - add appropriate filter rules
985  * @vsi: Pointer to VSI
986  * @tc_fltr: Pointer to TC flower filter structure
987  *
988  * Add the appropriate filter rules based on the filter parameters, using
989  * advanced recipes supported by the OS package.
990  */
991 static int
992 ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
993 			   struct ice_tc_flower_fltr *tc_fltr)
994 {
995 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
996 	struct ice_adv_rule_info rule_info = {0};
997 	struct ice_rule_query_data rule_added;
998 	struct ice_adv_lkup_elem *list;
999 	struct ice_pf *pf = vsi->back;
1000 	struct ice_hw *hw = &pf->hw;
1001 	u32 flags = tc_fltr->flags;
1002 	struct ice_vsi *dest_vsi;
1003 	struct device *dev;
1004 	u16 lkups_cnt = 0;
1005 	u16 l4_proto = 0;
1006 	int ret = 0;
1007 	u16 i = 0;
1008 
1009 	dev = ice_pf_to_dev(pf);
1010 	if (ice_is_safe_mode(pf)) {
1011 		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
1012 		return -EOPNOTSUPP;
1013 	}
1014 
1015 	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
1016 				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
1017 				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
1018 				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
1019 				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
1020 		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
1021 		return -EOPNOTSUPP;
1022 	}
1023 
1024 	/* validate forwarding action VSI and queue */
1025 	if (ice_is_forward_action(tc_fltr->action.fltr_act)) {
1026 		dest_vsi = ice_tc_forward_action(vsi, tc_fltr);
1027 		if (IS_ERR(dest_vsi))
1028 			return PTR_ERR(dest_vsi);
1029 	}
1030 
1031 	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
1032 	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
1033 	if (!list)
1034 		return -ENOMEM;
1035 
1036 	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
1037 	if (i != lkups_cnt) {
1038 		ret = -EINVAL;
1039 		goto exit;
1040 	}
1041 
1042 	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
1043 	/* specify the cookie as filter_rule_id */
1044 	rule_info.fltr_rule_id = tc_fltr->cookie;
1045 
1046 	switch (tc_fltr->action.fltr_act) {
1047 	case ICE_FWD_TO_VSI:
1048 		rule_info.sw_act.vsi_handle = dest_vsi->idx;
1049 		rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
1050 		rule_info.sw_act.src = hw->pf_id;
1051 		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
1052 			tc_fltr->action.fwd.tc.tc_class,
1053 			rule_info.sw_act.vsi_handle, lkups_cnt);
1054 		break;
1055 	case ICE_FWD_TO_Q:
1056 		/* HW queue number in global space */
1057 		rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
1058 		rule_info.sw_act.vsi_handle = dest_vsi->idx;
1059 		rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
1060 		rule_info.sw_act.src = hw->pf_id;
1061 		dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n",
1062 			tc_fltr->action.fwd.q.queue,
1063 			tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
1064 		break;
1065 	case ICE_DROP_PACKET:
1066 		rule_info.sw_act.flag |= ICE_FLTR_RX;
1067 		rule_info.sw_act.src = hw->pf_id;
1068 		rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
1069 		break;
1070 	default:
1071 		ret = -EOPNOTSUPP;
1072 		goto exit;
1073 	}
1074 
1075 	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
1076 	if (ret == -EEXIST) {
1077 		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
1078 				   "Unable to add filter because it already exists");
1079 		ret = -EINVAL;
1080 		goto exit;
1081 	} else if (ret) {
1082 		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
1083 				   "Unable to add filter due to error");
1084 		goto exit;
1085 	}
1086 
1087 	/* store the output params, which are needed later for removing
1088 	 * advanced switch filter
1089 	 */
1090 	tc_fltr->rid = rule_added.rid;
1091 	tc_fltr->rule_id = rule_added.rule_id;
1092 	tc_fltr->dest_vsi_handle = rule_added.vsi_handle;
1093 	if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
1094 	    tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
1095 		tc_fltr->dest_vsi = dest_vsi;
1096 		/* keep track of advanced switch filter for
1097 		 * destination VSI
1098 		 */
1099 		dest_vsi->num_chnl_fltr++;
1100 
1101 		/* keeps track of channel filters for PF VSI */
1102 		if (vsi->type == ICE_VSI_PF &&
1103 		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
1104 			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
1105 			pf->num_dmac_chnl_fltrs++;
1106 	}
1107 	switch (tc_fltr->action.fltr_act) {
1108 	case ICE_FWD_TO_VSI:
1109 		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to TC %u, rid %u, rule_id %u, vsi_idx %u\n",
1110 			lkups_cnt, flags,
1111 			tc_fltr->action.fwd.tc.tc_class, rule_added.rid,
1112 			rule_added.rule_id, rule_added.vsi_handle);
1113 		break;
1114 	case ICE_FWD_TO_Q:
1115 		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to queue: %u (HW queue %u), rid %u, rule_id %u\n",
1116 			lkups_cnt, flags, tc_fltr->action.fwd.q.queue,
1117 			tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
1118 			rule_added.rule_id);
1119 		break;
1120 	case ICE_DROP_PACKET:
1121 		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n",
1122 			lkups_cnt, flags, rule_added.rid, rule_added.rule_id);
1123 		break;
1124 	default:
1125 		break;
1126 	}
1127 exit:
1128 	kfree(list);
1129 	return ret;
1130 }
1131 
1132 /**
1133  * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
1134  * @match: Pointer to flow match structure
1135  * @fltr: Pointer to filter structure
1136  * @headers: Pointer to outer header fields
1137  * Return: PPP protocol used in filter (ppp_ses or ppp_disc)
1138  */
1139 static u16
1140 ice_tc_set_pppoe(struct flow_match_pppoe *match,
1141 		 struct ice_tc_flower_fltr *fltr,
1142 		 struct ice_tc_flower_lyr_2_4_hdrs *headers)
1143 {
1144 	if (match->mask->session_id) {
1145 		fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
1146 		headers->pppoe_hdr.session_id = match->key->session_id;
1147 	}
1148 
1149 	if (match->mask->ppp_proto) {
1150 		fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
1151 		headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
1152 	}
1153 
1154 	return be16_to_cpu(match->key->type);
1155 }
1156 
1157 /**
1158  * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
1159  * @match: Pointer to flow match structure
1160  * @fltr: Pointer to filter structure
1161  * @headers: inner or outer header fields
1162  * @is_encap: set true for tunnel IPv4 address
1163  */
1164 static int
1165 ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
1166 		struct ice_tc_flower_fltr *fltr,
1167 		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
1168 {
1169 	if (match->key->dst) {
1170 		if (is_encap)
1171 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
1172 		else
1173 			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
1174 		headers->l3_key.dst_ipv4 = match->key->dst;
1175 		headers->l3_mask.dst_ipv4 = match->mask->dst;
1176 	}
1177 	if (match->key->src) {
1178 		if (is_encap)
1179 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
1180 		else
1181 			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
1182 		headers->l3_key.src_ipv4 = match->key->src;
1183 		headers->l3_mask.src_ipv4 = match->mask->src;
1184 	}
1185 	return 0;
1186 }
1187 
1188 /**
1189  * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
1190  * @match: Pointer to flow match structure
1191  * @fltr: Pointer to filter structure
1192  * @headers: inner or outer header fields
1193  * @is_encap: set true for tunnel IPv6 address
1194  */
1195 static int
1196 ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
1197 		struct ice_tc_flower_fltr *fltr,
1198 		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
1199 {
1200 	struct ice_tc_l3_hdr *l3_key, *l3_mask;
1201 
1202 	/* neither the src nor the dest IPv6 address may be the LOOPBACK
1203 	 * address (0:0:0:0:0:0:0:1), which can be represented as ::1
1204 	 */
1205 	if (ipv6_addr_loopback(&match->key->dst) ||
1206 	    ipv6_addr_loopback(&match->key->src)) {
1207 		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
1208 		return -EINVAL;
1209 	}
1210 	/* error if both src and dest IPv6 addresses are unspecified (any) */
1211 	if (ipv6_addr_any(&match->mask->dst) &&
1212 	    ipv6_addr_any(&match->mask->src)) {
1213 		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
1214 		return -EINVAL;
1215 	}
1216 	if (!ipv6_addr_any(&match->mask->dst)) {
1217 		if (is_encap)
1218 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
1219 		else
1220 			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
1221 	}
1222 	if (!ipv6_addr_any(&match->mask->src)) {
1223 		if (is_encap)
1224 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
1225 		else
1226 			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
1227 	}
1228 
1229 	l3_key = &headers->l3_key;
1230 	l3_mask = &headers->l3_mask;
1231 
1232 	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
1233 			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
1234 		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
1235 		       sizeof(match->key->src.s6_addr));
1236 		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
1237 		       sizeof(match->mask->src.s6_addr));
1238 	}
1239 	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
1240 			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
1241 		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
1242 		       sizeof(match->key->dst.s6_addr));
1243 		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
1244 		       sizeof(match->mask->dst.s6_addr));
1245 	}
1246 
1247 	return 0;
1248 }
1249 
1250 /**
1251  * ice_tc_set_tos_ttl - Parse IP ToS/TTL from TC flower filter
1252  * @match: Pointer to flow match structure
1253  * @fltr: Pointer to filter structure
1254  * @headers: inner or outer header fields
1255  * @is_encap: set true for tunnel
1256  */
1257 static void
1258 ice_tc_set_tos_ttl(struct flow_match_ip *match,
1259 		   struct ice_tc_flower_fltr *fltr,
1260 		   struct ice_tc_flower_lyr_2_4_hdrs *headers,
1261 		   bool is_encap)
1262 {
1263 	if (match->mask->tos) {
1264 		if (is_encap)
1265 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS;
1266 		else
1267 			fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS;
1268 
1269 		headers->l3_key.tos = match->key->tos;
1270 		headers->l3_mask.tos = match->mask->tos;
1271 	}
1272 
1273 	if (match->mask->ttl) {
1274 		if (is_encap)
1275 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL;
1276 		else
1277 			fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL;
1278 
1279 		headers->l3_key.ttl = match->key->ttl;
1280 		headers->l3_mask.ttl = match->mask->ttl;
1281 	}
1282 }
1283 
1284 /**
1285  * ice_tc_set_port - Parse ports from TC flower filter
1286  * @match: Flow match structure
1287  * @fltr: Pointer to filter structure
1288  * @headers: inner or outer header fields
1289  * @is_encap: set true for tunnel port
1290  */
1291 static int
1292 ice_tc_set_port(struct flow_match_ports match,
1293 		struct ice_tc_flower_fltr *fltr,
1294 		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
1295 {
1296 	if (match.key->dst) {
1297 		if (is_encap)
1298 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
1299 		else
1300 			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
1301 
1302 		headers->l4_key.dst_port = match.key->dst;
1303 		headers->l4_mask.dst_port = match.mask->dst;
1304 	}
1305 	if (match.key->src) {
1306 		if (is_encap)
1307 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
1308 		else
1309 			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
1310 
1311 		headers->l4_key.src_port = match.key->src;
1312 		headers->l4_mask.src_port = match.mask->src;
1313 	}
1314 	return 0;
1315 }
1316 
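/* Return @dev if it is a supported tunnel device; otherwise return the
 * first redirect target in @rule that is, or NULL if there is none.
 */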
1317 static struct net_device *
1318 ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
1319 {
1320 	struct flow_action_entry *act;
1321 	int i;
1322 
1323 	if (ice_is_tunnel_supported(dev))
1324 		return dev;
1325 
1326 	flow_action_for_each(i, act, &rule->action) {
1327 		if (act->id == FLOW_ACTION_REDIRECT &&
1328 		    ice_is_tunnel_supported(act->dev))
1329 			return act->dev;
1330 	}
1331 
1332 	return NULL;
1333 }
1334 
1335 /**
1336  * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
1337  * @match: Flow match structure
1338  * @fltr: Pointer to filter structure
1339  *
1340  * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
1341  * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
1342  * therefore making GTP-U the default choice (when destination port number is
1343  * not specified).
1344  */
1345 static int
1346 ice_parse_gtp_type(struct flow_match_ports match,
1347 		   struct ice_tc_flower_fltr *fltr)
1348 {
1349 	u16 dst_port;
1350 
1351 	if (match.key->dst) {
1352 		dst_port = be16_to_cpu(match.key->dst);
1353 
1354 		switch (dst_port) {
1355 		case 2152:
1356 			break;
1357 		case 2123:
1358 			fltr->tunnel_type = TNL_GTPC;
1359 			break;
1360 		default:
1361 			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
1362 			return -EINVAL;
1363 		}
1364 	}
1365 
1366 	return 0;
1367 }
1368 
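/**
 * ice_parse_tunnel_attr - parse tunnel (encap) matches from a TC flower rule
 * @dev: Pointer to the tunnel device
 * @rule: Pointer to the flow rule
 * @fltr: Pointer to TC flower filter structure
 *
 * Parse the enc_* matches (key ID, outer IPv4/IPv6 addresses, IP ToS/TTL,
 * destination port, GTP and PFCP options) into the outer headers of @fltr
 * and set the tunnel type.
 */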
1369 static int
1370 ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
1371 		      struct ice_tc_flower_fltr *fltr)
1372 {
1373 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
1374 	struct flow_match_control enc_control;
1375 
1376 	fltr->tunnel_type = ice_tc_tun_get_type(dev);
1377 	headers->l3_key.ip_proto = IPPROTO_UDP;
1378 
1379 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
1380 		struct flow_match_enc_keyid enc_keyid;
1381 
1382 		flow_rule_match_enc_keyid(rule, &enc_keyid);
1383 
1384 		if (!enc_keyid.mask->keyid ||
1385 		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
1386 			return -EINVAL;
1387 
1388 		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
1389 		fltr->tenant_id = enc_keyid.key->keyid;
1390 	}
1391 
1392 	flow_rule_match_enc_control(rule, &enc_control);
1393 
1394 	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1395 		struct flow_match_ipv4_addrs match;
1396 
1397 		flow_rule_match_enc_ipv4_addrs(rule, &match);
1398 		if (ice_tc_set_ipv4(&match, fltr, headers, true))
1399 			return -EINVAL;
1400 	} else if (enc_control.key->addr_type ==
1401 					FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1402 		struct flow_match_ipv6_addrs match;
1403 
1404 		flow_rule_match_enc_ipv6_addrs(rule, &match);
1405 		if (ice_tc_set_ipv6(&match, fltr, headers, true))
1406 			return -EINVAL;
1407 	}
1408 
1409 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
1410 		struct flow_match_ip match;
1411 
1412 		flow_rule_match_enc_ip(rule, &match);
1413 		ice_tc_set_tos_ttl(&match, fltr, headers, true);
1414 	}
1415 
1416 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
1417 	    fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
1418 		struct flow_match_ports match;
1419 
1420 		flow_rule_match_enc_ports(rule, &match);
1421 
1422 		if (fltr->tunnel_type != TNL_GTPU) {
1423 			if (ice_tc_set_port(match, fltr, headers, true))
1424 				return -EINVAL;
1425 		} else {
1426 			if (ice_parse_gtp_type(match, fltr))
1427 				return -EINVAL;
1428 		}
1429 	}
1430 
1431 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
1432 	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
1433 		struct flow_match_enc_opts match;
1434 
1435 		flow_rule_match_enc_opts(rule, &match);
1436 
1437 		memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
1438 		       sizeof(struct gtp_pdu_session_info));
1439 
1440 		memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
1441 		       sizeof(struct gtp_pdu_session_info));
1442 
1443 		fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS;
1444 	}
1445 
1446 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
1447 	    fltr->tunnel_type == TNL_PFCP) {
1448 		struct flow_match_enc_opts match;
1449 
1450 		flow_rule_match_enc_opts(rule, &match);
1451 
1452 		memcpy(&fltr->pfcp_meta_keys, match.key->data,
1453 		       sizeof(struct pfcp_metadata));
1454 		memcpy(&fltr->pfcp_meta_masks, match.mask->data,
1455 		       sizeof(struct pfcp_metadata));
1456 
1457 		fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS;
1458 	}
1459 
1460 	return 0;
1461 }
1462 
1463 /**
1464  * ice_parse_cls_flower - Parse TC flower filters provided by kernel
1465  * @filter_dev: Pointer to device on which filter is being added
1466  * @vsi: Pointer to the VSI
1467  * @f: Pointer to struct flow_cls_offload
1468  * @fltr: Pointer to filter structure
1469  */
1470 static int
1471 ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
1472 		     struct flow_cls_offload *f,
1473 		     struct ice_tc_flower_fltr *fltr)
1474 {
1475 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
1476 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1477 	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
1478 	struct flow_dissector *dissector;
1479 	struct net_device *tunnel_dev;
1480 
1481 	dissector = rule->match.dissector;
1482 
1483 	if (dissector->used_keys &
1484 	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
1485 	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
1486 	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
1487 	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
1488 	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
1489 	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
1490 	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
1491 	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
1492 	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
1493 	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
1494 	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
1495 	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
1496 	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
1497 	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
1498 	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
1499 	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
1500 	      BIT_ULL(FLOW_DISSECTOR_KEY_PPPOE) |
1501 	      BIT_ULL(FLOW_DISSECTOR_KEY_L2TPV3))) {
1502 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
1503 		return -EOPNOTSUPP;
1504 	}
1505 
1506 	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
1507 	if (tunnel_dev) {
1508 		int err;
1509 
1510 		filter_dev = tunnel_dev;
1511 
1512 		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
1513 		if (err) {
1514 			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
1515 			return err;
1516 		}
1517 
1518 		/* PFCP is considered non-tunneled - don't swap headers. */
1519 		if (fltr->tunnel_type != TNL_PFCP) {
1520 			/* Header pointers should point to the inner headers,
1521 			 * outer headers were already set by
1522 			 * ice_parse_tunnel_attr().
1523 			 */
1524 			headers = &fltr->inner_headers;
1525 		}
1526 	} else if (dissector->used_keys &
1527 		  (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
1528 		   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
1529 		   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
1530 		   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
1531 		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
1532 		return -EOPNOTSUPP;
1533 	} else {
1534 		fltr->tunnel_type = TNL_LAST;
1535 	}
1536 
1537 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
1538 		struct flow_match_basic match;
1539 
1540 		flow_rule_match_basic(rule, &match);
1541 
1542 		n_proto_key = ntohs(match.key->n_proto);
1543 		n_proto_mask = ntohs(match.mask->n_proto);
1544 
1545 		if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
1546 		    fltr->tunnel_type == TNL_GTPU ||
1547 		    fltr->tunnel_type == TNL_GTPC) {
1548 			n_proto_key = 0;
1549 			n_proto_mask = 0;
1550 		} else {
1551 			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
1552 		}
1553 
1554 		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
1555 		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
1556 		headers->l3_key.ip_proto = match.key->ip_proto;
1557 	}
1558 
1559 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1560 		struct flow_match_eth_addrs match;
1561 
1562 		flow_rule_match_eth_addrs(rule, &match);
1563 
1564 		if (!is_zero_ether_addr(match.key->dst)) {
1565 			ether_addr_copy(headers->l2_key.dst_mac,
1566 					match.key->dst);
1567 			ether_addr_copy(headers->l2_mask.dst_mac,
1568 					match.mask->dst);
1569 			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
1570 		}
1571 
1572 		if (!is_zero_ether_addr(match.key->src)) {
1573 			ether_addr_copy(headers->l2_key.src_mac,
1574 					match.key->src);
1575 			ether_addr_copy(headers->l2_mask.src_mac,
1576 					match.mask->src);
1577 			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
1578 		}
1579 	}
1580 
1581 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
1582 	    is_vlan_dev(filter_dev)) {
1583 		struct flow_dissector_key_vlan mask;
1584 		struct flow_dissector_key_vlan key;
1585 		struct flow_match_vlan match;
1586 
1587 		if (is_vlan_dev(filter_dev)) {
1588 			match.key = &key;
1589 			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
1590 			match.key->vlan_priority = 0;
1591 			match.mask = &mask;
1592 			memset(match.mask, 0xff, sizeof(*match.mask));
1593 			match.mask->vlan_priority = 0;
1594 		} else {
1595 			flow_rule_match_vlan(rule, &match);
1596 		}
1597 
1598 		if (match.mask->vlan_id) {
1599 			if (match.mask->vlan_id == VLAN_VID_MASK) {
1600 				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
1601 				headers->vlan_hdr.vlan_id =
1602 					cpu_to_be16(match.key->vlan_id &
1603 						    VLAN_VID_MASK);
1604 			} else {
1605 				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
1606 				return -EINVAL;
1607 			}
1608 		}
1609 
1610 		if (match.mask->vlan_priority) {
1611 			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
1612 			headers->vlan_hdr.vlan_prio =
1613 				be16_encode_bits(match.key->vlan_priority,
1614 						 VLAN_PRIO_MASK);
1615 		}
1616 
1617 		if (match.mask->vlan_tpid) {
1618 			headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
1619 			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_TPID;
1620 		}
1621 	}
1622 
1623 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
1624 		struct flow_match_vlan match;
1625 
1626 		if (!ice_is_dvm_ena(&vsi->back->hw)) {
1627 			NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
1628 			return -EINVAL;
1629 		}
1630 
1631 		flow_rule_match_cvlan(rule, &match);
1632 
1633 		if (match.mask->vlan_id) {
1634 			if (match.mask->vlan_id == VLAN_VID_MASK) {
1635 				fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
1636 				headers->cvlan_hdr.vlan_id =
1637 					cpu_to_be16(match.key->vlan_id &
1638 						    VLAN_VID_MASK);
1639 			} else {
1640 				NL_SET_ERR_MSG_MOD(fltr->extack,
1641 						   "Bad CVLAN mask");
1642 				return -EINVAL;
1643 			}
1644 		}
1645 
1646 		if (match.mask->vlan_priority) {
1647 			fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
1648 			headers->cvlan_hdr.vlan_prio =
1649 				be16_encode_bits(match.key->vlan_priority,
1650 						 VLAN_PRIO_MASK);
1651 		}
1652 	}
1653 
1654 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
1655 		struct flow_match_pppoe match;
1656 
1657 		flow_rule_match_pppoe(rule, &match);
1658 		n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);
1659 
1660 		/* If ethertype equals ETH_P_PPP_SES, n_proto might be
1661 		 * overwritten by encapsulated protocol (ppp_proto field) or set
1662 		 * to 0. To correct this, flow_match_pppoe provides the type
1663 		 * field, which contains the actual ethertype (ETH_P_PPP_SES).
1664 		 */
1665 		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
1666 		headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
1667 		fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
1668 	}
1669 
1670 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
1671 		struct flow_match_control match;
1672 
1673 		flow_rule_match_control(rule, &match);
1674 
1675 		addr_type = match.key->addr_type;
1676 	}
1677 
1678 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1679 		struct flow_match_ipv4_addrs match;
1680 
1681 		flow_rule_match_ipv4_addrs(rule, &match);
1682 		if (ice_tc_set_ipv4(&match, fltr, headers, false))
1683 			return -EINVAL;
1684 	}
1685 
1686 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1687 		struct flow_match_ipv6_addrs match;
1688 
1689 		flow_rule_match_ipv6_addrs(rule, &match);
1690 		if (ice_tc_set_ipv6(&match, fltr, headers, false))
1691 			return -EINVAL;
1692 	}
1693 
1694 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
1695 		struct flow_match_ip match;
1696 
1697 		flow_rule_match_ip(rule, &match);
1698 		ice_tc_set_tos_ttl(&match, fltr, headers, false);
1699 	}
1700 
1701 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_L2TPV3)) {
1702 		struct flow_match_l2tpv3 match;
1703 
1704 		flow_rule_match_l2tpv3(rule, &match);
1705 
1706 		fltr->flags |= ICE_TC_FLWR_FIELD_L2TPV3_SESSID;
1707 		headers->l2tpv3_hdr.session_id = match.key->session_id;
1708 	}
1709 
1710 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1711 		struct flow_match_ports match;
1712 
1713 		flow_rule_match_ports(rule, &match);
1714 		if (ice_tc_set_port(match, fltr, headers, false))
1715 			return -EINVAL;
1716 		switch (headers->l3_key.ip_proto) {
1717 		case IPPROTO_TCP:
1718 		case IPPROTO_UDP:
1719 			break;
1720 		default:
1721 			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
1722 			return -EINVAL;
1723 		}
1724 	}
1725 	return 0;
1726 }
1727 
1728 /**
1729  * ice_add_switch_fltr - Add TC flower filters
1730  * @vsi: Pointer to VSI
1731  * @fltr: Pointer to struct ice_tc_flower_fltr
1732  *
1733  * Add filter in HW switch block
1734  */
1735 static int
1736 ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
1737 {
1738 	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
1739 		return -EOPNOTSUPP;
1740 
1741 	if (ice_is_eswitch_mode_switchdev(vsi->back))
1742 		return ice_eswitch_add_tc_fltr(vsi, fltr);
1743 
1744 	return ice_add_tc_flower_adv_fltr(vsi, fltr);
1745 }
1746 
1747 /**
1748  * ice_prep_adq_filter - Prepare ADQ filter with the required additional headers
1749  * @vsi: Pointer to VSI
1750  * @fltr: Pointer to TC flower filter structure
1751  *
1752  * Prepare ADQ filter with the required additional header fields
1753  */
1754 static int
1755 ice_prep_adq_filter(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
1756 {
1757 	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
1758 	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
1759 			   ICE_TC_FLWR_FIELD_SRC_MAC))) {
1760 		NL_SET_ERR_MSG_MOD(fltr->extack,
1761 				   "Unable to add filter because the combination of tunnel key and inner MAC match is unsupported");
1762 		return -EOPNOTSUPP;
1763 	}
1764 
1765 	/* For ADQ, the filter must include a destination MAC address; otherwise
1766 	 * unwanted packets with unrelated MAC addresses are delivered to ADQ
1767 	 * VSIs as long as the remaining filter criteria (such as destination IP
1768 	 * address and destination/source L4 ports) are satisfied. The code
1769 	 * below handles the following cases:
1770 	 * 1. Non-tunnel filter, user specified MAC addresses: use them.
1771 	 * 2. Non-tunnel filter, no user-specified MAC address: implicitly add
1772 	 * the lower netdev's active unicast MAC as the destination MAC.
1773 	 * 3. Tunnel filter: TC flower currently provides no way to specify an
1774 	 * outer destination MAC, so the driver implicitly adds the lower
1775 	 * netdev's active unicast MAC as the outer destination MAC.
1776 	 */
1777 	if (fltr->tunnel_type != TNL_LAST &&
1778 	    !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
1779 		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;
1780 
1781 	if (fltr->tunnel_type == TNL_LAST &&
1782 	    !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
1783 		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
1784 
1785 	if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
1786 			   ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
1787 		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
1788 				vsi->netdev->dev_addr);
1789 		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
1790 	}
1791 
1792 	/* Make sure the VLAN is already added to the main VSI before allowing
1793 	 * ADQ to add a VLAN-based filter such as MAC + VLAN + L4 port.
1794 	 */
1795 	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
1796 		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);
1797 
1798 		if (!ice_vlan_fltr_exist(&vsi->back->hw, vlan_id, vsi->idx)) {
1799 			NL_SET_ERR_MSG_MOD(fltr->extack,
1800 					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
1801 			return -EINVAL;
1802 		}
1803 	}
1804 	return 0;
1805 }
1806 
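/* Illustrative ADQ usage (assumed setup, values are examples only): with
 * channels created via mqprio, e.g.
 *   tc qdisc add dev <pf-netdev> root mqprio num_tc 2 map 0 1 \
 *       queues 4@0 4@4 hw 1 mode channel
 * a filter such as
 *   tc filter add dev <pf-netdev> ingress protocol ip flower \
 *       dst_ip 10.0.0.1 ip_proto tcp dst_port 80 skip_sw hw_tc 1
 * contains no MAC match, so ice_prep_adq_filter() implicitly adds the
 * netdev's active unicast MAC as the destination MAC before the rule is
 * programmed, keeping unrelated traffic out of the ADQ VSI.
 */
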
1807 /**
1808  * ice_handle_tclass_action - Support directing to a traffic class
1809  * @vsi: Pointer to VSI
1810  * @cls_flower: Pointer to TC flower offload structure
1811  * @fltr: Pointer to TC flower filter structure
1812  *
1813  * Support directing traffic to a traffic class/queue-set
1814  */
1815 static int
1816 ice_handle_tclass_action(struct ice_vsi *vsi,
1817 			 struct flow_cls_offload *cls_flower,
1818 			 struct ice_tc_flower_fltr *fltr)
1819 {
1820 	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
1821 
1822 	/* The user specified hw_tc (which must be non-zero for an ADQ TC); the
1823 	 * action is to forward to that hw_tc (i.e. the ADQ channel number).
1824 	 */
1825 	if (tc < ICE_CHNL_START_TC) {
1826 		NL_SET_ERR_MSG_MOD(fltr->extack,
1827 				   "Unable to add filter because of unsupported destination");
1828 		return -EOPNOTSUPP;
1829 	}
1830 	if (!(vsi->all_enatc & BIT(tc))) {
1831 		NL_SET_ERR_MSG_MOD(fltr->extack,
1832 				   "Unable to add filter because of non-existent destination");
1833 		return -EINVAL;
1834 	}
1835 	fltr->action.fltr_act = ICE_FWD_TO_VSI;
1836 	fltr->action.fwd.tc.tc_class = tc;
1837 
1838 	return ice_prep_adq_filter(vsi, fltr);
1839 }
1840 
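/* ice_handle_tclass_action() above is reached when the flower rule carries a
 * classid (e.g. from "hw_tc 1" as in the illustrative ADQ example above);
 * tc_classid_to_hwtc() converts it back to a TC number, which must map to an
 * existing ADQ channel (TC >= ICE_CHNL_START_TC and enabled in all_enatc)
 * before the filter is forwarded to that channel's VSI.
 */
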
1841 static int
1842 ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
1843 			struct flow_action_entry *act)
1844 {
1845 	struct ice_vsi *ch_vsi = NULL;
1846 	u16 queue = act->rx_queue;
1847 
1848 	if (queue >= vsi->num_rxq) {
1849 		NL_SET_ERR_MSG_MOD(fltr->extack,
1850 				   "Unable to add filter because specified queue is invalid");
1851 		return -EINVAL;
1852 	}
1853 	fltr->action.fltr_act = ICE_FWD_TO_Q;
1854 	fltr->action.fwd.q.queue = queue;
1855 	/* determine corresponding HW queue */
1856 	fltr->action.fwd.q.hw_queue = vsi->rxq_map[queue];
1857 
1858 	/* If ADQ is configured and the queue belongs to an ADQ VSI, prepare the
1859 	 * ADQ switch filter.
1860 	 */
1861 	ch_vsi = ice_locate_vsi_using_queue(vsi, fltr->action.fwd.q.queue);
1862 	if (!ch_vsi)
1863 		return -EINVAL;
1864 	fltr->dest_vsi = ch_vsi;
1865 	if (!ice_is_chnl_fltr(fltr))
1866 		return 0;
1867 
1868 	return ice_prep_adq_filter(vsi, fltr);
1869 }
1870 
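/* Illustrative forward-to-queue usage (assumed, not part of the driver):
 *   tc filter add dev <pf-netdev> ingress protocol ip flower \
 *       dst_ip 10.0.0.2 ip_proto udp dst_port 4789 skip_sw \
 *       action skbedit queue_mapping 5
 * is reported by the flow offload core as FLOW_ACTION_RX_QUEUE_MAPPING;
 * ice_tc_forward_to_queue() validates the queue index, records the
 * corresponding HW queue, and, if the queue belongs to an ADQ channel VSI,
 * also applies the ADQ filter preparation.
 */
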
1871 static int
1872 ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
1873 		    struct flow_action_entry *act)
1874 {
1875 	switch (act->id) {
1876 	case FLOW_ACTION_RX_QUEUE_MAPPING:
1877 		/* forward to queue */
1878 		return ice_tc_forward_to_queue(vsi, fltr, act);
1879 	case FLOW_ACTION_DROP:
1880 		fltr->action.fltr_act = ICE_DROP_PACKET;
1881 		return 0;
1882 	default:
1883 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action");
1884 		return -EOPNOTSUPP;
1885 	}
1886 }
1887 
1888 /**
1889  * ice_parse_tc_flower_actions - Parse the actions for a TC filter
1890  * @filter_dev: Pointer to device on which filter is being added
1891  * @vsi: Pointer to VSI
1892  * @cls_flower: Pointer to TC flower offload structure
1893  * @fltr: Pointer to TC flower filter structure
1894  *
1895  * Parse the actions for a TC filter
1896  */
1897 static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
1898 				       struct ice_vsi *vsi,
1899 				       struct flow_cls_offload *cls_flower,
1900 				       struct ice_tc_flower_fltr *fltr)
1901 {
1902 	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
1903 	struct flow_action *flow_action = &rule->action;
1904 	struct flow_action_entry *act;
1905 	int i, err;
1906 
1907 	if (cls_flower->classid)
1908 		return ice_handle_tclass_action(vsi, cls_flower, fltr);
1909 
1910 	if (!flow_action_has_entries(flow_action))
1911 		return -EINVAL;
1912 
1913 	flow_action_for_each(i, act, flow_action) {
1914 		if (ice_is_eswitch_mode_switchdev(vsi->back))
1915 			err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
1916 		else
1917 			err = ice_tc_parse_action(vsi, fltr, act);
1918 		if (err)
1919 			return err;
1921 	}
1922 	return 0;
1923 }
1924 
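/* Note on the action walk above (illustrative summary): in switchdev mode
 * each action is handed to ice_eswitch_tc_parse_action(); otherwise only
 * FLOW_ACTION_DROP and FLOW_ACTION_RX_QUEUE_MAPPING are accepted, e.g.
 *   ... flower dst_ip 10.0.0.4 skip_sw action drop
 * Any rule carrying a classid ("hw_tc N") is handled earlier through
 * ice_handle_tclass_action() and never reaches the per-action loop.
 */
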
1925 /**
1926  * ice_del_tc_fltr - deletes a filter from HW table
1927  * @vsi: Pointer to VSI
1928  * @fltr: Pointer to struct ice_tc_flower_fltr
1929  *
1930  * This function deletes a filter from the HW table and manages bookkeeping
1931  */
1932 static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
1933 {
1934 	struct ice_rule_query_data rule_rem;
1935 	struct ice_pf *pf = vsi->back;
1936 	int err;
1937 
1938 	rule_rem.rid = fltr->rid;
1939 	rule_rem.rule_id = fltr->rule_id;
1940 	rule_rem.vsi_handle = fltr->dest_vsi_handle;
1941 	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
1942 	if (err) {
1943 		if (err == -ENOENT) {
1944 			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
1945 			return -ENOENT;
1946 		}
1947 		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
1948 		return -EIO;
1949 	}
1950 
1951 	/* update advanced switch filter count for destination
1952 	 * VSI if filter destination was VSI
1953 	 */
1954 	if (fltr->dest_vsi) {
1955 		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
1956 			fltr->dest_vsi->num_chnl_fltr--;
1957 
1958 			/* keeps track of channel filters for PF VSI */
1959 			if (vsi->type == ICE_VSI_PF &&
1960 			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
1961 					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
1962 				pf->num_dmac_chnl_fltrs--;
1963 		}
1964 	}
1965 	return 0;
1966 }
1967 
1968 /**
1969  * ice_add_tc_fltr - adds a TC flower filter
1970  * @netdev: Pointer to netdev
1971  * @vsi: Pointer to VSI
1972  * @f: Pointer to flower offload structure
1973  * @__fltr: Double pointer used to return the newly allocated filter
1974  *
1975  * This function parses the TC flower input fields and actions, then adds
1976  * the resulting filter to the HW switch block.
1977  */
1978 static int
1979 ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
1980 		struct flow_cls_offload *f,
1981 		struct ice_tc_flower_fltr **__fltr)
1982 {
1983 	struct ice_tc_flower_fltr *fltr;
1984 	int err;
1985 
1986 	/* by default, set the output to NULL (no filter created) */
1987 	*__fltr = NULL;
1988 
1989 	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
1990 	if (!fltr)
1991 		return -ENOMEM;
1992 
1993 	fltr->cookie = f->cookie;
1994 	fltr->extack = f->common.extack;
1995 	fltr->src_vsi = vsi;
1996 	INIT_HLIST_NODE(&fltr->tc_flower_node);
1997 
1998 	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
1999 	if (err < 0)
2000 		goto err;
2001 
2002 	err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
2003 	if (err < 0)
2004 		goto err;
2005 
2006 	err = ice_add_switch_fltr(vsi, fltr);
2007 	if (err < 0)
2008 		goto err;
2009 
2010 	/* return the newly created filter */
2011 	*__fltr = fltr;
2012 
2013 	return 0;
2014 err:
2015 	kfree(fltr);
2016 	return err;
2017 }
2018 
2019 /**
2020  * ice_find_tc_flower_fltr - Find the TC flower filter in the list
2021  * @pf: Pointer to PF
2022  * @cookie: filter specific cookie
2023  */
2024 static struct ice_tc_flower_fltr *
2025 ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
2026 {
2027 	struct ice_tc_flower_fltr *fltr;
2028 
2029 	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
2030 		if (cookie == fltr->cookie)
2031 			return fltr;
2032 
2033 	return NULL;
2034 }
2035 
2036 /**
2037  * ice_add_cls_flower - add TC flower filters
2038  * @netdev: Pointer to filter device
2039  * @vsi: Pointer to VSI
2040  * @cls_flower: Pointer to flower offload structure
2041  */
2042 int
2043 ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
2044 		   struct flow_cls_offload *cls_flower)
2045 {
2046 	struct netlink_ext_ack *extack = cls_flower->common.extack;
2047 	struct net_device *vsi_netdev = vsi->netdev;
2048 	struct ice_tc_flower_fltr *fltr;
2049 	struct ice_pf *pf = vsi->back;
2050 	int err;
2051 
2052 	if (ice_is_reset_in_progress(pf->state))
2053 		return -EBUSY;
2054 	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
2055 		return -EINVAL;
2056 
2057 	if (ice_is_port_repr_netdev(netdev))
2058 		vsi_netdev = netdev;
2059 
2060 	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
2061 	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
2062 		/* Based on TC indirect notifications from the kernel, all ice
2063 		 * devices get an instance of the rule from a higher-level
2064 		 * device. Avoid triggering an explicit error in this case.
2065 		 */
2066 		if (netdev == vsi_netdev)
2067 			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
2068 		return -EINVAL;
2069 	}
2070 
2071 	/* avoid duplicate entries; if one already exists, return an error */
2072 	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
2073 	if (fltr) {
2074 		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
2075 		return -EEXIST;
2076 	}
2077 
2078 	/* prep and add TC-flower filter in HW */
2079 	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
2080 	if (err)
2081 		return err;
2082 
2083 	/* add filter into an ordered list */
2084 	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
2085 	return 0;
2086 }
2087 
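/* Note (illustrative): the NETIF_F_HW_TC check in ice_add_cls_flower() means
 * flower rules are rejected until offload is enabled on the target netdev,
 * e.g. with
 *   ethtool -K <pf-netdev> hw-tc-offload on
 * unless the driver has already set ICE_FLAG_CLS_FLOWER for the PF. Duplicate
 * cookies are also rejected, so each rule is added to the PF list only once.
 */
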
2088 /**
2089  * ice_del_cls_flower - delete TC flower filters
2090  * @vsi: Pointer to VSI
2091  * @cls_flower: Pointer to struct flow_cls_offload
2092  */
2093 int
2094 ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
2095 {
2096 	struct ice_tc_flower_fltr *fltr;
2097 	struct ice_pf *pf = vsi->back;
2098 	int err;
2099 
2100 	/* find filter */
2101 	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
2102 	if (!fltr) {
2103 		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
2104 		    hlist_empty(&pf->tc_flower_fltr_list))
2105 			return 0;
2106 
2107 		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "Failed to delete TC flower filter because it could not be found");
2108 		return -EINVAL;
2109 	}
2110 
2111 	fltr->extack = cls_flower->common.extack;
2112 	/* delete filter from HW */
2113 	err = ice_del_tc_fltr(vsi, fltr);
2114 	if (err)
2115 		return err;
2116 
2117 	/* delete filter from an ordered list */
2118 	hlist_del(&fltr->tc_flower_node);
2119 
2120 	/* free the filter node */
2121 	kfree(fltr);
2122 
2123 	return 0;
2124 }
2125 
2126 /**
2127  * ice_replay_tc_fltrs - replay TC filters
2128  * @pf: pointer to PF struct
2129  */
2130 void ice_replay_tc_fltrs(struct ice_pf *pf)
2131 {
2132 	struct ice_tc_flower_fltr *fltr;
2133 	struct hlist_node *node;
2134 
2135 	hlist_for_each_entry_safe(fltr, node,
2136 				  &pf->tc_flower_fltr_list,
2137 				  tc_flower_node) {
2138 		fltr->extack = NULL;
2139 		ice_add_switch_fltr(fltr->src_vsi, fltr);
2140 	}
2141 }
2142