// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2023, Intel Corporation. */

/* flow director ethtool support for ice */

#include "ice.h"
#include "ice_lib.h"
#include "ice_fdir.h"
#include "ice_flow.h"

static struct in6_addr full_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		}
	}
};

static struct in6_addr zero_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}
	}
};

/* calls to ice_flow_add_prof require the number of segments in the array
 * for segs_cnt. In this code that is one more than the index.
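 * For example, the tunneled segment index (ICE_FD_HW_SEG_TUN, i.e. 1)
 * yields a segs_cnt of 2.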
 */
#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)

/**
 * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
 * flow type values
 * @flow: filter type to be converted
 *
 * Returns the corresponding ethtool flow type.
 */
static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
{
	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return TCP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return UDP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		return SCTP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		return IPV4_USER_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return TCP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return UDP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		return SCTP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		return IPV6_USER_FLOW;
	default:
		/* 0 is undefined ethtool flow */
		return 0;
	}
}

/**
 * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
 * @eth: Ethtool flow type to be converted
 *
 * Returns flow enum
 */
static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
{
	switch (eth) {
	case TCP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
	case UDP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	case SCTP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
	case IPV4_USER_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
	case TCP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
	case UDP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
	case SCTP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
	case IPV6_USER_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
	default:
		return ICE_FLTR_PTYPE_NONF_NONE;
	}
}

/**
 * ice_is_mask_valid - check mask field set
 * @mask: full mask to check
 * @field: field for which mask should be valid
 *
 * Return true if @mask fully covers @field (all field bits set in the
 * mask); otherwise the mask is not valid for the field and false is
 * returned.
 */
static bool ice_is_mask_valid(u64 mask, u64 field)
{
	return (mask & field) == field;
}

/**
 * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
 * @hw: hardware structure that contains filter list
 * @cmd: ethtool command data structure to receive the filter data
 *
 * Returns 0 on success and -EINVAL on failure
 */
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *rule;
	int ret = 0;
	u16 idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	mutex_lock(&hw->fdir_fltr_lock);

	rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);

	if (!rule || fsp->location != rule->fltr_id) {
		ret = -EINVAL;
		goto release_lock;
	}

	fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);

	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));

	switch (fsp->flow_type) {
	case IPV4_USER_FLOW:
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
		fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
		fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
		fsp->m_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
		fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
		fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
		fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		break;
	case IPV6_USER_FLOW:
		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
		fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
		fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
		fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
		       rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
		       rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
		fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
		fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
		break;
	default:
		break;
	}

	if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->orig_q_index;

	idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
			rule->flow_type);
		ret = -EINVAL;
	}

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
	return ret;
}

/**
 * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 * @hw: hardware structure containing the filter list
 * @cmd: ethtool command data structure
 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 *
 * Returns 0 on success, as expected by ethtool
 */
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs)
{
	struct ice_fdir_fltr *f_rule;
	unsigned int cnt = 0;
	int val = 0;

	/* report total rule count */
	cmd->data = ice_get_fdir_cnt_all(hw);

	mutex_lock(&hw->fdir_fltr_lock);

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		if (cnt == cmd->rule_cnt) {
			val = -EMSGSIZE;
			goto release_lock;
		}
		rule_locs[cnt] = f_rule->fltr_id;
		cnt++;
	}

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
	if (!val)
		cmd->rule_cnt = cnt;
	return val;
}

/**
 * ice_fdir_remap_entries - update the FDir entries in profile
 * @prof: FDir structure pointer
 * @tun: tunneled or non-tunneled packet
 * @idx: FDir entry index
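 *
 * After clearing an entry at @idx, shift the remaining entry and VSI
 * handles down by one to keep the tables packed. For example (illustrative),
 * with prof->cnt == 3 and idx == 1, slot 2 moves into slot 1 and the last
 * slot is zeroed.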
 */
static void
ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
{
	if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) {
		int i;

		for (i = idx; i < (prof->cnt - 1); i++) {
			u64 old_entry_h;

			old_entry_h = prof->entry_h[i + 1][tun];
			prof->entry_h[i][tun] = old_entry_h;
			prof->vsi_h[i] = prof->vsi_h[i + 1];
		}

		prof->entry_h[i][tun] = 0;
		prof->vsi_h[i] = 0;
	}
}

/**
 * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
 * @hw: hardware structure containing filter list
 * @vsi_idx: VSI handle
 */
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
{
	int status, flow;

	if (!hw->fdir_prof)
		return;

	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
		int tun, i;

		if (!prof || !prof->cnt)
			continue;

		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			u64 prof_id = prof->prof_id[tun];

			for (i = 0; i < prof->cnt; i++) {
				if (prof->vsi_h[i] != vsi_idx)
					continue;

				prof->entry_h[i][tun] = 0;
				prof->vsi_h[i] = 0;
				break;
			}

			/* after clearing FDir entries update the remaining */
			ice_fdir_remap_entries(prof, tun, i);

			/* find flow profile corresponding to prof_id and clear
			 * vsi_idx from bitmap.
			 */
			status = ice_flow_rem_vsi_prof(hw, vsi_idx, prof_id);
			if (status) {
				dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
					status);
			}
		}
		prof->cnt--;
	}
}

/**
 * ice_fdir_get_hw_prof - return the ice_fd_hw_prof associated with a flow
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to release
 */
static struct ice_fd_hw_prof *
ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
{
	if (blk == ICE_BLK_FD && hw->fdir_prof)
		return hw->fdir_prof[flow];

	return NULL;
}

/**
 * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to release
 */
static void
ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
{
	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
	int tun;

	if (!prof)
		return;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		u64 prof_id = prof->prof_id[tun];
		int j;

		for (j = 0; j < prof->cnt; j++) {
			u16 vsi_num;

			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
				continue;
			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
			ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
			ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
			prof->entry_h[j][tun] = 0;
		}
		ice_flow_rem_prof(hw, blk, prof_id);
	}
}

/**
 * ice_fdir_rem_flow - release the ice_flow structures for a filter type
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow_type: FDir flow type to release
 */
static void
ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
		  enum ice_fltr_ptype flow_type)
{
	int flow = (int)flow_type & ~FLOW_EXT;
	struct ice_fd_hw_prof *prof;
	int tun, i;

	prof = ice_fdir_get_hw_prof(hw, blk, flow);
	if (!prof)
		return;

	ice_fdir_erase_flow_from_hw(hw, blk, flow);
	for (i = 0; i < prof->cnt; i++)
		prof->vsi_h[i] = 0;
	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		if (!prof->fdir_seg[tun])
			continue;
		devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
		prof->fdir_seg[tun] = NULL;
	}
	prof->cnt = 0;
}

/**
 * ice_fdir_release_flows - release all flows in use for later replay
 * @hw: pointer to HW instance
 */
void ice_fdir_release_flows(struct ice_hw *hw)
{
	int flow;

	/* release Flow Director HW table entries */
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
}

/**
 * ice_fdir_replay_flows - replay HW Flow Director filter info
 * @hw: pointer to HW instance
 */
void ice_fdir_replay_flows(struct ice_hw *hw)
{
	int flow;

	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		int tun;

		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
			continue;
		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			struct ice_flow_prof *hw_prof;
			struct ice_fd_hw_prof *prof;
			int j;

			prof = hw->fdir_prof[flow];
			ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX,
					  prof->fdir_seg[tun], TNL_SEG_CNT(tun),
					  false, &hw_prof);
			for (j = 0; j < prof->cnt; j++) {
				enum ice_flow_priority prio;
				u64 entry_h = 0;
				int err;

				prio = ICE_FLOW_PRIO_NORMAL;
				err = ice_flow_add_entry(hw, ICE_BLK_FD,
							 hw_prof->id,
							 prof->vsi_h[0],
							 prof->vsi_h[j],
							 prio, prof->fdir_seg,
							 &entry_h);
				if (err) {
					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
						flow);
					continue;
				}
				prof->prof_id[tun] = hw_prof->id;
				prof->entry_h[j][tun] = entry_h;
			}
		}
	}
}

/**
 * ice_parse_rx_flow_user_data - deconstruct user-defined data
 * @fsp: pointer to ethtool Rx flow specification
 * @data: pointer to userdef data structure for storage
 *
 * Returns 0 on success, negative error value on failure
 */
static int
ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
			    struct ice_rx_flow_userdef *data)
{
	u64 value, mask;

	memset(data, 0, sizeof(*data));
	if (!(fsp->flow_type & FLOW_EXT))
		return 0;

	value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
	mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
	if (!mask)
		return 0;

#define ICE_USERDEF_FLEX_WORD_M	GENMASK_ULL(15, 0)
#define ICE_USERDEF_FLEX_OFFS_S	16
#define ICE_USERDEF_FLEX_OFFS_M	GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
#define ICE_USERDEF_FLEX_FLTR_M	GENMASK_ULL(31, 0)

	/* 0x1fe is the maximum value for offsets stored in the internal
	 * filtering tables.
	 */
#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe
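	/* Illustrative example (not taken from this source): a user-def
	 * value of 0x00401234 parses to flex_word 0x1234 and flex_offset
	 * 0x40, since bits 15:0 carry the word and bits 31:16 the offset.
	 */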

	if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
	    value > ICE_USERDEF_FLEX_FLTR_M)
		return -EINVAL;

	data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
	data->flex_offset = FIELD_GET(ICE_USERDEF_FLEX_OFFS_M, value);
	if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
		return -EINVAL;

	data->flex_fltr = true;

	return 0;
}

/**
 * ice_fdir_num_avail_fltr - return the number of unused flow director filters
 * @hw: pointer to hardware structure
 * @vsi: software VSI structure
 *
 * There are 2 filter pools: guaranteed and best effort (shared). Each VSI
 * can use filters from either pool. The guaranteed pool is divided between
 * VSIs. The best effort filter pool is common to all VSIs and is a device
 * shared resource pool. The number of filters available to this VSI is the
 * sum of the VSI's guaranteed filter pool and the globally available best
 * effort filter pool.
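 *
 * For example (illustrative numbers), with 64 guaranteed filters of which
 * 10 are programmed and 2048 best effort filters of which 100 are in use
 * device wide, this VSI can still add 54 + 1948 = 2002 filters.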
 *
 * Returns the number of available flow director filters to this VSI
 */
static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
{
	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	u16 num_guar;
	u16 num_be;

	/* total guaranteed filters assigned to this VSI */
	num_guar = vsi->num_gfltr;

	/* total global best effort filters */
	num_be = hw->func_caps.fd_fltr_best_effort;

	/* Subtract the number of programmed filters from the global values */
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		num_guar -= FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M,
				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
		num_be -= FIELD_GET(E830_GLQF_FD_CNT_FD_BCNT_M,
				    rd32(hw, GLQF_FD_CNT));
		break;
	case ICE_MAC_E810:
	default:
		num_guar -= FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M,
				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
		num_be -= FIELD_GET(E800_GLQF_FD_CNT_FD_BCNT_M,
				    rd32(hw, GLQF_FD_CNT));
	}

	return num_guar + num_be;
}

/**
 * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
 * @hw: HW structure containing the FDir flow profile structure(s)
 * @flow: flow type to allocate the flow profile for
 *
 * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
 * on success and negative on error.
 */
static int
ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
{
	if (!hw)
		return -EINVAL;

	if (!hw->fdir_prof) {
		hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
					     ICE_FLTR_PTYPE_MAX,
					     sizeof(*hw->fdir_prof),
					     GFP_KERNEL);
		if (!hw->fdir_prof)
			return -ENOMEM;
	}

	if (!hw->fdir_prof[flow]) {
		hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
						   sizeof(**hw->fdir_prof),
						   GFP_KERNEL);
		if (!hw->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure
 * @prof: pointer to flow director HW profile
 * @vsi_idx: vsi_idx to locate
 *
 * Return the index of the vsi_idx. If vsi_idx is not found, insert it
 * into the vsi_h table.
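 *
 * For example (illustrative), with vsi_h = {3, 7} and cnt == 2, looking up
 * 7 returns 1, while looking up 5 appends it at index 2 and returns 2.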
 */
static u16
ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx)
{
	u16 idx = 0;

	for (idx = 0; idx < prof->cnt; idx++)
		if (prof->vsi_h[idx] == vsi_idx)
			return idx;

	if (idx == prof->cnt)
		prof->vsi_h[prof->cnt++] = vsi_idx;
	return idx;
}

/**
 * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
 * @pf: pointer to the PF structure
 * @seg: protocol header description pointer
 * @flow: filter enum
 * @tun: FDir segment to program
 */
static int
ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_hw *hw = &pf->hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	bool del_last;
	int err;
	int idx;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return -EINVAL;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	err = ice_fdir_alloc_flow_prof(hw, flow);
	if (err)
		return err;

	hw_prof = hw->fdir_prof[flow];
	old_seg = hw_prof->fdir_seg[tun];
	if (old_seg) {
		/* This flow_type already has a changed input set.
		 * If it matches the requested input set then we are
		 * done. Or, if it's different then it's an error.
		 */
		if (!memcmp(old_seg, seg, sizeof(*seg)))
			return -EEXIST;

		/* if there are FDir filters using this flow,
		 * then return error.
		 */
		if (hw->fdir_fltr_cnt[flow]) {
			dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
			return -EINVAL;
		}

		if (ice_is_arfs_using_perfect_flow(hw, flow)) {
			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
				flow);
			return -EINVAL;
		}

		/* remove HW filter definition */
		ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
	}

	/* Add a profile, but only one header is supported. That is, the
	 * final parameters are one header (segment), no actions (NULL),
	 * and a zero action count.
	 */
	err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
				TNL_SEG_CNT(tun), false, &prof);
	if (err)
		return err;
	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
				 main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (err)
		goto err_prof;
	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (err)
		goto err_entry;

	hw_prof->fdir_seg[tun] = seg;
	hw_prof->prof_id[tun] = prof->id;
	hw_prof->entry_h[0][tun] = entry1_h;
	hw_prof->entry_h[1][tun] = entry2_h;
	hw_prof->vsi_h[0] = main_vsi->idx;
	hw_prof->vsi_h[1] = ctrl_vsi->idx;
	if (!hw_prof->cnt)
		hw_prof->cnt = 2;

	for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) {
		u16 vsi_idx;
		u16 vsi_h;

		if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx])
			continue;

		entry1_h = 0;
		vsi_h = main_vsi->tc_map_vsi[idx]->idx;
		err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id,
					 main_vsi->idx, vsi_h,
					 ICE_FLOW_PRIO_NORMAL, seg,
					 &entry1_h);
		if (err) {
			dev_err(dev, "Could not add Channel VSI %d to flow group\n",
				idx);
			goto err_unroll;
		}

		vsi_idx = ice_fdir_prof_vsi_idx(hw_prof,
						main_vsi->tc_map_vsi[idx]->idx);
		hw_prof->entry_h[vsi_idx][tun] = entry1_h;
	}

	return 0;

err_unroll:
	entry1_h = 0;
	hw_prof->fdir_seg[tun] = NULL;

	/* The variable del_last will be used to determine when to clean up
	 * the VSI group data. The VSI data is not needed if there are no
	 * segments.
	 */
	del_last = true;
	for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++)
		if (hw_prof->fdir_seg[idx]) {
			del_last = false;
			break;
		}

	for (idx = 0; idx < hw_prof->cnt; idx++) {
		u16 vsi_num = ice_get_hw_vsi_num(hw, hw_prof->vsi_h[idx]);

		if (!hw_prof->entry_h[idx][tun])
			continue;
		ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id);
		ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
		hw_prof->entry_h[idx][tun] = 0;
		if (del_last)
			hw_prof->vsi_h[idx] = 0;
	}
	if (del_last)
		hw_prof->cnt = 0;
err_entry:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
	dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");

	return err;
}

/**
 * ice_set_init_fdir_seg
 * @seg: flow segment for programming
 * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
 * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
 *
 * Set the configuration for perfect filters to the provided flow segment for
 * programming the HW filter. This is to be called only when initializing
 * filters, as this function assumes no filters exist.
 */
static int
ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
		      enum ice_flow_seg_hdr l3_proto,
		      enum ice_flow_seg_hdr l4_proto)
{
	enum ice_flow_field src_addr, dst_addr, src_port, dst_port;

	if (!seg)
		return -EINVAL;

	if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
		src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
		dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
	} else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
		src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
		dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
	} else {
		return -EINVAL;
	}

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else {
		return -EINVAL;
	}

	ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);

	/* IP source address */
	ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* IP destination address */
	ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* Layer 4 source port */
	ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* Layer 4 destination port */
	ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_create_init_fdir_rule
 * @pf: PF structure
 * @flow: filter enum
 *
 * Return error value or 0 on success.
 */
static int
ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* if there is already a filter rule for this kind, return -EINVAL */
	if (hw->fdir_prof && hw->fdir_prof[flow] &&
	    hw->fdir_prof[flow]->fdir_seg[0])
		return -EINVAL;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_UDP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_UDP);
	else
		ret = -EINVAL;
	if (ret)
		goto err_exit;

	/* add filter for outer headers */
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
	if (ret)
		/* could not write filter, free memory */
		goto err_exit;

	/* make tunneled filter HW entries if possible */
	memcpy(&tun_seg[1], seg, sizeof(*seg));
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
	if (ret)
		/* could not write tunnel filter, but outer header filter
		 * exists
		 */
		devm_kfree(dev, tun_seg);

	set_bit(flow, hw->fdir_perfect_fltr);
	return ret;
err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	return -EOPNOTSUPP;
}

/**
 * ice_set_fdir_ip4_seg
 * @seg: flow segment for programming
 * @tcp_ip4_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv4
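 *
 * Each field must be either fully masked (all ones), in which case it is
 * programmed into the segment, or fully unmasked (all zeroes), which makes
 * the filter non-perfect; any partial mask is rejected with -EOPNOTSUPP.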
 */
static int
ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
		return -EINVAL;

	/* filtering on TOS not supported */
	if (tcp_ip4_spec->tos)
		return -EOPNOTSUPP;

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
		return -EOPNOTSUPP;
	}

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);

	/* IP source address */
	if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!tcp_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!tcp_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 source port */
	if (tcp_ip4_spec->psrc == htons(0xFFFF))
		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip4_spec->psrc)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 destination port */
	if (tcp_ip4_spec->pdst == htons(0xFFFF))
		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip4_spec->pdst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_set_fdir_ip4_usr_seg
 * @seg: flow segment for programming
 * @usr_ip4_spec: ethtool userdef packet offset
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the offset data into the flow segment to be used to program HW
 * table for IPv4
 */
static int
ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
			 struct ethtool_usrip4_spec *usr_ip4_spec,
			 bool *perfect_fltr)
{
	/* filtering on the first 4 bytes of the Layer 4 header not supported */
	if (usr_ip4_spec->l4_4_bytes)
		return -EINVAL;
	if (usr_ip4_spec->tos)
		return -EINVAL;
	if (usr_ip4_spec->ip_ver)
		return -EINVAL;
	/* Filtering on Layer 4 protocol not supported */
	if (usr_ip4_spec->proto)
		return -EOPNOTSUPP;
	/* empty rules are not valid */
	if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
		return -EINVAL;

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);

	/* IP source address */
	if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!usr_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!usr_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_set_fdir_ip6_seg
 * @seg: flow segment for programming
 * @tcp_ip6_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv6
 */
static int
ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip6_spec *tcp_ip6_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
		return -EINVAL;

	/* filtering on TC not supported */
	if (tcp_ip6_spec->tclass)
		return -EOPNOTSUPP;

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
		return -EINVAL;
	}

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);

	if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 source port */
	if (tcp_ip6_spec->psrc == htons(0xFFFF))
		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip6_spec->psrc)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 destination port */
	if (tcp_ip6_spec->pdst == htons(0xFFFF))
		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip6_spec->pdst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_set_fdir_ip6_usr_seg
 * @seg: flow segment for programming
 * @usr_ip6_spec: ethtool userdef packet offset
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the offset data into the flow segment to be used to program HW
 * table for IPv6
 */
static int
ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
			 struct ethtool_usrip6_spec *usr_ip6_spec,
			 bool *perfect_fltr)
{
	/* filtering on Layer 4 bytes not supported */
	if (usr_ip6_spec->l4_4_bytes)
		return -EOPNOTSUPP;
	/* filtering on TC not supported */
	if (usr_ip6_spec->tclass)
		return -EOPNOTSUPP;
	/* filtering on Layer 4 protocol not supported */
	if (usr_ip6_spec->l4_proto)
		return -EOPNOTSUPP;
	/* empty rules are not valid */
	if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		return -EINVAL;

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);

	if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
 * @pf: PF structure
 * @fsp: pointer to ethtool Rx flow specification
 * @user: user defined data from flow specification
 *
 * Returns 0 on success.
 */
static int
ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
		       struct ice_rx_flow_userdef *user)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_fltr_ptype fltr_idx;
	struct ice_hw *hw = &pf->hw;
	bool perfect_filter;
	int ret;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV4_USER_FLOW:
		ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
					       &perfect_filter);
		break;
	case TCP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV6_USER_FLOW:
		ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
					       &perfect_filter);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_exit;

	/* tunnel segments are shifted up one. */
	memcpy(&tun_seg[1], seg, sizeof(*seg));

	if (user && user->flex_fltr) {
		perfect_filter = false;
		ice_flow_add_fld_raw(seg, user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
		ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
	}

	fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);

	assign_bit(fltr_idx, hw->fdir_perfect_fltr, perfect_filter);

	/* add filter for outer headers */
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
					ICE_FD_HW_SEG_NON_TUN);
	if (ret == -EEXIST) {
		/* Rule already exists, free memory and count as success */
		ret = 0;
		goto err_exit;
	} else if (ret) {
		/* could not write filter, free memory */
		goto err_exit;
	}

	/* make tunneled filter HW entries if possible */
	memcpy(&tun_seg[1], seg, sizeof(*seg));
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
					ICE_FD_HW_SEG_TUN);
	if (ret == -EEXIST) {
		/* Rule already exists, free memory and count as success */
		devm_kfree(dev, tun_seg);
		ret = 0;
	} else if (ret) {
		/* could not write tunnel filter, but outer filter exists */
		devm_kfree(dev, tun_seg);
	}

	return ret;

err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	return ret;
}

/**
 * ice_update_per_q_fltr
 * @vsi: ptr to VSI
 * @q_index: queue index
 * @inc: true to increment or false to decrement per queue filter count
 *
 * This function is used to keep track of per queue sideband filters
 */
static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)
{
	struct ice_rx_ring *rx_ring;

	if (!vsi->num_rxq || q_index >= vsi->num_rxq)
		return;

	rx_ring = vsi->rx_rings[q_index];
	if (!rx_ring || !rx_ring->ch)
		return;

	if (inc)
		atomic_inc(&rx_ring->ch->num_sb_fltr);
	else
		atomic_dec_if_positive(&rx_ring->ch->num_sb_fltr);
}

/**
 * ice_fdir_write_fltr - send a flow director filter to the hardware
 * @pf: PF data structure
 * @input: filter structure
 * @add: true adds the filter and false removes it
 * @is_tun: true adds the inner filter on a tunnel, false the outer headers
 *
 * returns 0 on success and negative value on error
 */
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_fltr_desc desc;
	struct ice_vsi *ctrl_vsi;
	u8 *pkt, *frag_pkt;
	bool has_frag;
	int err;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;
	frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!frag_pkt) {
		err = -ENOMEM;
		goto err_free;
	}

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (err)
		goto err_free_all;
	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (err)
		goto err_free_all;

	/* repeat for fragment packet */
	has_frag = ice_fdir_has_frag(input->flow_type);
	if (has_frag) {
		/* does not return error */
		ice_fdir_get_prgm_desc(hw, input, &desc, add);
		err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
						is_tun);
		if (err)
			goto err_frag;
		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
		if (err)
			goto err_frag;
	} else {
		devm_kfree(dev, frag_pkt);
	}

	return 0;

err_free_all:
	devm_kfree(dev, frag_pkt);
err_free:
	devm_kfree(dev, pkt);
	return err;

err_frag:
	devm_kfree(dev, frag_pkt);
	return err;
}

/**
 * ice_fdir_write_all_fltr - write a flow director filter for each HW segment
 * @pf: PF data structure
 * @input: filter structure
 * @add: true adds the filter and false removes it
 *
 * returns 0 on success and negative value on error
 */
static int
ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
			bool add)
{
	u16 port_num;
	int tun;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		bool is_tun = tun == ICE_FD_HW_SEG_TUN;
		int err;

		if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
			continue;
		err = ice_fdir_write_fltr(pf, input, add, is_tun);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ice_fdir_replay_fltrs - replay filters from the HW filter list
 * @pf: board private structure
 */
void ice_fdir_replay_fltrs(struct ice_pf *pf)
{
	struct ice_fdir_fltr *f_rule;
	struct ice_hw *hw = &pf->hw;

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		int err = ice_fdir_write_all_fltr(pf, f_rule, true);

		if (err)
			dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
				err, f_rule->fltr_id);
	}
}

/**
 * ice_fdir_create_dflt_rules - create default perfect filters
 * @pf: PF data structure
 *
 * Returns 0 for success or error.
 */
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
	int err;

	/* Create perfect TCP and UDP rules in hardware. */
	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);

	return err;
}

/**
 * ice_fdir_del_all_fltrs - Delete all flow director filters
 * @vsi: the VSI being changed
 *
 * This function needs to be called while holding hw->fdir_fltr_lock
 */
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
{
	struct ice_fdir_fltr *f_rule, *tmp;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
		ice_fdir_write_all_fltr(pf, f_rule, false);
		ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
		list_del(&f_rule->fltr_node);
		devm_kfree(ice_pf_to_dev(pf), f_rule);
	}
}

/**
 * ice_vsi_manage_fdir - turn on/off flow director
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 */
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_fltr_ptype flow;

	if (ena) {
		set_bit(ICE_FLAG_FD_ENA, pf->flags);
		ice_fdir_create_dflt_rules(pf);
		return;
	}

	mutex_lock(&hw->fdir_fltr_lock);
	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
		goto release_lock;

	ice_fdir_del_all_fltrs(vsi);

	if (hw->fdir_prof)
		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
		     flow++)
			if (hw->fdir_prof[flow])
				ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
}

/**
 * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
 * @pf: PF structure
 * @flow_type: FDir flow type to release
 */
static void
ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
{
	struct ice_hw *hw = &pf->hw;
	bool need_perfect = false;

	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		need_perfect = true;

	if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
		return;

	ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
	if (need_perfect)
		ice_create_init_fdir_rule(pf, flow_type);
}

/**
 * ice_fdir_update_list_entry - add or delete a filter from the filter list
 * @pf: PF structure
 * @input: filter structure
 * @fltr_idx: ethtool index of filter to modify
 *
 * returns 0 on success and negative on errors
 */
static int
ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
			   int fltr_idx)
{
	struct ice_fdir_fltr *old_fltr;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	int err = -ENOENT;

	/* Do not update filters during reset */
	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
	if (old_fltr) {
		err = ice_fdir_write_all_fltr(pf, old_fltr, false);
		if (err)
			return err;
		ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
		/* update sb-filters count, specific to ring->channel */
		ice_update_per_q_fltr(vsi, old_fltr->orig_q_index, false);
		if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
			/* we just deleted the last filter of flow_type so we
			 * should also delete the HW filter info.
			 */
			ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
		list_del(&old_fltr->fltr_node);
		devm_kfree(ice_hw_to_dev(hw), old_fltr);
	}
	if (!input)
		return err;
	ice_fdir_list_add_fltr(hw, input);
	/* update sb-filters count, specific to ring->channel */
	ice_update_per_q_fltr(vsi, input->orig_q_index, true);
	ice_fdir_update_cntrs(hw, input->flow_type, true);
	return 0;
}

/**
 * ice_del_fdir_ethtool - delete Flow Director filter
 * @vsi: pointer to target VSI
 * @cmd: command to add or delete Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int val;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not delete filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
		return -EBUSY;

	mutex_lock(&hw->fdir_fltr_lock);
	val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
	mutex_unlock(&hw->fdir_fltr_lock);

	return val;
}

/**
 * ice_update_ring_dest_vsi - update dest ring and dest VSI
 * @vsi: pointer to target VSI
 * @dest_vsi: ptr to dest VSI index
 * @ring: ptr to dest ring
 *
 * This function updates the destination VSI and queue if the user specifies
 * a target queue which falls in the channel's (aka ADQ) queue region
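 * For example (illustrative), a channel with base_q 8 and num_rxq 4 owns
 * queues 8-11; a requested ring of 10 is redirected to the channel's VSI
 * as its local ring 2.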
 */
static void
ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
{
	struct ice_channel *ch;

	list_for_each_entry(ch, &vsi->ch_list, list) {
		if (!ch->ch_vsi)
			continue;

		/* make sure to locate corresponding channel based on "queue"
		 * specified
		 */
		if ((*ring < ch->base_q) ||
		    (*ring >= (ch->base_q + ch->num_rxq)))
			continue;

		/* update the dest_vsi based on channel */
		*dest_vsi = ch->ch_vsi->idx;

		/* update the "ring" to be correct based on channel */
		*ring -= ch->base_q;
	}
}

/**
 * ice_set_fdir_input_set - Set the input set for Flow Director
 * @vsi: pointer to target VSI
 * @fsp: pointer to ethtool Rx flow specification
 * @input: filter structure
 */
static int
ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
		       struct ice_fdir_fltr *input)
{
	u16 dest_vsi, q_index = 0;
	u16 orig_q_index = 0;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int flow_type;
	u8 dest_ctl;

	if (!vsi || !fsp || !input)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;

	dest_vsi = vsi->idx;
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (vf) {
			dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
			return -EINVAL;
		}

		if (ring >= vsi->num_rxq)
			return -EINVAL;

		orig_q_index = ring;
		ice_update_ring_dest_vsi(vsi, &dest_vsi, &ring);
		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
		q_index = ring;
	}

	input->fltr_id = fsp->location;
	input->q_index = q_index;
	flow_type = fsp->flow_type & ~FLOW_EXT;

	/* Record the original queue index as specified by the user.
	 * With channel configuration, 'q_index' becomes relative
	 * to the TC (channel).
	 */
	input->orig_q_index = orig_q_index;
	input->dest_vsi = dest_vsi;
	input->dest_ctl = dest_ctl;
	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
	input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
	input->flow_type = ice_ethtool_flow_to_fltr(flow_type);

	if (fsp->flow_type & FLOW_EXT) {
		memcpy(input->ext_data.usr_def, fsp->h_ext.data,
		       sizeof(input->ext_data.usr_def));
		input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
		input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
		memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
		       sizeof(input->ext_mask.usr_def));
		input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
		input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
	}

	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
		input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
		input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
		input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
		input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
		input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
		break;
	case IPV4_USER_FLOW:
		input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
		input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
		input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
		input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
		input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
		input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
		input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
		input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
		input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
		input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
		input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
		input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
		memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
		input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
		input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
		break;
	case IPV6_USER_FLOW:
		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
		input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;

		/* if no protocol requested, use IPPROTO_NONE */
		if (!fsp->m_u.usr_ip6_spec.l4_proto)
			input->ip.v6.proto = IPPROTO_NONE;
		else
			input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;

		memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
		input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
		input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
		break;
	default:
		/* not doing un-parsed flow types */
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_add_fdir_ethtool - Add/Remove Flow Director filter
 * @vsi: pointer to target VSI
 * @cmd: command to add or delete Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ice_rx_flow_userdef userdata;
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *input;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int fltrs_needed;
	u32 max_location;
	u16 tunnel_port;
	int ret;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not program filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (ice_parse_rx_flow_user_data(fsp, &userdata))
		return -EINVAL;

	if (fsp->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
	if (ret)
		return ret;

	max_location = ice_get_fdir_cnt_all(hw);
	if (fsp->location >= max_location) {
		dev_err(dev, "Failed to add filter. The number of ntuple filters or provided location exceed max %d.\n",
			max_location);
		return -ENOSPC;
	}

	/* return error if not an update and no available filters */
	fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
	if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
	    ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
		dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
		return -ENOSPC;
	}

	input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	ret = ice_set_fdir_input_set(vsi, fsp, input);
	if (ret)
		goto free_input;

	mutex_lock(&hw->fdir_fltr_lock);
	if (ice_fdir_is_dup_fltr(hw, input)) {
		ret = -EINVAL;
		goto release_lock;
	}

	if (userdata.flex_fltr) {
		input->flex_fltr = true;
		input->flex_word = cpu_to_be16(userdata.flex_word);
		input->flex_offset = userdata.flex_offset;
	}

	input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
	input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;

	/* input struct is added to the HW filter list */
	ret = ice_fdir_update_list_entry(pf, input, fsp->location);
	if (ret)
		goto release_lock;

	ret = ice_fdir_write_all_fltr(pf, input, true);
	if (ret)
		goto remove_sw_rule;

	goto release_lock;

remove_sw_rule:
	ice_fdir_update_cntrs(hw, input->flow_type, false);
	/* update sb-filters count, specific to ring->channel */
	ice_update_per_q_fltr(vsi, input->orig_q_index, false);
	list_del(&input->fltr_node);
release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
free_input:
	if (ret)
		devm_kfree(dev, input);

	return ret;
}
1945