xref: /linux/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2018-2023, Intel Corporation. */
3 
4 /* flow director ethtool support for ice */
5 
6 #include "ice.h"
7 #include "ice_lib.h"
8 #include "ice_fdir.h"
9 #include "ice_flow.h"
10 
11 static struct in6_addr full_ipv6_addr_mask = {
12 	.in6_u = {
13 		.u6_addr8 = {
14 			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
15 			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
16 		}
17 	}
18 };
19 
20 static struct in6_addr zero_ipv6_addr_mask = {
21 	.in6_u = {
22 		.u6_addr8 = {
23 			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
24 			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
25 		}
26 	}
27 };
28 
29 /* calls to ice_flow_add_prof require the number of segments in the array
30  * for segs_cnt. In this code that is one more than the index.
31  */
32 #define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)
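/* For example, the tunneled copy of a rule is programmed with index
 * ICE_FD_HW_SEG_TUN, so ice_flow_add_prof() is called with
 * segs_cnt == TNL_SEG_CNT(ICE_FD_HW_SEG_TUN) == 2: the outer headers
 * in segment [0] and the inner (tunneled) headers in segment [1].
 */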
33 
34 /**
35  * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
36  * flow type values
37  * @flow: filter type to be converted
38  *
39  * Returns the corresponding ethtool flow type.
40  */
41 static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
42 {
43 	switch (flow) {
44 	case ICE_FLTR_PTYPE_NONF_ETH:
45 		return ETHER_FLOW;
46 	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
47 		return TCP_V4_FLOW;
48 	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
49 		return UDP_V4_FLOW;
50 	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
51 		return SCTP_V4_FLOW;
52 	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
53 		return IPV4_USER_FLOW;
54 	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
55 		return TCP_V6_FLOW;
56 	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
57 		return UDP_V6_FLOW;
58 	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
59 		return SCTP_V6_FLOW;
60 	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
61 		return IPV6_USER_FLOW;
62 	default:
63 		/* 0 is undefined ethtool flow */
64 		return 0;
65 	}
66 }
67 
68 /**
69  * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
70  * @eth: Ethtool flow type to be converted
71  *
72  * Returns flow enum
73  */
74 static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
75 {
76 	switch (eth) {
77 	case ETHER_FLOW:
78 		return ICE_FLTR_PTYPE_NONF_ETH;
79 	case TCP_V4_FLOW:
80 		return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
81 	case UDP_V4_FLOW:
82 		return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
83 	case SCTP_V4_FLOW:
84 		return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
85 	case IPV4_USER_FLOW:
86 		return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
87 	case TCP_V6_FLOW:
88 		return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
89 	case UDP_V6_FLOW:
90 		return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
91 	case SCTP_V6_FLOW:
92 		return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
93 	case IPV6_USER_FLOW:
94 		return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
95 	default:
96 		return ICE_FLTR_PTYPE_NONF_NONE;
97 	}
98 }
99 
100 /**
101  * ice_is_mask_valid - check mask field set
102  * @mask: full mask to check
103  * @field: field for which mask should be valid
104  *
105  * If the mask fully covers @field return true. If it is not valid for the
106  * field return false.
107  */
108 static bool ice_is_mask_valid(u64 mask, u64 field)
109 {
110 	return (mask & field) == field;
111 }
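/* e.g. ice_is_mask_valid(0xFFFFFFFF, 0xFFFF) is true since every bit of
 * the field is covered by the mask, while ice_is_mask_valid(0xFF00, 0xFFFF)
 * is false because the low byte of the field is left uncovered.
 */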
112 
113 /**
114  * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
115  * @hw: hardware structure that contains filter list
116  * @cmd: ethtool command data structure to receive the filter data
117  *
118  * Returns 0 on success and -EINVAL on failure
119  */
120 int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
121 {
122 	struct ethtool_rx_flow_spec *fsp;
123 	struct ice_fdir_fltr *rule;
124 	int ret = 0;
125 	u16 idx;
126 
127 	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
128 
129 	mutex_lock(&hw->fdir_fltr_lock);
130 
131 	rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);
132 
133 	if (!rule || fsp->location != rule->fltr_id) {
134 		ret = -EINVAL;
135 		goto release_lock;
136 	}
137 
138 	fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);
139 
140 	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
141 	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
142 
143 	switch (fsp->flow_type) {
144 	case ETHER_FLOW:
145 		fsp->h_u.ether_spec = rule->eth;
146 		fsp->m_u.ether_spec = rule->eth_mask;
147 		break;
148 	case IPV4_USER_FLOW:
149 		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
150 		fsp->h_u.usr_ip4_spec.proto = 0;
151 		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
152 		fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
153 		fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
154 		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
155 		fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
156 		fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
157 		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
158 		fsp->m_u.usr_ip4_spec.proto = 0;
159 		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
160 		fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
161 		break;
162 	case TCP_V4_FLOW:
163 	case UDP_V4_FLOW:
164 	case SCTP_V4_FLOW:
165 		fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
166 		fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
167 		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
168 		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
169 		fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
170 		fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
171 		fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
172 		fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
173 		break;
174 	case IPV6_USER_FLOW:
175 		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
176 		fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
177 		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
178 		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
179 		       sizeof(struct in6_addr));
180 		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
181 		       sizeof(struct in6_addr));
182 		memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
183 		       sizeof(struct in6_addr));
184 		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
185 		       sizeof(struct in6_addr));
186 		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
187 		fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
188 		fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
189 		break;
190 	case TCP_V6_FLOW:
191 	case UDP_V6_FLOW:
192 	case SCTP_V6_FLOW:
193 		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
194 		       sizeof(struct in6_addr));
195 		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
196 		       sizeof(struct in6_addr));
197 		fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
198 		fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
199 		memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
200 		       rule->mask.v6.src_ip,
201 		       sizeof(struct in6_addr));
202 		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
203 		       rule->mask.v6.dst_ip,
204 		       sizeof(struct in6_addr));
205 		fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
206 		fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
207 		fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
208 		fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
209 		break;
210 	default:
211 		break;
212 	}
213 
214 	if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
215 		fsp->ring_cookie = RX_CLS_FLOW_DISC;
216 	else
217 		fsp->ring_cookie = rule->orig_q_index;
218 
219 	idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
220 	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
221 		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
222 			rule->flow_type);
223 		ret = -EINVAL;
224 	}
225 
226 release_lock:
227 	mutex_unlock(&hw->fdir_fltr_lock);
228 	return ret;
229 }
230 
231 /**
232  * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
233  * @hw: hardware structure containing the filter list
234  * @cmd: ethtool command data structure
235  * @rule_locs: ethtool array passed in from OS to receive filter IDs
236  *
237  * Returns 0 as expected for success by ethtool
238  */
239 int
240 ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
241 		      u32 *rule_locs)
242 {
243 	struct ice_fdir_fltr *f_rule;
244 	unsigned int cnt = 0;
245 	int val = 0;
246 
247 	/* report total rule count */
248 	cmd->data = ice_get_fdir_cnt_all(hw);
249 
250 	mutex_lock(&hw->fdir_fltr_lock);
251 
252 	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
253 		if (cnt == cmd->rule_cnt) {
254 			val = -EMSGSIZE;
255 			goto release_lock;
256 		}
257 		rule_locs[cnt] = f_rule->fltr_id;
258 		cnt++;
259 	}
260 
261 release_lock:
262 	mutex_unlock(&hw->fdir_fltr_lock);
263 	if (!val)
264 		cmd->rule_cnt = cnt;
265 	return val;
266 }
267 
268 /**
269  * ice_fdir_remap_entries - update the FDir entries in profile
270  * @prof: FDir structure pointer
271  * @tun: tunneled or non-tunneled packet
272  * @idx: FDir entry index
273  */
274 static void
275 ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
276 {
277 	if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) {
278 		int i;
279 
280 		for (i = idx; i < (prof->cnt - 1); i++) {
281 			u64 old_entry_h;
282 
283 			old_entry_h = prof->entry_h[i + 1][tun];
284 			prof->entry_h[i][tun] = old_entry_h;
285 			prof->vsi_h[i] = prof->vsi_h[i + 1];
286 		}
287 
288 		prof->entry_h[i][tun] = 0;
289 		prof->vsi_h[i] = 0;
290 	}
291 }
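/* e.g. with prof->cnt == 4, removing the entry at idx == 1 moves
 * entry_h[2]/vsi_h[2] into slot 1 and entry_h[3]/vsi_h[3] into slot 2,
 * then zeroes slot 3; the caller is expected to decrement prof->cnt.
 */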
292 
293 /**
294  * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
295  * @hw: hardware structure containing filter list
296  * @vsi_idx: VSI handle
297  */
298 void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
299 {
300 	int status, flow;
301 
302 	if (!hw->fdir_prof)
303 		return;
304 
305 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
306 		struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
307 		int tun, i;
308 
309 		if (!prof || !prof->cnt)
310 			continue;
311 
312 		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
313 			u64 prof_id = prof->prof_id[tun];
314 
315 			for (i = 0; i < prof->cnt; i++) {
316 				if (prof->vsi_h[i] != vsi_idx)
317 					continue;
318 
319 				prof->entry_h[i][tun] = 0;
320 				prof->vsi_h[i] = 0;
321 				break;
322 			}
323 
324 			/* after clearing FDir entries update the remaining */
325 			ice_fdir_remap_entries(prof, tun, i);
326 
327 			/* find flow profile corresponding to prof_id and clear
328 			 * vsi_idx from bitmap.
329 			 */
330 			status = ice_flow_rem_vsi_prof(hw, vsi_idx, prof_id);
331 			if (status) {
332 				dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
333 					status);
334 			}
335 		}
336 		prof->cnt--;
337 	}
338 }
339 
340 /**
341  * ice_fdir_get_hw_prof - return the ice_fd_hw_prof associated with a flow
342  * @hw: hardware structure containing the filter list
343  * @blk: hardware block
344  * @flow: FDir flow type to look up
345  */
346 static struct ice_fd_hw_prof *
347 ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
348 {
349 	if (blk == ICE_BLK_FD && hw->fdir_prof)
350 		return hw->fdir_prof[flow];
351 
352 	return NULL;
353 }
354 
355 /**
356  * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
357  * @hw: hardware structure containing the filter list
358  * @blk: hardware block
359  * @flow: FDir flow type to release
360  */
361 static void
362 ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
363 {
364 	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
365 	int tun;
366 
367 	if (!prof)
368 		return;
369 
370 	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
371 		u64 prof_id = prof->prof_id[tun];
372 		int j;
373 
374 		for (j = 0; j < prof->cnt; j++) {
375 			u16 vsi_num;
376 
377 			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
378 				continue;
379 			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
380 			ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
381 			ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
382 			prof->entry_h[j][tun] = 0;
383 		}
384 		ice_flow_rem_prof(hw, blk, prof_id);
385 	}
386 }
387 
388 /**
389  * ice_fdir_rem_flow - release the ice_flow structures for a filter type
390  * @hw: hardware structure containing the filter list
391  * @blk: hardware block
392  * @flow_type: FDir flow type to release
393  */
394 static void
395 ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
396 		  enum ice_fltr_ptype flow_type)
397 {
398 	int flow = (int)flow_type & ~FLOW_EXT;
399 	struct ice_fd_hw_prof *prof;
400 	int tun, i;
401 
402 	prof = ice_fdir_get_hw_prof(hw, blk, flow);
403 	if (!prof)
404 		return;
405 
406 	ice_fdir_erase_flow_from_hw(hw, blk, flow);
407 	for (i = 0; i < prof->cnt; i++)
408 		prof->vsi_h[i] = 0;
409 	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
410 		if (!prof->fdir_seg[tun])
411 			continue;
412 		devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
413 		prof->fdir_seg[tun] = NULL;
414 	}
415 	prof->cnt = 0;
416 }
417 
418 /**
419  * ice_fdir_release_flows - release all flows in use for later replay
420  * @hw: pointer to HW instance
421  */
422 void ice_fdir_release_flows(struct ice_hw *hw)
423 {
424 	int flow;
425 
426 	/* release Flow Director HW table entries */
427 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
428 		ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
429 }
430 
431 /**
432  * ice_fdir_replay_flows - replay HW Flow Director filter info
433  * @hw: pointer to HW instance
434  */
435 void ice_fdir_replay_flows(struct ice_hw *hw)
436 {
437 	int flow;
438 
439 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
440 		int tun;
441 
442 		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
443 			continue;
444 		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
445 			struct ice_flow_prof *hw_prof;
446 			struct ice_fd_hw_prof *prof;
447 			int j;
448 
449 			prof = hw->fdir_prof[flow];
450 			ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX,
451 					  prof->fdir_seg[tun], TNL_SEG_CNT(tun),
452 					  false, &hw_prof);
453 			for (j = 0; j < prof->cnt; j++) {
454 				enum ice_flow_priority prio;
455 				u64 entry_h = 0;
456 				int err;
457 
458 				prio = ICE_FLOW_PRIO_NORMAL;
459 				err = ice_flow_add_entry(hw, ICE_BLK_FD,
460 							 hw_prof->id,
461 							 prof->vsi_h[0],
462 							 prof->vsi_h[j],
463 							 prio, prof->fdir_seg,
464 							 &entry_h);
465 				if (err) {
466 					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
467 						flow);
468 					continue;
469 				}
470 				prof->prof_id[tun] = hw_prof->id;
471 				prof->entry_h[j][tun] = entry_h;
472 			}
473 		}
474 	}
475 }
476 
477 /**
478  * ice_parse_rx_flow_user_data - deconstruct user-defined data
479  * @fsp: pointer to ethtool Rx flow specification
480  * @data: pointer to userdef data structure for storage
481  *
482  * Returns 0 on success, negative error value on failure
483  */
484 static int
485 ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
486 			    struct ice_rx_flow_userdef *data)
487 {
488 	u64 value, mask;
489 
490 	memset(data, 0, sizeof(*data));
491 	if (!(fsp->flow_type & FLOW_EXT))
492 		return 0;
493 
494 	value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
495 	mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
496 	if (!mask)
497 		return 0;
498 
499 #define ICE_USERDEF_FLEX_WORD_M	GENMASK_ULL(15, 0)
500 #define ICE_USERDEF_FLEX_OFFS_S	16
501 #define ICE_USERDEF_FLEX_OFFS_M	GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
502 #define ICE_USERDEF_FLEX_FLTR_M	GENMASK_ULL(31, 0)
503 
504 	/* 0x1fe is the maximum value for offsets stored in the internal
505 	 * filtering tables.
506 	 */
507 #define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe
508 
509 	if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
510 	    value > ICE_USERDEF_FLEX_FLTR_M)
511 		return -EINVAL;
512 
513 	data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
514 	data->flex_offset = FIELD_GET(ICE_USERDEF_FLEX_OFFS_M, value);
515 	if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
516 		return -EINVAL;
517 
518 	data->flex_fltr = true;
519 
520 	return 0;
521 }
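/* Example (hypothetical values): with the low 32 bits of fsp->h_ext.data
 * holding 0x00120088 and fsp->m_ext.data fully masking them, this parses
 * to data->flex_offset = 0x12 and data->flex_word = 0x0088, and sets
 * data->flex_fltr so ice_cfg_fdir_xtrct_seq() later adds a raw match on
 * that word.
 */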
522 
523 /**
524  * ice_fdir_num_avail_fltr - return the number of unused flow director filters
525  * @hw: pointer to hardware structure
526  * @vsi: software VSI structure
527  *
528  * There are 2 filter pools: guaranteed and best effort (shared). Each VSI
529  * can use filters from either pool. The guaranteed pool is divided between
530  * VSIs. The best effort filter pool is common to all VSIs and is a device
531  * shared resource pool. The number of filters available to this VSI is the
532  * sum of the VSI's guaranteed filter pool and the globally available best
533  * effort filter pool.
534  *
535  * Returns the number of available flow director filters to this VSI
536  */
537 int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
538 {
539 	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
540 	u16 num_guar;
541 	u16 num_be;
542 
543 	/* total guaranteed filters assigned to this VSI */
544 	num_guar = vsi->num_gfltr;
545 
546 	/* total global best effort filters */
547 	num_be = hw->func_caps.fd_fltr_best_effort;
548 
549 	/* Subtract the number of programmed filters from the global values */
550 	switch (hw->mac_type) {
551 	case ICE_MAC_E830:
552 		num_guar -= FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M,
553 				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
554 		num_be -= FIELD_GET(E830_GLQF_FD_CNT_FD_BCNT_M,
555 				    rd32(hw, GLQF_FD_CNT));
556 		break;
557 	case ICE_MAC_E810:
558 	default:
559 		num_guar -= FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M,
560 				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
561 		num_be -= FIELD_GET(E800_GLQF_FD_CNT_FD_BCNT_M,
562 				    rd32(hw, GLQF_FD_CNT));
563 	}
564 
565 	return num_guar + num_be;
566 }
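/* Worked example (hypothetical counts): a VSI with num_gfltr = 64 and 14
 * guaranteed filters already programmed, on a device with 2048 best effort
 * filters of which 100 are in use, reports (64 - 14) + (2048 - 100) = 1998
 * available filters.
 */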
567 
568 /**
569  * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
570  * @hw: HW structure containing the FDir flow profile structure(s)
571  * @flow: flow type to allocate the flow profile for
572  *
573  * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
574  * on success and negative on error.
575  */
576 static int
577 ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
578 {
579 	if (!hw)
580 		return -EINVAL;
581 
582 	if (!hw->fdir_prof) {
583 		hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
584 					     ICE_FLTR_PTYPE_MAX,
585 					     sizeof(*hw->fdir_prof),
586 					     GFP_KERNEL);
587 		if (!hw->fdir_prof)
588 			return -ENOMEM;
589 	}
590 
591 	if (!hw->fdir_prof[flow]) {
592 		hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
593 						   sizeof(**hw->fdir_prof),
594 						   GFP_KERNEL);
595 		if (!hw->fdir_prof[flow])
596 			return -ENOMEM;
597 	}
598 
599 	return 0;
600 }
601 
602 /**
603  * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure
604  * @prof: pointer to flow director HW profile
605  * @vsi_idx: vsi_idx to locate
606  *
607  * Return the index of the vsi_idx. If vsi_idx is not found, insert it
608  * into the vsi_h table.
609  */
610 static u16
611 ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx)
612 {
613 	u16 idx = 0;
614 
615 	for (idx = 0; idx < prof->cnt; idx++)
616 		if (prof->vsi_h[idx] == vsi_idx)
617 			return idx;
618 
619 	if (idx == prof->cnt)
620 		prof->vsi_h[prof->cnt++] = vsi_idx;
621 	return idx;
622 }
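/* e.g. with vsi_h == { 5, 9 } and cnt == 2, looking up vsi_idx 9 returns 1,
 * while looking up vsi_idx 7 appends it (vsi_h[2] = 7, cnt becomes 3) and
 * returns 2.
 */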
623 
624 /**
625  * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
626  * @pf: pointer to the PF structure
627  * @seg: protocol header description pointer
628  * @flow: filter enum
629  * @tun: FDir segment to program
630  */
631 static int
632 ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
633 			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
634 {
635 	struct device *dev = ice_pf_to_dev(pf);
636 	struct ice_vsi *main_vsi, *ctrl_vsi;
637 	struct ice_flow_seg_info *old_seg;
638 	struct ice_flow_prof *prof = NULL;
639 	struct ice_fd_hw_prof *hw_prof;
640 	struct ice_hw *hw = &pf->hw;
641 	u64 entry1_h = 0;
642 	u64 entry2_h = 0;
643 	bool del_last;
644 	int err;
645 	int idx;
646 
647 	main_vsi = ice_get_main_vsi(pf);
648 	if (!main_vsi)
649 		return -EINVAL;
650 
651 	ctrl_vsi = ice_get_ctrl_vsi(pf);
652 	if (!ctrl_vsi)
653 		return -EINVAL;
654 
655 	err = ice_fdir_alloc_flow_prof(hw, flow);
656 	if (err)
657 		return err;
658 
659 	hw_prof = hw->fdir_prof[flow];
660 	old_seg = hw_prof->fdir_seg[tun];
661 	if (old_seg) {
662 		/* This flow_type already has a changed input set.
663 		 * If it matches the requested input set then we are
664 		 * done; if it differs, that is an error.
665 		 */
666 		if (!memcmp(old_seg, seg, sizeof(*seg)))
667 			return -EEXIST;
668 
669 		/* if there are FDir filters using this flow,
670 		 * then return error.
671 		 */
672 		if (hw->fdir_fltr_cnt[flow]) {
673 			dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
674 			return -EINVAL;
675 		}
676 
677 		if (ice_is_arfs_using_perfect_flow(hw, flow)) {
678 			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
679 				flow);
680 			return -EINVAL;
681 		}
682 
683 		/* remove HW filter definition */
684 		ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
685 	}
686 
687 	/* Adding a profile, but there is only one header supported.
688 	 * That is, the final parameters are one header (segment), no
689 	 * actions (NULL) and an action count of zero.
690 	 */
691 	err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
692 				TNL_SEG_CNT(tun), false, &prof);
693 	if (err)
694 		return err;
695 	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
696 				 main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
697 				 seg, &entry1_h);
698 	if (err)
699 		goto err_prof;
700 	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
701 				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
702 				 seg, &entry2_h);
703 	if (err)
704 		goto err_entry;
705 
706 	hw_prof->fdir_seg[tun] = seg;
707 	hw_prof->prof_id[tun] = prof->id;
708 	hw_prof->entry_h[0][tun] = entry1_h;
709 	hw_prof->entry_h[1][tun] = entry2_h;
710 	hw_prof->vsi_h[0] = main_vsi->idx;
711 	hw_prof->vsi_h[1] = ctrl_vsi->idx;
712 	if (!hw_prof->cnt)
713 		hw_prof->cnt = 2;
714 
715 	for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) {
716 		u16 vsi_idx;
717 		u16 vsi_h;
718 
719 		if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx])
720 			continue;
721 
722 		entry1_h = 0;
723 		vsi_h = main_vsi->tc_map_vsi[idx]->idx;
724 		err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id,
725 					 main_vsi->idx, vsi_h,
726 					 ICE_FLOW_PRIO_NORMAL, seg,
727 					 &entry1_h);
728 		if (err) {
729 			dev_err(dev, "Could not add Channel VSI %d to flow group\n",
730 				idx);
731 			goto err_unroll;
732 		}
733 
734 		vsi_idx = ice_fdir_prof_vsi_idx(hw_prof,
735 						main_vsi->tc_map_vsi[idx]->idx);
736 		hw_prof->entry_h[vsi_idx][tun] = entry1_h;
737 	}
738 
739 	return 0;
740 
741 err_unroll:
742 	entry1_h = 0;
743 	hw_prof->fdir_seg[tun] = NULL;
744 
745 	/* The variable del_last will be used to determine when to clean up
746 	 * the VSI group data. The VSI data is not needed if there are no
747 	 * segments.
748 	 */
749 	del_last = true;
750 	for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++)
751 		if (hw_prof->fdir_seg[idx]) {
752 			del_last = false;
753 			break;
754 		}
755 
756 	for (idx = 0; idx < hw_prof->cnt; idx++) {
757 		u16 vsi_num = ice_get_hw_vsi_num(hw, hw_prof->vsi_h[idx]);
758 
759 		if (!hw_prof->entry_h[idx][tun])
760 			continue;
761 		ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id);
762 		ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
763 		hw_prof->entry_h[idx][tun] = 0;
764 		if (del_last)
765 			hw_prof->vsi_h[idx] = 0;
766 	}
767 	if (del_last)
768 		hw_prof->cnt = 0;
769 err_entry:
770 	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
771 			     ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id);
772 	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
773 err_prof:
774 	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
775 	dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
776 
777 	return err;
778 }
779 
780 /**
781  * ice_set_init_fdir_seg
782  * @seg: flow segment for programming
783  * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
784  * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
785  *
786  * Set the configuration for perfect filters to the provided flow segment for
787  * programming the HW filter. This is to be called only when initializing
788  * filters, as this function assumes no filters exist.
789  */
790 static int
791 ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
792 		      enum ice_flow_seg_hdr l3_proto,
793 		      enum ice_flow_seg_hdr l4_proto)
794 {
795 	enum ice_flow_field src_addr, dst_addr, src_port, dst_port;
796 
797 	if (!seg)
798 		return -EINVAL;
799 
800 	if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
801 		src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
802 		dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
803 	} else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
804 		src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
805 		dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
806 	} else {
807 		return -EINVAL;
808 	}
809 
810 	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
811 		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
812 		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
813 	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
814 		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
815 		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
816 	} else {
817 		return -EINVAL;
818 	}
819 
820 	ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);
821 
822 	/* IP source address */
823 	ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
824 			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
825 
826 	/* IP destination address */
827 	ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
828 			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
829 
830 	/* Layer 4 source port */
831 	ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
832 			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
833 
834 	/* Layer 4 destination port */
835 	ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
836 			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
837 
838 	return 0;
839 }
840 
841 /**
842  * ice_create_init_fdir_rule
843  * @pf: PF structure
844  * @flow: filter enum
845  *
846  * Return error value or 0 on success.
847  */
848 static int
849 ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
850 {
851 	struct ice_flow_seg_info *seg, *tun_seg;
852 	struct device *dev = ice_pf_to_dev(pf);
853 	struct ice_hw *hw = &pf->hw;
854 	int ret;
855 
856 	/* if there is already a filter rule for this kind, return -EINVAL */
857 	if (hw->fdir_prof && hw->fdir_prof[flow] &&
858 	    hw->fdir_prof[flow]->fdir_seg[0])
859 		return -EINVAL;
860 
861 	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
862 	if (!seg)
863 		return -ENOMEM;
864 
865 	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
866 			       GFP_KERNEL);
867 	if (!tun_seg) {
868 		devm_kfree(dev, seg);
869 		return -ENOMEM;
870 	}
871 
872 	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
873 		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
874 					    ICE_FLOW_SEG_HDR_TCP);
875 	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
876 		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
877 					    ICE_FLOW_SEG_HDR_UDP);
878 	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
879 		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
880 					    ICE_FLOW_SEG_HDR_TCP);
881 	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
882 		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
883 					    ICE_FLOW_SEG_HDR_UDP);
884 	else
885 		ret = -EINVAL;
886 	if (ret)
887 		goto err_exit;
888 
889 	/* add filter for outer headers */
890 	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
891 	if (ret)
892 		/* could not write filter, free memory */
893 		goto err_exit;
894 
895 	/* make tunneled filter HW entries if possible */
896 	memcpy(&tun_seg[1], seg, sizeof(*seg));
897 	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
898 	if (ret)
899 		/* could not write tunnel filter, but outer header filter
900 		 * exists
901 		 */
902 		devm_kfree(dev, tun_seg);
903 
904 	set_bit(flow, hw->fdir_perfect_fltr);
905 	return ret;
906 err_exit:
907 	devm_kfree(dev, tun_seg);
908 	devm_kfree(dev, seg);
909 
910 	return -EOPNOTSUPP;
911 }
912 
913 /**
914  * ice_set_fdir_ip4_seg
915  * @seg: flow segment for programming
916  * @tcp_ip4_spec: mask data from ethtool
917  * @l4_proto: Layer 4 protocol to program
918  * @perfect_fltr: only valid on success; returns true if perfect filter,
919  *		  false if not
920  *
921  * Set the mask data into the flow segment to be used to program HW
922  * table based on provided L4 protocol for IPv4
923  */
924 static int
925 ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
926 		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
927 		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
928 {
929 	enum ice_flow_field src_port, dst_port;
930 
931 	/* make sure we don't have any empty rule */
932 	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
933 	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
934 		return -EINVAL;
935 
936 	/* filtering on TOS not supported */
937 	if (tcp_ip4_spec->tos)
938 		return -EOPNOTSUPP;
939 
940 	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
941 		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
942 		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
943 	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
944 		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
945 		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
946 	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
947 		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
948 		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
949 	} else {
950 		return -EOPNOTSUPP;
951 	}
952 
953 	*perfect_fltr = true;
954 	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);
955 
956 	/* IP source address */
957 	if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
958 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
959 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
960 				 ICE_FLOW_FLD_OFF_INVAL, false);
961 	else if (!tcp_ip4_spec->ip4src)
962 		*perfect_fltr = false;
963 	else
964 		return -EOPNOTSUPP;
965 
966 	/* IP destination address */
967 	if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
968 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
969 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
970 				 ICE_FLOW_FLD_OFF_INVAL, false);
971 	else if (!tcp_ip4_spec->ip4dst)
972 		*perfect_fltr = false;
973 	else
974 		return -EOPNOTSUPP;
975 
976 	/* Layer 4 source port */
977 	if (tcp_ip4_spec->psrc == htons(0xFFFF))
978 		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
979 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
980 				 false);
981 	else if (!tcp_ip4_spec->psrc)
982 		*perfect_fltr = false;
983 	else
984 		return -EOPNOTSUPP;
985 
986 	/* Layer 4 destination port */
987 	if (tcp_ip4_spec->pdst == htons(0xFFFF))
988 		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
989 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
990 				 false);
991 	else if (!tcp_ip4_spec->pdst)
992 		*perfect_fltr = false;
993 	else
994 		return -EOPNOTSUPP;
995 
996 	return 0;
997 }
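/* For example, a rule that fully masks src-ip, dst-ip, src-port and dst-port
 * programs all four fields and remains a perfect filter; a hypothetical
 * "flow-type tcp4 dst-ip 10.1.1.1 dst-port 80" rule leaves the source fields
 * wildcarded, so *perfect_fltr is cleared; and any partial (neither zero nor
 * all-ones) field mask is rejected with -EOPNOTSUPP.
 */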
998 
999 /**
1000  * ice_set_fdir_ip4_usr_seg
1001  * @seg: flow segment for programming
1002  * @usr_ip4_spec: ethtool userdef packet offset
1003  * @perfect_fltr: only valid on success; returns true if perfect filter,
1004  *		  false if not
1005  *
1006  * Set the offset data into the flow segment to be used to program HW
1007  * table for IPv4
1008  */
1009 static int
1010 ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
1011 			 struct ethtool_usrip4_spec *usr_ip4_spec,
1012 			 bool *perfect_fltr)
1013 {
1014 	/* filtering on the first 4 bytes of the Layer 4 header not supported */
1015 	if (usr_ip4_spec->l4_4_bytes)
1016 		return -EINVAL;
1017 	if (usr_ip4_spec->tos)
1018 		return -EINVAL;
1019 	if (usr_ip4_spec->ip_ver)
1020 		return -EINVAL;
1021 	/* Filtering on Layer 4 protocol not supported */
1022 	if (usr_ip4_spec->proto)
1023 		return -EOPNOTSUPP;
1024 	/* empty rules are not valid */
1025 	if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
1026 		return -EINVAL;
1027 
1028 	*perfect_fltr = true;
1029 	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
1030 
1031 	/* IP source address */
1032 	if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
1033 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
1034 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1035 				 ICE_FLOW_FLD_OFF_INVAL, false);
1036 	else if (!usr_ip4_spec->ip4src)
1037 		*perfect_fltr = false;
1038 	else
1039 		return -EOPNOTSUPP;
1040 
1041 	/* IP destination address */
1042 	if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
1043 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
1044 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1045 				 ICE_FLOW_FLD_OFF_INVAL, false);
1046 	else if (!usr_ip4_spec->ip4dst)
1047 		*perfect_fltr = false;
1048 	else
1049 		return -EOPNOTSUPP;
1050 
1051 	return 0;
1052 }
1053 
1054 /**
1055  * ice_set_fdir_ip6_seg
1056  * @seg: flow segment for programming
1057  * @tcp_ip6_spec: mask data from ethtool
1058  * @l4_proto: Layer 4 protocol to program
1059  * @perfect_fltr: only valid on success; returns true if perfect filter,
1060  *		  false if not
1061  *
1062  * Set the mask data into the flow segment to be used to program HW
1063  * table based on provided L4 protocol for IPv6
1064  */
1065 static int
1066 ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
1067 		     struct ethtool_tcpip6_spec *tcp_ip6_spec,
1068 		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
1069 {
1070 	enum ice_flow_field src_port, dst_port;
1071 
1072 	/* make sure we don't have any empty rule */
1073 	if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1074 		    sizeof(struct in6_addr)) &&
1075 	    !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1076 		    sizeof(struct in6_addr)) &&
1077 	    !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
1078 		return -EINVAL;
1079 
1080 	/* filtering on TC not supported */
1081 	if (tcp_ip6_spec->tclass)
1082 		return -EOPNOTSUPP;
1083 
1084 	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
1085 		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
1086 		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
1087 	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
1088 		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
1089 		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
1090 	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
1091 		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
1092 		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
1093 	} else {
1094 		return -EINVAL;
1095 	}
1096 
1097 	*perfect_fltr = true;
1098 	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);
1099 
1100 	if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
1101 		    sizeof(struct in6_addr)))
1102 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
1103 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1104 				 ICE_FLOW_FLD_OFF_INVAL, false);
1105 	else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1106 			 sizeof(struct in6_addr)))
1107 		*perfect_fltr = false;
1108 	else
1109 		return -EOPNOTSUPP;
1110 
1111 	if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
1112 		    sizeof(struct in6_addr)))
1113 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
1114 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1115 				 ICE_FLOW_FLD_OFF_INVAL, false);
1116 	else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1117 			 sizeof(struct in6_addr)))
1118 		*perfect_fltr = false;
1119 	else
1120 		return -EOPNOTSUPP;
1121 
1122 	/* Layer 4 source port */
1123 	if (tcp_ip6_spec->psrc == htons(0xFFFF))
1124 		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
1125 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1126 				 false);
1127 	else if (!tcp_ip6_spec->psrc)
1128 		*perfect_fltr = false;
1129 	else
1130 		return -EOPNOTSUPP;
1131 
1132 	/* Layer 4 destination port */
1133 	if (tcp_ip6_spec->pdst == htons(0xFFFF))
1134 		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
1135 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1136 				 false);
1137 	else if (!tcp_ip6_spec->pdst)
1138 		*perfect_fltr = false;
1139 	else
1140 		return -EOPNOTSUPP;
1141 
1142 	return 0;
1143 }
1144 
1145 /**
1146  * ice_set_fdir_ip6_usr_seg
1147  * @seg: flow segment for programming
1148  * @usr_ip6_spec: ethtool userdef packet offset
1149  * @perfect_fltr: only valid on success; returns true if perfect filter,
1150  *		  false if not
1151  *
1152  * Set the offset data into the flow segment to be used to program HW
1153  * table for IPv6
1154  */
1155 static int
1156 ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
1157 			 struct ethtool_usrip6_spec *usr_ip6_spec,
1158 			 bool *perfect_fltr)
1159 {
1160 	/* filtering on Layer 4 bytes not supported */
1161 	if (usr_ip6_spec->l4_4_bytes)
1162 		return -EOPNOTSUPP;
1163 	/* filtering on TC not supported */
1164 	if (usr_ip6_spec->tclass)
1165 		return -EOPNOTSUPP;
1166 	/* filtering on Layer 4 protocol not supported */
1167 	if (usr_ip6_spec->l4_proto)
1168 		return -EOPNOTSUPP;
1169 	/* empty rules are not valid */
1170 	if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1171 		    sizeof(struct in6_addr)) &&
1172 	    !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1173 		    sizeof(struct in6_addr)))
1174 		return -EINVAL;
1175 
1176 	*perfect_fltr = true;
1177 	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
1178 
1179 	if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
1180 		    sizeof(struct in6_addr)))
1181 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
1182 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1183 				 ICE_FLOW_FLD_OFF_INVAL, false);
1184 	else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1185 			 sizeof(struct in6_addr)))
1186 		*perfect_fltr = false;
1187 	else
1188 		return -EOPNOTSUPP;
1189 
1190 	if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
1191 		    sizeof(struct in6_addr)))
1192 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
1193 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1194 				 ICE_FLOW_FLD_OFF_INVAL, false);
1195 	else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1196 			 sizeof(struct in6_addr)))
1197 		*perfect_fltr = false;
1198 	else
1199 		return -EOPNOTSUPP;
1200 
1201 	return 0;
1202 }
1203 
1204 /**
1205  * ice_fdir_vlan_valid - validate VLAN data for Flow Director rule
1206  * @dev: network interface device structure
1207  * @fsp: pointer to ethtool Rx flow specification
1208  *
1209  * Return: true if vlan data is valid, false otherwise
1210  */
1211 static bool ice_fdir_vlan_valid(struct device *dev,
1212 				struct ethtool_rx_flow_spec *fsp)
1213 {
1214 	if (fsp->m_ext.vlan_etype && !eth_type_vlan(fsp->h_ext.vlan_etype))
1215 		return false;
1216 
1217 	if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
1218 		return false;
1219 
1220 	/* proto and vlan must have vlan-etype defined */
1221 	if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci &&
1222 	    !fsp->m_ext.vlan_etype) {
1223 		dev_warn(dev, "Filters with proto and vlan also require vlan-etype");
1224 		return false;
1225 	}
1226 
1227 	return true;
1228 }
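/* e.g. a nonzero vlan-etype mask requires the value to be a VLAN ethertype
 * (0x8100 or 0x88A8, per eth_type_vlan()), and a masked VLAN TCI must stay
 * below VLAN_N_VID, i.e. no priority/DEI bits may be set in the match value.
 */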
1229 
1230 /**
1231  * ice_set_ether_flow_seg - set address and protocol segments for ether flow
1232  * @dev: network interface device structure
1233  * @seg: flow segment for programming
1234  * @eth_spec: mask data from ethtool
1235  *
1236  * Return: 0 on success and errno in case of error.
1237  */
1238 static int ice_set_ether_flow_seg(struct device *dev,
1239 				  struct ice_flow_seg_info *seg,
1240 				  struct ethhdr *eth_spec)
1241 {
1242 	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH);
1243 
1244 	/* empty rules are not valid */
1245 	if (is_zero_ether_addr(eth_spec->h_source) &&
1246 	    is_zero_ether_addr(eth_spec->h_dest) &&
1247 	    !eth_spec->h_proto)
1248 		return -EINVAL;
1249 
1250 	/* Ethertype */
1251 	if (eth_spec->h_proto == htons(0xFFFF)) {
1252 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_TYPE,
1253 				 ICE_FLOW_FLD_OFF_INVAL,
1254 				 ICE_FLOW_FLD_OFF_INVAL,
1255 				 ICE_FLOW_FLD_OFF_INVAL, false);
1256 	} else if (eth_spec->h_proto) {
1257 		dev_warn(dev, "Only 0x0000 or 0xffff proto mask is allowed for flow-type ether");
1258 		return -EOPNOTSUPP;
1259 	}
1260 
1261 	/* Source MAC address */
1262 	if (is_broadcast_ether_addr(eth_spec->h_source))
1263 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
1264 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1265 				 ICE_FLOW_FLD_OFF_INVAL, false);
1266 	else if (!is_zero_ether_addr(eth_spec->h_source))
1267 		goto err_mask;
1268 
1269 	/* Destination MAC address */
1270 	if (is_broadcast_ether_addr(eth_spec->h_dest))
1271 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
1272 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1273 				 ICE_FLOW_FLD_OFF_INVAL, false);
1274 	else if (!is_zero_ether_addr(eth_spec->h_dest))
1275 		goto err_mask;
1276 
1277 	return 0;
1278 
1279 err_mask:
1280 	dev_warn(dev, "Only 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff MAC address mask is allowed for flow-type ether");
1281 	return -EOPNOTSUPP;
1282 }
1283 
1284 /**
1285  * ice_set_fdir_vlan_seg - set vlan segments for ether flow
1286  * @seg: flow segment for programming
1287  * @ext_masks: masks for additional RX flow fields
1288  *
1289  * Return: 0 on success and errno in case of error.
1290  */
1291 static int
1292 ice_set_fdir_vlan_seg(struct ice_flow_seg_info *seg,
1293 		      struct ethtool_flow_ext *ext_masks)
1294 {
1295 	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_VLAN);
1296 
1297 	if (ext_masks->vlan_etype) {
1298 		if (ext_masks->vlan_etype != htons(0xFFFF))
1299 			return -EOPNOTSUPP;
1300 
1301 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_S_VLAN,
1302 				 ICE_FLOW_FLD_OFF_INVAL,
1303 				 ICE_FLOW_FLD_OFF_INVAL,
1304 				 ICE_FLOW_FLD_OFF_INVAL, false);
1305 	}
1306 
1307 	if (ext_masks->vlan_tci) {
1308 		if (ext_masks->vlan_tci != htons(0xFFFF))
1309 			return -EOPNOTSUPP;
1310 
1311 		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_C_VLAN,
1312 				 ICE_FLOW_FLD_OFF_INVAL,
1313 				 ICE_FLOW_FLD_OFF_INVAL,
1314 				 ICE_FLOW_FLD_OFF_INVAL, false);
1315 	}
1316 
1317 	return 0;
1318 }
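/* Note: a vlan-etype match is programmed into the outer (S-)VLAN field and a
 * TCI match into the inner (C-)VLAN field, and each mask must be exactly
 * 0xFFFF; partial VLAN masks are rejected with -EOPNOTSUPP.
 */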
1319 
1320 /**
1321  * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
1322  * @pf: PF structure
1323  * @fsp: pointer to ethtool Rx flow specification
1324  * @user: user defined data from flow specification
1325  *
1326  * Returns 0 on success.
1327  */
1328 static int
1329 ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
1330 		       struct ice_rx_flow_userdef *user)
1331 {
1332 	struct ice_flow_seg_info *seg, *tun_seg;
1333 	struct device *dev = ice_pf_to_dev(pf);
1334 	enum ice_fltr_ptype fltr_idx;
1335 	struct ice_hw *hw = &pf->hw;
1336 	bool perfect_filter = false;
1337 	int ret;
1338 
1339 	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
1340 	if (!seg)
1341 		return -ENOMEM;
1342 
1343 	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
1344 			       GFP_KERNEL);
1345 	if (!tun_seg) {
1346 		devm_kfree(dev, seg);
1347 		return -ENOMEM;
1348 	}
1349 
1350 	switch (fsp->flow_type & ~FLOW_EXT) {
1351 	case TCP_V4_FLOW:
1352 		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1353 					   ICE_FLOW_SEG_HDR_TCP,
1354 					   &perfect_filter);
1355 		break;
1356 	case UDP_V4_FLOW:
1357 		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1358 					   ICE_FLOW_SEG_HDR_UDP,
1359 					   &perfect_filter);
1360 		break;
1361 	case SCTP_V4_FLOW:
1362 		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1363 					   ICE_FLOW_SEG_HDR_SCTP,
1364 					   &perfect_filter);
1365 		break;
1366 	case IPV4_USER_FLOW:
1367 		ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
1368 					       &perfect_filter);
1369 		break;
1370 	case TCP_V6_FLOW:
1371 		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1372 					   ICE_FLOW_SEG_HDR_TCP,
1373 					   &perfect_filter);
1374 		break;
1375 	case UDP_V6_FLOW:
1376 		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1377 					   ICE_FLOW_SEG_HDR_UDP,
1378 					   &perfect_filter);
1379 		break;
1380 	case SCTP_V6_FLOW:
1381 		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1382 					   ICE_FLOW_SEG_HDR_SCTP,
1383 					   &perfect_filter);
1384 		break;
1385 	case IPV6_USER_FLOW:
1386 		ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
1387 					       &perfect_filter);
1388 		break;
1389 	case ETHER_FLOW:
1390 		ret = ice_set_ether_flow_seg(dev, seg, &fsp->m_u.ether_spec);
1391 		if (!ret && (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci)) {
1392 			if (!ice_fdir_vlan_valid(dev, fsp)) {
1393 				ret = -EINVAL;
1394 				break;
1395 			}
1396 			ret = ice_set_fdir_vlan_seg(seg, &fsp->m_ext);
1397 		}
1398 		break;
1399 	default:
1400 		ret = -EINVAL;
1401 	}
1402 	if (ret)
1403 		goto err_exit;
1404 
1405 	/* tunnel segments are shifted up one. */
1406 	memcpy(&tun_seg[1], seg, sizeof(*seg));
1407 
1408 	if (user && user->flex_fltr) {
1409 		perfect_filter = false;
1410 		ice_flow_add_fld_raw(seg, user->flex_offset,
1411 				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
1412 				     ICE_FLOW_FLD_OFF_INVAL,
1413 				     ICE_FLOW_FLD_OFF_INVAL);
1414 		ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
1415 				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
1416 				     ICE_FLOW_FLD_OFF_INVAL,
1417 				     ICE_FLOW_FLD_OFF_INVAL);
1418 	}
1419 
1420 	fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
1421 
1422 	assign_bit(fltr_idx, hw->fdir_perfect_fltr, perfect_filter);
1423 
1424 	/* add filter for outer headers */
1425 	ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
1426 					ICE_FD_HW_SEG_NON_TUN);
1427 	if (ret == -EEXIST) {
1428 		/* Rule already exists, free memory and count as success */
1429 		ret = 0;
1430 		goto err_exit;
1431 	} else if (ret) {
1432 		/* could not write filter, free memory */
1433 		goto err_exit;
1434 	}
1435 
1436 	/* make tunneled filter HW entries if possible */
1437 	memcpy(&tun_seg[1], seg, sizeof(*seg));
1438 	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
1439 					ICE_FD_HW_SEG_TUN);
1440 	if (ret == -EEXIST) {
1441 		/* Rule already exists, free memory and count as success */
1442 		devm_kfree(dev, tun_seg);
1443 		ret = 0;
1444 	} else if (ret) {
1445 		/* could not write tunnel filter, but outer filter exists */
1446 		devm_kfree(dev, tun_seg);
1447 	}
1448 
1449 	return ret;
1450 
1451 err_exit:
1452 	devm_kfree(dev, tun_seg);
1453 	devm_kfree(dev, seg);
1454 
1455 	return ret;
1456 }
1457 
1458 /**
1459  * ice_update_per_q_fltr
1460  * @vsi: ptr to VSI
1461  * @q_index: queue index
1462  * @inc: true to increment or false to decrement per queue filter count
1463  *
1464  * This function is used to keep track of per queue sideband filters
1465  */
1466 static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)
1467 {
1468 	struct ice_rx_ring *rx_ring;
1469 
1470 	if (!vsi->num_rxq || q_index >= vsi->num_rxq)
1471 		return;
1472 
1473 	rx_ring = vsi->rx_rings[q_index];
1474 	if (!rx_ring || !rx_ring->ch)
1475 		return;
1476 
1477 	if (inc)
1478 		atomic_inc(&rx_ring->ch->num_sb_fltr);
1479 	else
1480 		atomic_dec_if_positive(&rx_ring->ch->num_sb_fltr);
1481 }
1482 
1483 /**
1484  * ice_fdir_write_fltr - send a flow director filter to the hardware
1485  * @pf: PF data structure
1486  * @input: filter structure
1487  * @add: true adds the filter and false removes the filter
1488  * @is_tun: true adds the inner filter on a tunnel and false the outer headers
1489  *
1490  * returns 0 on success and negative value on error
1491  */
1492 int
1493 ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
1494 		    bool is_tun)
1495 {
1496 	struct device *dev = ice_pf_to_dev(pf);
1497 	struct ice_hw *hw = &pf->hw;
1498 	struct ice_fltr_desc desc;
1499 	struct ice_vsi *ctrl_vsi;
1500 	u8 *pkt, *frag_pkt;
1501 	bool has_frag;
1502 	int err;
1503 
1504 	ctrl_vsi = ice_get_ctrl_vsi(pf);
1505 	if (!ctrl_vsi)
1506 		return -EINVAL;
1507 
1508 	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1509 	if (!pkt)
1510 		return -ENOMEM;
1511 	frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1512 	if (!frag_pkt) {
1513 		err = -ENOMEM;
1514 		goto err_free;
1515 	}
1516 
1517 	ice_fdir_get_prgm_desc(hw, input, &desc, add);
1518 	err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1519 	if (err)
1520 		goto err_free_all;
1521 	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1522 	if (err)
1523 		goto err_free_all;
1524 
1525 	/* repeat for fragment packet */
1526 	has_frag = ice_fdir_has_frag(input->flow_type);
1527 	if (has_frag) {
1528 		/* does not return error */
1529 		ice_fdir_get_prgm_desc(hw, input, &desc, add);
1530 		err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
1531 						is_tun);
1532 		if (err)
1533 			goto err_frag;
1534 		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
1535 		if (err)
1536 			goto err_frag;
1537 	} else {
1538 		devm_kfree(dev, frag_pkt);
1539 	}
1540 
1541 	return 0;
1542 
1543 err_free_all:
1544 	devm_kfree(dev, frag_pkt);
1545 err_free:
1546 	devm_kfree(dev, pkt);
1547 	return err;
1548 
1549 err_frag:
1550 	devm_kfree(dev, frag_pkt);
1551 	return err;
1552 }
1553 
1554 /**
1555  * ice_fdir_write_all_fltr - write a flow director filter for each tunnel type
1556  * @pf: PF data structure
1557  * @input: filter structure
1558  * @add: true adds the filter and false removes the filter
1559  *
1560  * returns 0 on success and negative value on error
1561  */
1562 static int
1563 ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
1564 			bool add)
1565 {
1566 	u16 port_num;
1567 	int tun;
1568 
1569 	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
1570 		bool is_tun = tun == ICE_FD_HW_SEG_TUN;
1571 		int err;
1572 
1573 		if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
1574 			continue;
1575 		err = ice_fdir_write_fltr(pf, input, add, is_tun);
1576 		if (err)
1577 			return err;
1578 	}
1579 	return 0;
1580 }
1581 
1582 /**
1583  * ice_fdir_replay_fltrs - replay filters from the HW filter list
1584  * @pf: board private structure
1585  */
1586 void ice_fdir_replay_fltrs(struct ice_pf *pf)
1587 {
1588 	struct ice_fdir_fltr *f_rule;
1589 	struct ice_hw *hw = &pf->hw;
1590 
1591 	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
1592 		int err = ice_fdir_write_all_fltr(pf, f_rule, true);
1593 
1594 		if (err)
1595 			dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
1596 				err, f_rule->fltr_id);
1597 	}
1598 }
1599 
1600 /**
1601  * ice_fdir_create_dflt_rules - create default perfect filters
1602  * @pf: PF data structure
1603  *
1604  * Returns 0 on success or a negative error code.
1605  */
1606 int ice_fdir_create_dflt_rules(struct ice_pf *pf)
1607 {
1608 	int err;
1609 
1610 	/* Create perfect TCP and UDP rules in hardware. */
1611 	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
1612 	if (err)
1613 		return err;
1614 
1615 	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
1616 	if (err)
1617 		return err;
1618 
1619 	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
1620 	if (err)
1621 		return err;
1622 
1623 	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
1624 
1625 	return err;
1626 }
1627 
1628 /**
1629  * ice_fdir_del_all_fltrs - Delete all flow director filters
1630  * @vsi: the VSI being changed
1631  *
1632  * This function needs to be called while holding hw->fdir_fltr_lock
1633  */
1634 void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
1635 {
1636 	struct ice_fdir_fltr *f_rule, *tmp;
1637 	struct ice_pf *pf = vsi->back;
1638 	struct ice_hw *hw = &pf->hw;
1639 
1640 	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
1641 		ice_fdir_write_all_fltr(pf, f_rule, false);
1642 		ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
1643 		list_del(&f_rule->fltr_node);
1644 		devm_kfree(ice_pf_to_dev(pf), f_rule);
1645 	}
1646 }
1647 
1648 /**
1649  * ice_vsi_manage_fdir - turn on/off flow director
1650  * @vsi: the VSI being changed
1651  * @ena: boolean value indicating if this is an enable or disable request
1652  */
1653 void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
1654 {
1655 	struct ice_pf *pf = vsi->back;
1656 	struct ice_hw *hw = &pf->hw;
1657 	enum ice_fltr_ptype flow;
1658 
1659 	if (ena) {
1660 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
1661 		ice_fdir_create_dflt_rules(pf);
1662 		return;
1663 	}
1664 
1665 	mutex_lock(&hw->fdir_fltr_lock);
1666 	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
1667 		goto release_lock;
1668 
1669 	ice_fdir_del_all_fltrs(vsi);
1670 
1671 	if (hw->fdir_prof)
1672 		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
1673 		     flow++)
1674 			if (hw->fdir_prof[flow])
1675 				ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
1676 
1677 release_lock:
1678 	mutex_unlock(&hw->fdir_fltr_lock);
1679 }
1680 
1681 /**
1682  * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
1683  * @pf: PF structure
1684  * @flow_type: FDir flow type to release
1685  */
1686 static void
1687 ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
1688 {
1689 	struct ice_hw *hw = &pf->hw;
1690 	bool need_perfect = false;
1691 
1692 	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
1693 	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
1694 	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
1695 	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
1696 		need_perfect = true;
1697 
1698 	if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
1699 		return;
1700 
1701 	ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
1702 	if (need_perfect)
1703 		ice_create_init_fdir_rule(pf, flow_type);
1704 }
1705 
1706 /**
1707  * ice_fdir_update_list_entry - add or delete a filter from the filter list
1708  * @pf: PF structure
1709  * @input: filter structure
1710  * @fltr_idx: ethtool index of filter to modify
1711  *
1712  * Returns 0 on success and a negative error code on failure.
1713  */
1714 static int
1715 ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
1716 			   int fltr_idx)
1717 {
1718 	struct ice_fdir_fltr *old_fltr;
1719 	struct ice_hw *hw = &pf->hw;
1720 	struct ice_vsi *vsi;
1721 	int err = -ENOENT;
1722 
1723 	/* Do not update filters during reset */
1724 	if (ice_is_reset_in_progress(pf->state))
1725 		return -EBUSY;
1726 
1727 	vsi = ice_get_main_vsi(pf);
1728 	if (!vsi)
1729 		return -EINVAL;
1730 
1731 	old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
1732 	if (old_fltr) {
1733 		err = ice_fdir_write_all_fltr(pf, old_fltr, false);
1734 		if (err)
1735 			return err;
1736 		ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
1737 		/* update sb-filters count, specific to ring->channel */
1738 		ice_update_per_q_fltr(vsi, old_fltr->orig_q_index, false);
1739 		if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
1740 			/* we just deleted the last filter of flow_type so we
1741 			 * should also delete the HW filter info.
1742 			 */
1743 			ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
1744 		list_del(&old_fltr->fltr_node);
1745 		devm_kfree(ice_hw_to_dev(hw), old_fltr);
1746 	}
1747 	if (!input)
1748 		return err;
1749 	ice_fdir_list_add_fltr(hw, input);
1750 	/* update sb-filters count, specific to ring->channel */
1751 	ice_update_per_q_fltr(vsi, input->orig_q_index, true);
1752 	ice_fdir_update_cntrs(hw, input->flow_type, true);
1753 	return 0;
1754 }
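/* Usage sketch (mirrors ice_del_fdir_ethtool() below): passing
 * input == NULL makes this a pure delete, and -ENOENT is returned when
 * no filter exists at fltr_idx.
 *
 *	mutex_lock(&hw->fdir_fltr_lock);
 *	err = ice_fdir_update_list_entry(pf, NULL, fsp->location);
 *	mutex_unlock(&hw->fdir_fltr_lock);
 */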
1755 
1756 /**
1757  * ice_del_fdir_ethtool - delete Flow Director filter
1758  * @vsi: pointer to target VSI
1759  * @cmd: command to add or delete Flow Director filter
1760  *
1761  * Returns 0 on success and negative values for failure
1762  */
1763 int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
1764 {
1765 	struct ethtool_rx_flow_spec *fsp =
1766 		(struct ethtool_rx_flow_spec *)&cmd->fs;
1767 	struct ice_pf *pf = vsi->back;
1768 	struct ice_hw *hw = &pf->hw;
1769 	int val;
1770 
1771 	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
1772 		return -EOPNOTSUPP;
1773 
1774 	/* Do not delete filters during reset */
1775 	if (ice_is_reset_in_progress(pf->state)) {
1776 		dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
1777 		return -EBUSY;
1778 	}
1779 
1780 	if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
1781 		return -EBUSY;
1782 
1783 	mutex_lock(&hw->fdir_fltr_lock);
1784 	val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
1785 	mutex_unlock(&hw->fdir_fltr_lock);
1786 
1787 	return val;
1788 }
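/* From userspace this path is reached through ethtool's ntuple
 * interface, e.g. (command illustrative):
 *
 *	ethtool -N eth0 delete 5
 *
 * where 5 is the location (fsp->location) the rule was added at.
 */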
1789 
1790 /**
1791  * ice_update_ring_dest_vsi - update dest ring and dest VSI
1792  * @vsi: pointer to target VSI
1793  * @dest_vsi: ptr to dest VSI index
1794  * @ring: ptr to dest ring
1795  *
1796  * This function updates the destination VSI and queue if the user-specified
1797  * target queue falls within a channel's (aka ADQ) queue region.
1798  */
1799 static void
1800 ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
1801 {
1802 	struct ice_channel *ch;
1803 
1804 	list_for_each_entry(ch, &vsi->ch_list, list) {
1805 		if (!ch->ch_vsi)
1806 			continue;
1807 
1808 		/* locate the channel whose queue region contains the
1809 		 * specified queue index
1810 		 */
1811 		if ((*ring < ch->base_q) ||
1812 		    (*ring >= (ch->base_q + ch->num_rxq)))
1813 			continue;
1814 
1815 		/* update the dest_vsi based on channel */
1816 		*dest_vsi = ch->ch_vsi->idx;
1817 
1818 		/* update the "ring" to be correct based on channel */
1819 		*ring -= ch->base_q;
1820 	}
1821 }
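/* Worked example (hypothetical values): a channel with base_q = 8 and
 * num_rxq = 4 owns absolute queues 8-11. For *ring == 10 the loop
 * matches, *dest_vsi becomes the channel VSI's index and *ring is
 * rewritten to 10 - 8 = 2, i.e. the queue index relative to that
 * channel.
 */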
1822 
1823 /**
1824  * ice_set_fdir_input_set - Set the input set for Flow Director
1825  * @vsi: pointer to target VSI
1826  * @fsp: pointer to ethtool Rx flow specification
1827  * @input: filter structure
1828  */
1829 static int
1830 ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
1831 		       struct ice_fdir_fltr *input)
1832 {
1833 	u16 dest_vsi, q_index = 0;
1834 	u16 orig_q_index = 0;
1835 	struct ice_pf *pf;
1836 	struct ice_hw *hw;
1837 	int flow_type;
1838 	u8 dest_ctl;
1839 
1840 	if (!vsi || !fsp || !input)
1841 		return -EINVAL;
1842 
1843 	pf = vsi->back;
1844 	hw = &pf->hw;
1845 
1846 	dest_vsi = vsi->idx;
1847 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
1848 		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1849 	} else {
1850 		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
1851 		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
1852 
1853 		if (vf) {
1854 			dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
1855 			return -EINVAL;
1856 		}
1857 
1858 		if (ring >= vsi->num_rxq)
1859 			return -EINVAL;
1860 
1861 		orig_q_index = ring;
1862 		ice_update_ring_dest_vsi(vsi, &dest_vsi, &ring);
1863 		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1864 		q_index = ring;
1865 	}
1866 
1867 	input->fltr_id = fsp->location;
1868 	input->q_index = q_index;
1869 	flow_type = fsp->flow_type & ~FLOW_EXT;
1870 
1871 	/* Record the original queue index as specified by the user;
1872 	 * with a channel (ADQ) configuration, 'q_index' becomes relative
1873 	 * to the TC (channel).
1874 	 */
1875 	input->orig_q_index = orig_q_index;
1876 	input->dest_vsi = dest_vsi;
1877 	input->dest_ctl = dest_ctl;
1878 	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
1879 	input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
1880 	input->flow_type = ice_ethtool_flow_to_fltr(flow_type);
1881 
1882 	if (fsp->flow_type & FLOW_EXT) {
1883 		memcpy(input->ext_data.usr_def, fsp->h_ext.data,
1884 		       sizeof(input->ext_data.usr_def));
1885 		input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
1886 		input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
1887 		memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
1888 		       sizeof(input->ext_mask.usr_def));
1889 		input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
1890 		input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
1891 	}
1892 
1893 	switch (flow_type) {
1894 	case TCP_V4_FLOW:
1895 	case UDP_V4_FLOW:
1896 	case SCTP_V4_FLOW:
1897 		input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1898 		input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
1899 		input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
1900 		input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
1901 		input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
1902 		input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
1903 		input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
1904 		input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
1905 		break;
1906 	case IPV4_USER_FLOW:
1907 		input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
1908 		input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
1909 		input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
1910 		input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
1911 		input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
1912 		input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
1913 		input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
1914 		input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
1915 		input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
1916 		input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
1917 		input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
1918 		input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
1919 		break;
1920 	case TCP_V6_FLOW:
1921 	case UDP_V6_FLOW:
1922 	case SCTP_V6_FLOW:
1923 		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1924 		       sizeof(struct in6_addr));
1925 		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1926 		       sizeof(struct in6_addr));
1927 		input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
1928 		input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
1929 		input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
1930 		memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
1931 		       sizeof(struct in6_addr));
1932 		memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
1933 		       sizeof(struct in6_addr));
1934 		input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
1935 		input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
1936 		input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
1937 		break;
1938 	case IPV6_USER_FLOW:
1939 		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1940 		       sizeof(struct in6_addr));
1941 		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1942 		       sizeof(struct in6_addr));
1943 		input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
1944 		input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
1945 
1946 		/* if no protocol requested, use IPPROTO_NONE */
1947 		if (!fsp->m_u.usr_ip6_spec.l4_proto)
1948 			input->ip.v6.proto = IPPROTO_NONE;
1949 		else
1950 			input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
1951 
1952 		memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1953 		       sizeof(struct in6_addr));
1954 		memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1955 		       sizeof(struct in6_addr));
1956 		input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
1957 		input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
1958 		input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
1959 		break;
1960 	case ETHER_FLOW:
1961 		input->eth = fsp->h_u.ether_spec;
1962 		input->eth_mask = fsp->m_u.ether_spec;
1963 		break;
1964 	default:
1965 		/* reject flow types we do not parse */
1966 		return -EINVAL;
1967 	}
1968 
1969 	return 0;
1970 }
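/* Minimal sketch (illustrative, not driver code) of the TCP_V4_FLOW
 * mapping above: an ethtool_rx_flow_spec matching TCP traffic to
 * 192.168.1.1:80 with fully set masks. Fields are big-endian on the
 * wire, hence the cpu_to_be*() conversions.
 */
static void example_fill_tcp4_spec(struct ethtool_rx_flow_spec *fsp)
{
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(0xc0a80101);	/* 192.168.1.1 */
	fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(80);
	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(0xffffffff);
	fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(0xffff);
	fsp->ring_cookie = 2;	/* deliver matches to Rx queue 2 */
	fsp->location = 5;	/* filter ID within the table */
}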
1971 
1972 /**
1973  * ice_add_fdir_ethtool - Add/Remove Flow Director filter
1974  * @vsi: pointer to target VSI
1975  * @cmd: command to add or delete Flow Director filter
1976  *
1977  * Returns 0 on success and negative values for failure
1978  */
1979 int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
1980 {
1981 	struct ice_rx_flow_userdef userdata;
1982 	struct ethtool_rx_flow_spec *fsp;
1983 	struct ice_fdir_fltr *input;
1984 	struct device *dev;
1985 	struct ice_pf *pf;
1986 	struct ice_hw *hw;
1987 	int fltrs_needed;
1988 	u32 max_location;
1989 	u16 tunnel_port;
1990 	int ret;
1991 
1992 	if (!vsi)
1993 		return -EINVAL;
1994 
1995 	pf = vsi->back;
1996 	hw = &pf->hw;
1997 	dev = ice_pf_to_dev(pf);
1998 
1999 	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
2000 		return -EOPNOTSUPP;
2001 
2002 	/* Do not program filters during reset */
2003 	if (ice_is_reset_in_progress(pf->state)) {
2004 		dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
2005 		return -EBUSY;
2006 	}
2007 
2008 	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
2009 
2010 	if (ice_parse_rx_flow_user_data(fsp, &userdata))
2011 		return -EINVAL;
2012 
2013 	if (fsp->flow_type & FLOW_MAC_EXT)
2014 		return -EINVAL;
2015 
2016 	ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
2017 	if (ret)
2018 		return ret;
2019 
2020 	max_location = ice_get_fdir_cnt_all(hw);
2021 	if (fsp->location >= max_location) {
2022 		dev_err(dev, "Failed to add filter. The number of ntuple filters or the provided location exceeds the max of %d.\n",
2023 			max_location);
2024 		return -ENOSPC;
2025 	}
2026 
2027 	/* return error if not an update and no available filters */
2028 	fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
2029 	if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
2030 	    ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
2031 		dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
2032 		return -ENOSPC;
2033 	}
2034 
2035 	input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
2036 	if (!input)
2037 		return -ENOMEM;
2038 
2039 	ret = ice_set_fdir_input_set(vsi, fsp, input);
2040 	if (ret)
2041 		goto free_input;
2042 
2043 	mutex_lock(&hw->fdir_fltr_lock);
2044 	if (ice_fdir_is_dup_fltr(hw, input)) {
2045 		ret = -EINVAL;
2046 		goto release_lock;
2047 	}
2048 
2049 	if (userdata.flex_fltr) {
2050 		input->flex_fltr = true;
2051 		input->flex_word = cpu_to_be16(userdata.flex_word);
2052 		input->flex_offset = userdata.flex_offset;
2053 	}
2054 
2055 	input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
2056 	input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
2057 	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
2058 
2059 	/* input struct is added to the HW filter list */
2060 	ret = ice_fdir_update_list_entry(pf, input, fsp->location);
2061 	if (ret)
2062 		goto release_lock;
2063 
2064 	ret = ice_fdir_write_all_fltr(pf, input, true);
2065 	if (ret)
2066 		goto remove_sw_rule;
2067 
2068 	goto release_lock;
2069 
2070 remove_sw_rule:
2071 	ice_fdir_update_cntrs(hw, input->flow_type, false);
2072 	/* update sb-filters count, specific to ring->channel */
2073 	ice_update_per_q_fltr(vsi, input->orig_q_index, false);
2074 	list_del(&input->fltr_node);
2075 release_lock:
2076 	mutex_unlock(&hw->fdir_fltr_lock);
2077 free_input:
2078 	if (ret)
2079 		devm_kfree(dev, input);
2080 
2081 	return ret;
2082 }
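/* The userspace counterpart (commands illustrative): adding the filter
 * sketched above through ethtool's ntuple interface, then listing the
 * installed rules:
 *
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 \
 *		action 2 loc 5
 *	ethtool -n eth0
 */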
2083