xref: /linux/drivers/net/ethernet/intel/ice/ice_arfs.c (revision 5c8013ae2e86ec36b07500ba4cacb14ab4d6f728)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020, Intel Corporation. */

#include "ice.h"
#include <net/rps.h>

/**
 * ice_is_arfs_active - helper to check if aRFS is active
 * @vsi: VSI to check
 */
static bool ice_is_arfs_active(struct ice_vsi *vsi)
{
	return !!vsi->arfs_fltr_list;
}

/**
 * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
 * @hw: pointer to the HW structure
 * @flow_type: flow type as Flow Director understands it
 *
 * Flow Director will query this function to see if aRFS is currently using
 * the specified flow_type for perfect (4-tuple) filters.
 */
bool
ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
{
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	struct ice_pf *pf = hw->back;
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;

	/* active counters can be updated by multiple CPUs */
	smp_mb__before_atomic();
	switch (flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
	default:
		return false;
	}
}

/**
 * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
 * @vsi: VSI that aRFS is active on
 * @entry: aRFS entry used to change counters
 * @add: true to increment counter, false to decrement
 */
static void
ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
				  struct ice_arfs_entry *entry, bool add)
{
	struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;

	switch (entry->fltr_info.flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv6_cnt);
		break;
	default:
		dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
			entry->fltr_info.flow_type);
	}
}

/**
 * ice_arfs_del_flow_rules - delete the rules passed in from HW
 * @vsi: VSI for the flow rules that need to be deleted
 * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
 *
 * Loop through the delete list passed in and remove the rules from HW. After
 * each rule is deleted, disconnect and free the ice_arfs_entry because it is no
 * longer being referenced by the aRFS hash table.
 */
static void
ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
		int result;

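		/* the last two arguments request a delete (add == false) of a
		 * non-tunneled filter; the parameter naming here is an
		 * assumption based on the
		 * ice_fdir_write_fltr(pf, input, add, is_tun) signature
		 */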
		result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, e, false);
		else
			dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, e->fltr_state, e->fltr_info.fltr_id,
				e->flow_id, e->fltr_info.q_index);

		/* The aRFS hash table is no longer referencing this entry */
		hlist_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_arfs_add_flow_rules - add the rules passed in to HW
 * @vsi: VSI for the flow rules that need to be added
 * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
 *
 * Loop through the add list passed in and add the rules to HW. After each
 * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
 * the ice_arfs_entry(s) because they are still being referenced in the aRFS
 * hash table.
 */
static void
ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
{
	struct ice_arfs_entry_ptr *ep;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back,
					     &ep->arfs_entry->fltr_info, true,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
							  true);
		else
			dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, ep->arfs_entry->fltr_state,
				ep->arfs_entry->fltr_info.fltr_id,
				ep->arfs_entry->flow_id,
				ep->arfs_entry->fltr_info.q_index);

		hlist_del(&ep->list_entry);
		devm_kfree(dev, ep);
	}
}

/**
 * ice_arfs_is_flow_expired - check if the aRFS entry has expired
 * @vsi: VSI containing the aRFS entry
 * @arfs_entry: aRFS entry that's being checked for expiration
 *
 * Return true if the flow has expired, else false. This function should be
 * used to determine whether or not an aRFS entry should be removed from the
 * hardware and software structures.
 */
static bool
ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
{
#define ICE_ARFS_TIME_DELTA_EXPIRATION	msecs_to_jiffies(5000)
	if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
				arfs_entry->flow_id,
				arfs_entry->fltr_info.fltr_id))
		return true;

	/* expiration timer only used for UDP filters */
	if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
	    arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		return false;

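	/* time_in_range64(x, lo, hi) is true when lo <= x <= hi, so this
	 * reports expiry once at least ICE_ARFS_TIME_DELTA_EXPIRATION
	 * (5 seconds worth of jiffies) has passed since the entry was
	 * activated
	 */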
	return time_in_range64(arfs_entry->time_activated +
			       ICE_ARFS_TIME_DELTA_EXPIRATION,
			       arfs_entry->time_activated, get_jiffies_64());
}

/**
 * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
 * @vsi: the VSI to be forwarded to
 * @idx: index into the table of aRFS filter lists. Obtained from skb->hash
 * @add_list: list to populate with filters to be added to Flow Director
 * @del_list: list to populate with filters to be deleted from Flow Director
 *
 * Iterate over the hlist at the index given in the aRFS hash table and
 * determine if there are any aRFS entries that need to be either added or
 * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the
 * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and
 * the flow has expired delete the filter from HW. The caller of this function
 * is then expected to program the rules collected on add_list into HW and to
 * remove the rules collected on del_list from HW.
 */
static void
ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
			   struct hlist_head *add_list,
			   struct hlist_head *del_list)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	/* go through the aRFS hlist at this idx and check for needed updates */
	hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
		/* check if filter needs to be added to HW */
		if (e->fltr_state == ICE_ARFS_INACTIVE) {
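			/* the caller holds vsi->arfs_lock (a BH spinlock),
			 * so this allocation must not sleep; hence GFP_ATOMIC
			 */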
			enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
			struct ice_arfs_entry_ptr *ep =
				devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);

			if (!ep)
				continue;
			INIT_HLIST_NODE(&ep->list_entry);
			/* reference aRFS entry to add HW filter */
			ep->arfs_entry = e;
			hlist_add_head(&ep->list_entry, add_list);
			e->fltr_state = ICE_ARFS_ACTIVE;
			/* expiration timer only used for UDP flows */
			if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
				e->time_activated = get_jiffies_64();
		} else if (e->fltr_state == ICE_ARFS_ACTIVE) {
			/* check if filter needs to be removed from HW */
			if (ice_arfs_is_flow_expired(vsi, e)) {
				/* remove aRFS entry from hash table for delete
				 * and to prevent referencing it the next time
				 * through this hlist index
				 */
				hlist_del(&e->list_entry);
				e->fltr_state = ICE_ARFS_TODEL;
				/* save reference to aRFS entry for delete */
				hlist_add_head(&e->list_entry, del_list);
			}
		}
}

/**
 * ice_sync_arfs_fltrs - update all aRFS filters
 * @pf: board private structure
 */
void ice_sync_arfs_fltrs(struct ice_pf *pf)
{
	HLIST_HEAD(tmp_del_list);
	HLIST_HEAD(tmp_add_list);
	struct ice_vsi *pf_vsi;
	unsigned int i;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	if (!ice_is_arfs_active(pf_vsi))
		return;

	spin_lock_bh(&pf_vsi->arfs_lock);
	/* aRFS is only supported on the PF VSI, so processing it is all
	 * that is needed here
	 */
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
					   &tmp_del_list);
	spin_unlock_bh(&pf_vsi->arfs_lock);

	/* use list of ice_arfs_entry(s) for delete */
	ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);

	/* use list of ice_arfs_entry_ptr(s) for add */
	ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
}

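/* Sketch of how this function is expected to be reached (illustrative, not
 * part of this file): the driver's periodic service task calls it, roughly
 * like
 *
 *	static void ice_service_task(struct work_struct *work)
 *	{
 *		struct ice_pf *pf = container_of(work, struct ice_pf,
 *						 serv_task);
 *		...
 *		ice_sync_arfs_fltrs(pf);
 *		...
 *	}
 *
 * The work item and field names above are assumptions, not definitions from
 * this driver.
 */
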
/**
 * ice_arfs_build_entry - builds an aRFS entry based on input
 * @vsi: destination VSI for this flow
 * @fk: flow dissector keys for creating the tuple
 * @rxq_idx: Rx queue to steer this flow to
 * @flow_id: passed down from the stack and saved for flow expiration
 *
 * Return: new aRFS entry on success, NULL on failure.
 */
static struct ice_arfs_entry *
ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
		     u16 rxq_idx, u32 flow_id)
{
	struct ice_arfs_entry *arfs_entry;
	struct ice_fdir_fltr *fltr_info;
	u8 ip_proto;

	arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
				  sizeof(*arfs_entry),
				  GFP_ATOMIC | __GFP_NOWARN);
	if (!arfs_entry)
		return NULL;

	fltr_info = &arfs_entry->fltr_info;
	fltr_info->q_index = rxq_idx;
	fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
	fltr_info->dest_vsi = vsi->idx;
	ip_proto = fk->basic.ip_proto;

	if (fk->basic.n_proto == htons(ETH_P_IP)) {
		fltr_info->ip.v4.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV4_TCP :
			ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
		fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
		fltr_info->ip.v4.src_port = fk->ports.src;
		fltr_info->ip.v4.dst_port = fk->ports.dst;
	} else { /* ETH_P_IPV6 */
		fltr_info->ip.v6.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV6_TCP :
			ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
		fltr_info->ip.v6.src_port = fk->ports.src;
		fltr_info->ip.v6.dst_port = fk->ports.dst;
	}

	arfs_entry->flow_id = flow_id;
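	/* wrap the monotonically increasing ID so it stays below
	 * RPS_NO_FILTER, the value the RPS core reserves to mean "no filter"
	 */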
	fltr_info->fltr_id =
		atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;

	return arfs_entry;
}

/**
 * ice_arfs_is_perfect_flow_set - Check to see if perfect flow is set
 * @hw: pointer to HW structure
 * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
 * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
 *
 * We only support perfect (4-tuple) filters for aRFS. This function allows
 * aRFS to check if perfect (4-tuple) flow rules are currently in place by
 * Flow Director.
 */
static bool
ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
{
	unsigned long *perfect_fltr = hw->fdir_perfect_fltr;

	/* advanced Flow Director disabled, perfect filters always supported */
	if (!perfect_fltr)
		return true;

	if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);

	return false;
}

/**
 * ice_arfs_cmp - Check if aRFS filter matches this flow.
 * @fltr_info: filter info of the saved ARFS entry.
 * @fk: flow dissector keys.
 * @n_proto: One of htons(ETH_P_IP) or htons(ETH_P_IPV6).
 * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP.
 *
 * Since this function assumes limited values for n_proto and ip_proto, it
 * is meant to be called only from ice_rx_flow_steer().
 *
 * Return:
 * * true	- fltr_info refers to the same flow as fk.
 * * false	- fltr_info and fk refer to different flows.
 */
static bool
ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk,
	     __be16 n_proto, u8 ip_proto)
{
	/* Determine if the filter is for IPv4 or IPv6 based on flow_type,
	 * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}.
	 */
	bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
		     fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP;

	/* The checks below compare the quickest and most discriminative
	 * fields first so that non-matching flows fail early.
	 */
	if (is_v4)
		return n_proto == htons(ETH_P_IP) &&
			fltr_info->ip.v4.src_port == fk->ports.src &&
			fltr_info->ip.v4.dst_port == fk->ports.dst &&
			fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src &&
			fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst &&
			fltr_info->ip.v4.proto == ip_proto;

	return fltr_info->ip.v6.src_port == fk->ports.src &&
		fltr_info->ip.v6.dst_port == fk->ports.dst &&
		fltr_info->ip.v6.proto == ip_proto &&
		!memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
			sizeof(struct in6_addr)) &&
		!memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
			sizeof(struct in6_addr));
}

/**
 * ice_rx_flow_steer - steer the Rx flow to where application is being run
 * @netdev: ptr to the netdev being adjusted
 * @skb: buffer with required header information
 * @rxq_idx: queue to which the flow needs to move
 * @flow_id: flow identifier provided by the netdev
 *
 * Based on the skb, rxq_idx, and flow_id passed in, add/update an entry in
 * the aRFS hash table. Iterate over one of the hlists in the aRFS hash
 * table: if the flow_id already exists in the hash table but the rxq_idx has
 * changed, mark the entry as ICE_ARFS_INACTIVE so it can get updated in HW,
 * else if the entry is marked as ICE_ARFS_TODEL, delete it from the aRFS
 * hash table. If neither of the previous conditions is true, then add a new
 * entry in the aRFS hash table, which gets set to ICE_ARFS_INACTIVE by
 * default so it can be added to HW.
 */
int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
		  u16 rxq_idx, u32 flow_id)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_arfs_entry *arfs_entry;
	struct ice_vsi *vsi = np->vsi;
	struct flow_keys fk;
	struct ice_pf *pf;
	__be16 n_proto;
	u8 ip_proto;
	u16 idx;
	int ret;

	/* aRFS memory was not allocated at init time, so don't crash */
	if (unlikely(!vsi->arfs_fltr_list))
		return -ENODEV;

	pf = vsi->back;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	n_proto = fk.basic.n_proto;
	/* Support only IPv4 and IPv6 */
	if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
	    n_proto == htons(ETH_P_IPV6))
		ip_proto = fk.basic.ip_proto;
	else
		return -EPROTONOSUPPORT;

	/* Support only TCP and UDP */
	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	/* only support 4-tuple filters for aRFS */
	if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
		return -EOPNOTSUPP;

	/* choose the aRFS list bucket based on skb hash */
	idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;
	/* search for entry in the bucket */
	spin_lock_bh(&vsi->arfs_lock);
	hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
			     list_entry) {
		struct ice_fdir_fltr *fltr_info;

		/* keep searching for the already existing arfs_entry flow */
		if (arfs_entry->flow_id != flow_id)
			continue;

		fltr_info = &arfs_entry->fltr_info;

		if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto))
			continue;

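		/* a non-negative return value is the filter ID; the stack
		 * records it and later hands it back to rps_may_expire_flow()
		 * when deciding whether this flow may be expired
		 */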
		ret = fltr_info->fltr_id;

		if (fltr_info->q_index == rxq_idx ||
		    arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
			goto out;

		/* update the queue to forward to on an already existing flow */
		fltr_info->q_index = rxq_idx;
		arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
		ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
		goto out_schedule_service_task;
	}

	arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
	if (!arfs_entry) {
		ret = -ENOMEM;
		goto out;
	}

	ret = arfs_entry->fltr_info.fltr_id;
	INIT_HLIST_NODE(&arfs_entry->list_entry);
	hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
out_schedule_service_task:
	ice_service_task_schedule(pf);
out:
	spin_unlock_bh(&vsi->arfs_lock);
	return ret;
}

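/* Sketch of how this callback is typically wired up (illustrative, not part
 * of this file): with CONFIG_RFS_ACCEL enabled, the stack reaches
 * ice_rx_flow_steer() through the netdev ops, roughly:
 *
 *	static const struct net_device_ops ice_netdev_ops = {
 *		...
 * #ifdef CONFIG_RFS_ACCEL
 *		.ndo_rx_flow_steer = ice_rx_flow_steer,
 * #endif
 *		...
 *	};
 *
 * The ops struct name above is an assumption here.
 */
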
/**
 * ice_init_arfs_cntrs - initialize aRFS counter values
 * @vsi: VSI that aRFS counters need to be initialized on
 */
static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
{
	if (!vsi || vsi->type != ICE_VSI_PF)
		return -EINVAL;

	vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
				       GFP_KERNEL);
	if (!vsi->arfs_fltr_cntrs)
		return -ENOMEM;

	vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
					 GFP_KERNEL);
	if (!vsi->arfs_last_fltr_id) {
		kfree(vsi->arfs_fltr_cntrs);
		vsi->arfs_fltr_cntrs = NULL;
		return -ENOMEM;
	}

	return 0;
}

552 
553 /**
554  * ice_init_arfs - initialize aRFS resources
555  * @vsi: the VSI to be forwarded to
556  */
ice_init_arfs(struct ice_vsi * vsi)557 void ice_init_arfs(struct ice_vsi *vsi)
558 {
559 	struct hlist_head *arfs_fltr_list;
560 	unsigned int i;
561 
562 	if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
563 		return;
564 
565 	arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
566 				 GFP_KERNEL);
567 	if (!arfs_fltr_list)
568 		return;
569 
570 	if (ice_init_arfs_cntrs(vsi))
571 		goto free_arfs_fltr_list;
572 
573 	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
574 		INIT_HLIST_HEAD(&arfs_fltr_list[i]);
575 
576 	spin_lock_init(&vsi->arfs_lock);
577 
578 	vsi->arfs_fltr_list = arfs_fltr_list;
579 
580 	return;
581 
582 free_arfs_fltr_list:
583 	kfree(arfs_fltr_list);
584 }
585 
/**
 * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
 * @vsi: the VSI to be forwarded to
 */
void ice_clear_arfs(struct ice_vsi *vsi)
{
	struct device *dev;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
	    !vsi->arfs_fltr_list)
		return;

	dev = ice_pf_to_dev(vsi->back);
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
		struct ice_arfs_entry *r;
		struct hlist_node *n;

		spin_lock_bh(&vsi->arfs_lock);
		hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
					  list_entry) {
			hlist_del(&r->list_entry);
			devm_kfree(dev, r);
		}
		spin_unlock_bh(&vsi->arfs_lock);
	}

	kfree(vsi->arfs_fltr_list);
	vsi->arfs_fltr_list = NULL;
	kfree(vsi->arfs_last_fltr_id);
	vsi->arfs_last_fltr_id = NULL;
	kfree(vsi->arfs_fltr_cntrs);
	vsi->arfs_fltr_cntrs = NULL;
}

/**
 * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
 * @vsi: the VSI to be forwarded to
 */
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;
	struct ice_pf *pf;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return 0;

	pf = vsi->back;
	netdev = vsi->netdev;
	if (!pf || !netdev || !vsi->num_q_vectors)
		return -EINVAL;

	netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
		   vsi->type, netdev->name, vsi->num_q_vectors);

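	/* the CPU reverse map lets RFS translate the CPU an application runs
	 * on into an Rx queue index, which later shows up in
	 * ice_rx_flow_steer() as rxq_idx
	 */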
	return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors);
}

/**
 * ice_remove_arfs - remove/clear all aRFS resources
 * @pf: device private structure
 */
void ice_remove_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_clear_arfs(pf_vsi);
}

658 
659 /**
660  * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
661  * @pf: device private structure
662  */
ice_rebuild_arfs(struct ice_pf * pf)663 void ice_rebuild_arfs(struct ice_pf *pf)
664 {
665 	struct ice_vsi *pf_vsi;
666 
667 	pf_vsi = ice_get_main_vsi(pf);
668 	if (!pf_vsi)
669 		return;
670 
671 	ice_remove_arfs(pf);
672 	ice_init_arfs(pf_vsi);
673 }
674