/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/rps.h>
#include "en.h"

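/* Each aRFS flow table keeps its steering rules in a small hash table:
 * ARFS_HASH_SHIFT is BITS_PER_BYTE (8), so every table has BIT(8) = 256
 * buckets, indexed by a hash of the flow's source and destination ports
 * (see arfs_hash_bucket() below).
 */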
#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)

struct arfs_table {
	struct mlx5e_flow_table  ft;
	struct mlx5_flow_handle	 *default_rule;
	struct hlist_head	 rules_hash[ARFS_HASH_SIZE];
};

enum {
	MLX5E_ARFS_STATE_ENABLED,
};

enum arfs_type {
	ARFS_IPV4_TCP,
	ARFS_IPV6_TCP,
	ARFS_IPV4_UDP,
	ARFS_IPV6_UDP,
	ARFS_NUM_TYPES,
};

struct mlx5e_arfs_tables {
	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
	/* Protect aRFS rules list */
	spinlock_t                     arfs_lock;
	int                            last_filter_id;
	struct workqueue_struct        *wq;
	unsigned long                  state;
};

struct arfs_tuple {
	__be16 etype;
	u8     ip_proto;
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
};

struct arfs_rule {
	struct mlx5e_priv	*priv;
	struct work_struct      arfs_work;
	struct mlx5_flow_handle *rule;
	struct hlist_node	hlist;
	int			rxq;
	/* Flow ID passed to ndo_rx_flow_steer */
	int			flow_id;
	/* Filter ID returned by ndo_rx_flow_steer */
	int			filter_id;
	struct arfs_tuple	tuple;
};

#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
	for (i = 0; i < ARFS_NUM_TYPES; i++) \
		mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)

#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
	for (j = 0; j < ARFS_HASH_SIZE; j++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)

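/* Map an aRFS table type onto the corresponding TTC traffic type. The
 * -EINVAL return is a sentinel for callers that pass a value outside
 * enum arfs_type; it is never produced for the four valid entries.
 */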
static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type)
{
	switch (type) {
	case ARFS_IPV4_TCP:
		return MLX5_TT_IPV4_TCP;
	case ARFS_IPV4_UDP:
		return MLX5_TT_IPV4_UDP;
	case ARFS_IPV6_TCP:
		return MLX5_TT_IPV6_TCP;
	case ARFS_IPV6_UDP:
		return MLX5_TT_IPV6_UDP;
	default:
		return -EINVAL;
	}
}

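/* aRFS is enabled and disabled purely by repointing the TTC rule
 * destinations: disable restores the default TIRs, while enable (below)
 * forwards each traffic type to its aRFS flow table. The aRFS tables
 * themselves stay allocated either way.
 */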
static int arfs_disable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	int err, i;

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		/* Modify the ttc rules' destinations back to their defaults */
		err = mlx5_ttc_fwd_default_dest(ttc, arfs_get_tt(i));
		if (err) {
			fs_err(fs,
			       "%s: modify ttc[%d] default destination failed, err(%d)\n",
			       __func__, arfs_get_tt(i), err);
			return err;
		}
	}
	return 0;
}

static void arfs_del_rules(struct mlx5e_flow_steering *fs);

int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
{
	/* When moving to switchdev mode, fs->arfs is freed by the
	 * mlx5e_nic_profile cleanup_rx callback and is not recreated when
	 * mlx5e_uplink_rep_profile is loaded, as mlx5e_create_flow_steering()
	 * is not called by the uplink_rep profile's init_rx callback. Thus,
	 * if ntuple is set, the move to switchdev enters this function with
	 * fs->arfs nullified.
	 */
	if (!mlx5e_fs_get_arfs(fs))
		return 0;

	arfs_del_rules(fs);

	return arfs_disable(fs);
}

int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct mlx5_flow_destination dest = {};
	int err, i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		dest.ft = arfs->arfs_tables[i].ft.t;
		/* Modify the ttc rules' destinations to point to the aRFS FTs */
		err = mlx5_ttc_fwd_dest(ttc, arfs_get_tt(i), &dest);
		if (err) {
			fs_err(fs, "%s: modify ttc[%d] dest to arfs failed, err(%d)\n",
			       __func__, arfs_get_tt(i), err);
			arfs_disable(fs);
			return err;
		}
	}
	set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);

	return 0;
}

static void arfs_destroy_table(struct arfs_table *arfs_t)
{
	mlx5_del_flow_rules(arfs_t->default_rule);
	mlx5e_destroy_flow_table(&arfs_t->ft);
}

static void _mlx5e_cleanup_tables(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	int i;

	arfs_del_rules(fs);
	destroy_workqueue(arfs->wq);
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		if (!IS_ERR_OR_NULL(arfs->arfs_tables[i].ft.t))
			arfs_destroy_table(&arfs->arfs_tables[i]);
	}
}

void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);

	if (!ntuple)
		return;

	_mlx5e_cleanup_tables(fs);
	mlx5e_fs_set_arfs(fs, NULL);
	kvfree(arfs);
}

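/* Install the lowest-priority catch-all rule of an aRFS table: traffic
 * that matches no specific 5-tuple rule is forwarded to the RSS TIR of
 * the corresponding traffic type, i.e. it is spread exactly as it would
 * be with aRFS off.
 */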
static int arfs_add_default_rule(struct mlx5e_flow_steering *fs,
				 struct mlx5e_rx_res *rx_res,
				 enum arfs_type type)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct arfs_table *arfs_t = &arfs->arfs_tables[type];
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	enum mlx5_traffic_types tt;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	tt = arfs_get_tt(type);
	if (tt == -EINVAL) {
		fs_err(fs, "%s: bad arfs_type: %d\n", __func__, type);
		return -EINVAL;
	}

	/* FIXME: Must use mlx5_ttc_get_default_dest(),
	 * but can't since the TTC default is not set up yet!
	 */
	dest.tir_num = mlx5e_rx_res_get_tirn_rss(rx_res, tt);
	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
						   &flow_act,
						   &dest, 1);
	if (IS_ERR(arfs_t->default_rule)) {
		err = PTR_ERR(arfs_t->default_rule);
		arfs_t->default_rule = NULL;
		fs_err(fs, "%s: add rule failed, arfs type=%d\n", __func__, type);
	}

	return err;
}

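/* Each aRFS table has two flow groups: group 1 holds the specific
 * 5-tuple rules (up to BIT(16) - 1 entries, matching ethertype, ports
 * and addresses), and group 2 is a single wildcard entry that hosts the
 * default catch-all rule added above.
 */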
#define MLX5E_ARFS_NUM_GROUPS	2
#define MLX5E_ARFS_GROUP1_SIZE	(BIT(16) - 1)
#define MLX5E_ARFS_GROUP2_SIZE	BIT(0)
#define MLX5E_ARFS_TABLE_SIZE	(MLX5E_ARFS_GROUP1_SIZE +\
				 MLX5E_ARFS_GROUP2_SIZE)
static int arfs_create_groups(struct mlx5e_flow_table *ft,
			      enum arfs_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free_g;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
				       outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV6_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
		break;
	case ARFS_IPV4_UDP:
	case ARFS_IPV6_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
		break;
	default:
		err = -EINVAL;
		goto err_free_in;
	}

	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV4_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		break;
	case ARFS_IPV6_TCP:
	case ARFS_IPV6_UDP:
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		break;
	default:
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_clean_group;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_clean_group;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_clean_group:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
err_free_in:
	kvfree(in);
err_free_g:
	kfree(ft->g);
	ft->g = NULL;
	return err;
}

static int arfs_create_table(struct mlx5e_flow_steering *fs,
			     struct mlx5e_rx_res *rx_res,
			     enum arfs_type type)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_ARFS_TABLE_SIZE;
	ft_attr.level = MLX5E_ARFS_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = arfs_create_groups(ft, type);
	if (err)
		goto err;

	err = arfs_add_default_rule(fs, rx_res, type);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
			     struct mlx5e_rx_res *rx_res, bool ntuple)
{
	struct mlx5e_arfs_tables *arfs;
	int err = -ENOMEM;
	int i;

	if (!ntuple)
		return 0;

	arfs = kvzalloc(sizeof(*arfs), GFP_KERNEL);
	if (!arfs)
		return -ENOMEM;

	spin_lock_init(&arfs->arfs_lock);
	arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
	if (!arfs->wq)
		goto err;

	mlx5e_fs_set_arfs(fs, arfs);

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		err = arfs_create_table(fs, rx_res, i);
		if (err)
			goto err_des;
	}
	return 0;

err_des:
	_mlx5e_cleanup_tables(fs);
err:
	mlx5e_fs_set_arfs(fs, NULL);
	kvfree(arfs);
	return err;
}

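/* Cap on how many rules a single arfs_may_expire_flow() pass may move
 * to its local delete list, so that rule expiry never holds arfs_lock
 * (and thus blocks new ndo_rx_flow_steer requests) for too long.
 */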
#define MLX5E_ARFS_EXPIRY_QUOTA 60

static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_rule *arfs_rule;
	struct hlist_node *htmp;
	HLIST_HEAD(del_list);
	int quota = 0;
	int i;
	int j;

	spin_lock_bh(&arfs->arfs_lock);
	mlx5e_for_each_arfs_rule(arfs_rule, htmp, arfs->arfs_tables, i, j) {
		if (!work_pending(&arfs_rule->arfs_work) &&
		    rps_may_expire_flow(priv->netdev,
					arfs_rule->rxq, arfs_rule->flow_id,
					arfs_rule->filter_id)) {
			hlist_del_init(&arfs_rule->hlist);
			hlist_add_head(&arfs_rule->hlist, &del_list);
			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
				break;
		}
	}
	spin_unlock_bh(&arfs->arfs_lock);
	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
		if (arfs_rule->rule) {
			mlx5_del_flow_rules(arfs_rule->rule);
			priv->channel_stats[arfs_rule->rxq]->rq.arfs_expired++;
		}
		hlist_del(&arfs_rule->hlist);
		kfree(arfs_rule);
	}
}

static void arfs_del_rules(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct hlist_node *htmp;
	struct arfs_rule *rule;
	HLIST_HEAD(del_list);
	int i;
	int j;

	clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);

	spin_lock_bh(&arfs->arfs_lock);
	mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
		hlist_del_init(&rule->hlist);
		hlist_add_head(&rule->hlist, &del_list);
	}
	spin_unlock_bh(&arfs->arfs_lock);

	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
		cancel_work_sync(&rule->arfs_work);
		if (rule->rule)
			mlx5_del_flow_rules(rule->rule);
		hlist_del(&rule->hlist);
		kfree(rule);
	}
}

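/* Pick a bucket from the ports alone: the source port and the
 * destination port shifted by two bits are OR'ed into one key and
 * folded with hash_long() into ARFS_HASH_SHIFT (8) bits of index.
 */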
static struct hlist_head *
arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
		 __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);

	bucket_idx = hash_long(l, ARFS_HASH_SHIFT);

	return &arfs_t->rules_hash[bucket_idx];
}

static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
					 u8 ip_proto, __be16 etype)
{
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV4_TCP];
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV4_UDP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV6_TCP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV6_UDP];

	return NULL;
}

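/* Build and install the hardware steering rule for one aRFS flow: match
 * the full 5-tuple (ethertype, IP protocol, addresses and ports) and
 * steer hits to the direct TIR of the requested receive queue.
 */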
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
					      struct arfs_rule *arfs_rule)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_tuple *tuple = &arfs_rule->tuple;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct arfs_table *arfs_table;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
		err = -ENOMEM;
		goto out;
	}
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
		 ntohs(tuple->etype));
	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
	if (!arfs_table) {
		WARN_ONCE(1, "arfs table does not exist for etype %u and ip_proto %u\n",
			  tuple->etype, tuple->ip_proto);
		err = -EINVAL;
		goto out;
	}

	ft = arfs_table->ft.t;
	if (tuple->ip_proto == IPPROTO_TCP) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
			 ntohs(tuple->src_port));
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
			 ntohs(tuple->src_port));
	}
	if (tuple->etype == htons(ETH_P_IP)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &tuple->src_ipv4,
		       4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &tuple->dst_ipv4,
		       4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &tuple->src_ipv6,
		       16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &tuple->dst_ipv6,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
	}
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
		netdev_dbg(priv->netdev,
			   "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed, err=%d\n",
			   __func__, arfs_rule->filter_id, arfs_rule->rxq,
			   tuple->ip_proto, err);
	}

out:
	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
				struct mlx5_flow_handle *rule, u16 rxq)
{
	struct mlx5_flow_destination dst = {};
	int err = 0;

	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq);
	err = mlx5_modify_rule_destination(rule, &dst, NULL);
	if (err) {
		priv->channel_stats[rxq]->rq.arfs_err++;
		netdev_warn(priv->netdev,
			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
	}
}

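/* Deferred work for one aRFS request: install the flow rule if it does
 * not exist yet, or re-target an existing rule to the new receive
 * queue, then opportunistically expire stale flows. Bails out early if
 * aRFS has been disabled in the meantime.
 */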
static void arfs_handle_work(struct work_struct *work)
{
	struct arfs_rule *arfs_rule = container_of(work,
						   struct arfs_rule,
						   arfs_work);
	struct mlx5e_priv *priv = arfs_rule->priv;
	struct mlx5e_arfs_tables *arfs;
	struct mlx5_flow_handle *rule;

	arfs = mlx5e_fs_get_arfs(priv->fs);
	if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
		return;

	if (!arfs_rule->rule) {
		rule = arfs_add_rule(priv, arfs_rule);
		if (IS_ERR(rule))
			goto out;
		arfs_rule->rule = rule;
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_add++;
	} else {
		arfs_modify_rule_rq(priv, arfs_rule->rule,
				    arfs_rule->rxq);
	}
out:
	arfs_may_expire_flow(priv);
}

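/* Allocate and hash a new rule. Called under arfs_lock (hence
 * GFP_ATOMIC). filter_id is taken modulo RPS_NO_FILTER so the value
 * handed back to the RPS core never collides with its "no filter"
 * sentinel.
 */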
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
					 struct arfs_table *arfs_t,
					 const struct flow_keys *fk,
					 u16 rxq, u32 flow_id)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_rule *rule;
	struct arfs_tuple *tuple;

	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
	if (!rule) {
		priv->channel_stats[rxq]->rq.arfs_err++;
		return NULL;
	}

	rule->priv = priv;
	rule->rxq = rxq;
	INIT_WORK(&rule->arfs_work, arfs_handle_work);

	tuple = &rule->tuple;
	tuple->etype = fk->basic.n_proto;
	tuple->ip_proto = fk->basic.ip_proto;
	if (tuple->etype == htons(ETH_P_IP)) {
		tuple->src_ipv4 = fk->addrs.v4addrs.src;
		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
	} else {
		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}
	tuple->src_port = fk->ports.src;
	tuple->dst_port = fk->ports.dst;

	rule->flow_id = flow_id;
	rule->filter_id = arfs->last_filter_id++ % RPS_NO_FILTER;

	hlist_add_head(&rule->hlist,
		       arfs_hash_bucket(arfs_t, tuple->src_port,
					tuple->dst_port));
	return rule;
}

static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
{
	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
		return false;
	if (tuple->etype != fk->basic.n_proto)
		return false;
	if (tuple->etype == htons(ETH_P_IP))
		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
	if (tuple->etype == htons(ETH_P_IPV6))
		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
			       sizeof(struct in6_addr)) &&
		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
			       sizeof(struct in6_addr));
	return false;
}

static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
					const struct flow_keys *fk)
{
	struct arfs_rule *arfs_rule;
	struct hlist_head *head;

	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
	hlist_for_each_entry(arfs_rule, head, hlist) {
		if (arfs_cmp(&arfs_rule->tuple, fk))
			return arfs_rule;
	}

	return NULL;
}

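/* ndo_rx_flow_steer callback. Dissect the skb, reject unsupported or
 * encapsulated traffic, then either re-target an existing rule for this
 * flow or allocate a new one, and queue the work that programs the
 * hardware. Returns the filter ID that RPS will later pass back to
 * rps_may_expire_flow().
 */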
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_arfs_tables *arfs;
	struct arfs_rule *arfs_rule;
	struct arfs_table *arfs_t;
	struct flow_keys fk;

	arfs = mlx5e_fs_get_arfs(priv->fs);
	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
	if (!arfs_t)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&arfs->arfs_lock);
	if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
		spin_unlock_bh(&arfs->arfs_lock);
		return -EPERM;
	}

	arfs_rule = arfs_find_rule(arfs_t, &fk);
	if (arfs_rule) {
		if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
			spin_unlock_bh(&arfs->arfs_lock);
			return arfs_rule->filter_id;
		}

		priv->channel_stats[rxq_index]->rq.arfs_request_in++;
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_request_out++;
		arfs_rule->rxq = rxq_index;
	} else {
		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
		if (!arfs_rule) {
			spin_unlock_bh(&arfs->arfs_lock);
			return -ENOMEM;
		}
	}
	queue_work(arfs->wq, &arfs_rule->arfs_work);
	spin_unlock_bh(&arfs->arfs_lock);
	return arfs_rule->filter_id;
}