// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES.

#include "rss.h"

#define mlx5e_rss_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "%s:%d:(pid %d): " format,	\
		 __func__, __LINE__, current->pid,		\
		 ##__VA_ARGS__)

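/* Default RSS configuration per traffic type: the L3/L4 protocols matched by
 * the corresponding TIR and the packet fields that feed its RX hash.
 */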
static const struct mlx5e_rss_params_traffic_type rss_default_config[MLX5E_NUM_INDIR_TIRS] = {
	[MLX5_TT_IPV4_TCP] = {
		.l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
		.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
		.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5_TT_IPV6_TCP] = {
		.l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
		.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
		.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5_TT_IPV4_UDP] = {
		.l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
		.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
		.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5_TT_IPV6_UDP] = {
		.l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
		.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
		.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5_TT_IPV4_IPSEC_AH] = {
		.l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
		.l4_prot_type = 0,
		.rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5_TT_IPV6_IPSEC_AH] = {
		.l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
		.l4_prot_type = 0,
		.rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5_TT_IPV4_IPSEC_ESP] = {
		.l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
		.l4_prot_type = 0,
		.rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5_TT_IPV6_IPSEC_ESP] = {
		.l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
		.l4_prot_type = 0,
		.rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5_TT_IPV4] = {
		.l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
		.l4_prot_type = 0,
		.rx_hash_fields = MLX5_HASH_IP,
	},
	[MLX5_TT_IPV6] = {
		.l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
		.l4_prot_type = 0,
		.rx_hash_fields = MLX5_HASH_IP,
	},
};

struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt)
{
	return rss_default_config[tt];
}

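/* State of a single RSS context: hash parameters, indirection table, the
 * outer and (optional) inner indirect TIRs per traffic type, and the RQT they
 * all point to.
 */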
struct mlx5e_rss {
	struct mlx5e_rss_params_hash hash;
	struct mlx5e_rss_params_indir indir;
	u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir *tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_rqt rqt;
	struct mlx5_core_dev *mdev; /* primary */
	u32 drop_rqn;
	bool inner_ft_support;
	bool enabled;
	refcount_t refcnt;
};

bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss)
{
	return rss->inner_ft_support;
}

void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
{
	rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
}

int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
				u32 actual_table_size, u32 max_table_size)
{
	indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL);
	if (!indir->table)
		return -ENOMEM;

	indir->max_table_size = max_table_size;
	indir->actual_table_size = actual_table_size;

	return 0;
}

void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir)
{
	kvfree(indir->table);
}

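/* Copy the whole RSS state from @from to @to while keeping the destination's
 * own indirection table buffer, whose contents are copied separately. Both
 * contexts must use the same table sizes.
 */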
static int mlx5e_rss_copy(struct mlx5e_rss *to, const struct mlx5e_rss *from)
{
	u32 *dst_indir_table;

	if (to->indir.actual_table_size != from->indir.actual_table_size ||
	    to->indir.max_table_size != from->indir.max_table_size) {
		mlx5e_rss_warn(to->mdev,
			       "Failed to copy RSS due to size mismatch, src (actual %u, max %u) != dst (actual %u, max %u)\n",
			       from->indir.actual_table_size, from->indir.max_table_size,
			       to->indir.actual_table_size, to->indir.max_table_size);
		return -EINVAL;
	}

	dst_indir_table = to->indir.table;
	*to = *from;
	to->indir.table = dst_indir_table;
	memcpy(to->indir.table, from->indir.table,
	       from->indir.actual_table_size * sizeof(*from->indir.table));
	return 0;
}

static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from)
{
	struct mlx5e_rss *rss;
	int err;

	rss = kvzalloc(sizeof(*rss), GFP_KERNEL);
	if (!rss)
		return ERR_PTR(-ENOMEM);

	err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size,
					  from->indir.max_table_size);
	if (err)
		goto err_free_rss;

	err = mlx5e_rss_copy(rss, from);
	if (err)
		goto err_free_indir;

	return rss;

err_free_indir:
	mlx5e_rss_params_indir_cleanup(&rss->indir);
err_free_rss:
	kvfree(rss);
	return ERR_PTR(err);
}

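/* Reset the hash parameters to their defaults: symmetric Toeplitz hashing
 * with the kernel's random RSS key and the default RX hash fields per traffic
 * type.
 */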
static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
{
	enum mlx5_traffic_types tt;

	rss->hash.symmetric = true;
	rss->hash.hfunc = ETH_RSS_HASH_TOP;
	netdev_rss_key_fill(rss->hash.toeplitz_hash_key,
			    sizeof(rss->hash.toeplitz_hash_key));
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		rss->rx_hash_fields[tt] =
			mlx5e_rss_get_default_tt_config(tt).rx_hash_fields;
}

static struct mlx5e_tir **rss_get_tirp(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
				       bool inner)
{
	return inner ? &rss->inner_tir[tt] : &rss->tir[tt];
}

static struct mlx5e_tir *rss_get_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
				     bool inner)
{
	return *rss_get_tirp(rss, tt, inner);
}

static struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
{
	struct mlx5e_rss_params_traffic_type rss_tt;

	rss_tt = mlx5e_rss_get_default_tt_config(tt);
	rss_tt.rx_hash_fields = rss->rx_hash_fields[tt];
	return rss_tt;
}

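/* Create the outer or inner indirect TIR for traffic type @tt, pointing it at
 * the RSS RQT and programming its packet merge and RSS hash parameters.
 * Fails if the TIR already exists or if an inner TIR is requested while inner
 * FT is not supported.
 */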
static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
				enum mlx5_traffic_types tt,
				const struct mlx5e_packet_merge_param *init_pkt_merge_param,
				bool inner)
{
	struct mlx5e_rss_params_traffic_type rss_tt;
	struct mlx5e_tir_builder *builder;
	struct mlx5e_tir **tir_p;
	struct mlx5e_tir *tir;
	u32 rqtn;
	int err;

	if (inner && !rss->inner_ft_support) {
		mlx5e_rss_warn(rss->mdev,
			       "Cannot create inner indirect TIR[%d], RSS inner FT is not supported.\n",
			       tt);
		return -EINVAL;
	}

	tir_p = rss_get_tirp(rss, tt, inner);
	if (*tir_p)
		return -EINVAL;

	tir = kvzalloc(sizeof(*tir), GFP_KERNEL);
	if (!tir)
		return -ENOMEM;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder) {
		err = -ENOMEM;
		goto free_tir;
	}

	rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
	mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
				    rqtn, rss->inner_ft_support);
	mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
	rss_tt = mlx5e_rss_get_tt_config(rss, tt);
	mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);

	err = mlx5e_tir_init(tir, builder, rss->mdev, true);
	mlx5e_tir_builder_free(builder);
	if (err) {
		mlx5e_rss_warn(rss->mdev, "Failed to create %sindirect TIR: err = %d, tt = %d\n",
			       inner ? "inner " : "", err, tt);
		goto free_tir;
	}

	*tir_p = tir;
	return 0;

free_tir:
	kvfree(tir);
	return err;
}

static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
				  bool inner)
{
	struct mlx5e_tir **tir_p;
	struct mlx5e_tir *tir;

	tir_p = rss_get_tirp(rss, tt, inner);
	if (!*tir_p)
		return;

	tir = *tir_p;
	mlx5e_tir_destroy(tir);
	kvfree(tir);
	*tir_p = NULL;
}

static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
				 const struct mlx5e_packet_merge_param *init_pkt_merge_param,
				 bool inner)
{
	enum mlx5_traffic_types tt, max_tt;
	int err;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
		if (err)
			goto err_destroy_tirs;
	}

	return 0;

err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_rss_destroy_tir(rss, tt, inner);
	return err;
}

static void mlx5e_rss_destroy_tirs(struct mlx5e_rss *rss, bool inner)
{
	enum mlx5_traffic_types tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_rss_destroy_tir(rss, tt, inner);
}

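/* Re-apply the current hash configuration to an already created TIR. A TIR
 * that was never created is silently skipped.
 */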
static int mlx5e_rss_update_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
				bool inner)
{
	struct mlx5e_rss_params_traffic_type rss_tt;
	struct mlx5e_tir_builder *builder;
	struct mlx5e_tir *tir;
	int err;

	tir = rss_get_tir(rss, tt, inner);
	if (!tir)
		return 0;

	builder = mlx5e_tir_builder_alloc(true);
	if (!builder)
		return -ENOMEM;

	rss_tt = mlx5e_rss_get_tt_config(rss, tt);

	mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
	err = mlx5e_tir_modify(tir, builder);

	mlx5e_tir_builder_free(builder);
	return err;
}

static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
{
	enum mlx5_traffic_types tt;
	int err, retval;

	retval = 0;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5e_rss_update_tir(rss, tt, false);
		if (err) {
			retval = retval ? : err;
			mlx5e_rss_warn(rss->mdev,
				       "Failed to update RSS hash of indirect TIR for traffic type %d: err = %d\n",
				       tt, err);
		}

		if (!rss->inner_ft_support)
			continue;

		err = mlx5e_rss_update_tir(rss, tt, true);
		if (err) {
			retval = retval ? : err;
			mlx5e_rss_warn(rss->mdev,
				       "Failed to update RSS hash of inner indirect TIR for traffic type %d: err = %d\n",
				       tt, err);
		}
	}
	return retval;
}

static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss)
{
	mlx5e_rss_params_init(rss);
	refcount_set(&rss->refcnt, 1);

	return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true,
				     rss->drop_rqn, rss->indir.max_table_size);
}

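/* Allocate and initialize an RSS context. The RQT is created pointing at the
 * drop RQ; unless @type is MLX5E_RSS_INIT_NO_TIRS, the outer (and, when inner
 * FT is supported, the inner) indirect TIRs are created as well.
 */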
struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
				 const struct mlx5e_packet_merge_param *init_pkt_merge_param,
				 enum mlx5e_rss_init_type type, unsigned int nch,
				 unsigned int max_nch)
{
	struct mlx5e_rss *rss;
	int err;

	rss = kvzalloc(sizeof(*rss), GFP_KERNEL);
	if (!rss)
		return ERR_PTR(-ENOMEM);

	err = mlx5e_rss_params_indir_init(&rss->indir, mdev,
					  mlx5e_rqt_size(mdev, nch),
					  mlx5e_rqt_size(mdev, max_nch));
	if (err)
		goto err_free_rss;

	rss->mdev = mdev;
	rss->inner_ft_support = inner_ft_support;
	rss->drop_rqn = drop_rqn;

	err = mlx5e_rss_init_no_tirs(rss);
	if (err)
		goto err_free_indir;

	if (type == MLX5E_RSS_INIT_NO_TIRS)
		goto out;

	err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
	if (err)
		goto err_destroy_rqt;

	if (inner_ft_support) {
		err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true);
		if (err)
			goto err_destroy_tirs;
	}

out:
	return rss;

err_destroy_tirs:
	mlx5e_rss_destroy_tirs(rss, false);
err_destroy_rqt:
	mlx5e_rqt_destroy(&rss->rqt);
err_free_indir:
	mlx5e_rss_params_indir_cleanup(&rss->indir);
err_free_rss:
	kvfree(rss);
	return ERR_PTR(err);
}

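/* Destroy the RSS context: TIRs, RQT and indirection table. Returns -EBUSY if
 * the context is still referenced by someone else.
 */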
int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
{
	if (!refcount_dec_if_one(&rss->refcnt))
		return -EBUSY;

	mlx5e_rss_destroy_tirs(rss, false);

	if (rss->inner_ft_support)
		mlx5e_rss_destroy_tirs(rss, true);

	mlx5e_rqt_destroy(&rss->rqt);
	mlx5e_rss_params_indir_cleanup(&rss->indir);
	kvfree(rss);

	return 0;
}

void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss)
{
	refcount_inc(&rss->refcnt);
}

void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss)
{
	refcount_dec(&rss->refcnt);
}

unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss)
{
	return refcount_read(&rss->refcnt);
}

u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
		       bool inner)
{
	struct mlx5e_tir *tir;

	WARN_ON(inner && !rss->inner_ft_support);
	tir = rss_get_tir(rss, tt, inner);
	WARN_ON(!tir);

	return mlx5e_tir_get_tirn(tir);
}

u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss)
{
	return mlx5e_rqt_get_rqtn(&rss->rqt);
}

bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner)
{
	return !!rss_get_tir(rss, tt, inner);
}

/* Fill the "tirn" output parameter.
 * Create the requested TIR if this is its first usage.
 */
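/* Illustrative usage sketch (not called anywhere in this file; "rss" and
 * "pkt_merge_param" stand for state the caller already holds):
 *
 *	u32 tirn;
 *	int err;
 *
 *	err = mlx5e_rss_obtain_tirn(rss, MLX5_TT_IPV4_TCP,
 *				    pkt_merge_param, false, &tirn);
 *	if (err)
 *		return err;
 */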
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
			  enum mlx5_traffic_types tt,
			  const struct mlx5e_packet_merge_param *init_pkt_merge_param,
			  bool inner, u32 *tirn)
{
	struct mlx5e_tir *tir;

	tir = rss_get_tir(rss, tt, inner);
	if (!tir) { /* TIR doesn't exist, create one */
		int err;

		err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
		if (err)
			return err;
		tir = rss_get_tir(rss, tt, inner);
	}

	*tirn = mlx5e_tir_get_tirn(tir);
	return 0;
}

static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
	int err;

	err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, vhca_ids, num_rqns, rss->hash.hfunc,
				       &rss->indir);
	if (err)
		mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
			       mlx5e_rqt_get_rqtn(&rss->rqt), err);
	return err;
}

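/* Mark the RSS context as enabled and redirect its RQT to the given channel
 * RQs. mlx5e_rss_disable() performs the reverse, pointing the RQT back at the
 * drop RQ.
 */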
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
	rss->enabled = true;
	mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
}

void mlx5e_rss_disable(struct mlx5e_rss *rss)
{
	int err;

	rss->enabled = false;
	err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL);
	if (err)
		mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
			       mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
}

int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
				     struct mlx5e_packet_merge_param *pkt_merge_param)
{
	struct mlx5e_tir_builder *builder;
	enum mlx5_traffic_types tt;
	int err, final_err;

	builder = mlx5e_tir_builder_alloc(true);
	if (!builder)
		return -ENOMEM;

	mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);

	final_err = 0;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tir *tir;

		tir = rss_get_tir(rss, tt, false);
		if (!tir)
			goto inner_tir;
		err = mlx5e_tir_modify(tir, builder);
		if (err) {
			mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of indirect TIR %#x for traffic type %d: err = %d\n",
				       mlx5e_tir_get_tirn(tir), tt, err);
			if (!final_err)
				final_err = err;
		}

inner_tir:
		if (!rss->inner_ft_support)
			continue;

		tir = rss_get_tir(rss, tt, true);
		if (!tir)
			continue;
		err = mlx5e_tir_modify(tir, builder);
		if (err) {
			mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of inner indirect TIR %#x for traffic type %d: err = %d\n",
				       mlx5e_tir_get_tirn(tir), tt, err);
			if (!final_err)
				final_err = err;
		}
	}

	mlx5e_tir_builder_free(builder);
	return final_err;
}

int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
	if (indir)
		memcpy(indir, rss->indir.table,
		       rss->indir.actual_table_size * sizeof(*rss->indir.table));

	if (key)
		memcpy(key, rss->hash.toeplitz_hash_key,
		       sizeof(rss->hash.toeplitz_hash_key));

	if (hfunc)
		*hfunc = rss->hash.hfunc;

	if (symmetric)
		*symmetric = rss->hash.symmetric;

	return 0;
}

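/* Apply new RXFH settings: hash function, key, indirection table and hash
 * symmetry. A temporary copy of the old state is kept so the context can be
 * rolled back if re-applying the indirection fails.
 */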
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
		       const u8 *key, const u8 *hfunc, const bool *symmetric,
		       u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
	bool changed_indir = false;
	bool changed_hash = false;
	struct mlx5e_rss *old_rss;
	int err = 0;

	old_rss = mlx5e_rss_init_copy(rss);
	if (IS_ERR(old_rss))
		return PTR_ERR(old_rss);

	if (hfunc && *hfunc != rss->hash.hfunc) {
		switch (*hfunc) {
		case ETH_RSS_HASH_XOR:
		case ETH_RSS_HASH_TOP:
			break;
		default:
			err = -EINVAL;
			goto out;
		}
		changed_hash = true;
		changed_indir = true;
		rss->hash.hfunc = *hfunc;
	}

	if (key) {
		if (rss->hash.hfunc == ETH_RSS_HASH_TOP)
			changed_hash = true;
		memcpy(rss->hash.toeplitz_hash_key, key,
		       sizeof(rss->hash.toeplitz_hash_key));
	}

	if (indir) {
		changed_indir = true;

		memcpy(rss->indir.table, indir,
		       rss->indir.actual_table_size * sizeof(*rss->indir.table));
	}

	if (symmetric) {
		rss->hash.symmetric = *symmetric;
		changed_hash = true;
	}

	if (changed_indir && rss->enabled) {
		err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
		if (err) {
			mlx5e_rss_copy(rss, old_rss);
			goto out;
		}
	}

	if (changed_hash)
		mlx5e_rss_update_tirs(rss);

out:
	mlx5e_rss_params_indir_cleanup(&old_rss->indir);
	kvfree(old_rss);

	return err;
}

struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss)
{
	return rss->hash;
}

u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
{
	return rss->rx_hash_fields[tt];
}

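/* Change the RX hash fields of one traffic type and push the new value to the
 * outer and, when inner FT is supported, the inner TIR, reverting to the old
 * fields on failure.
 */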
int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
			      u8 rx_hash_fields)
{
	u8 old_rx_hash_fields;
	int err;

	old_rx_hash_fields = rss->rx_hash_fields[tt];

	if (old_rx_hash_fields == rx_hash_fields)
		return 0;

	rss->rx_hash_fields[tt] = rx_hash_fields;

	err = mlx5e_rss_update_tir(rss, tt, false);
	if (err) {
		rss->rx_hash_fields[tt] = old_rx_hash_fields;
		mlx5e_rss_warn(rss->mdev,
			       "Failed to update RSS hash fields of indirect TIR for traffic type %d: err = %d\n",
			       tt, err);
		return err;
	}

	if (!(rss->inner_ft_support))
		return 0;

	err = mlx5e_rss_update_tir(rss, tt, true);
	if (err) {
		/* Partial update happened. Try to revert - it may fail too, but
		 * there is nothing more we can do.
		 */
		rss->rx_hash_fields[tt] = old_rx_hash_fields;
		mlx5e_rss_warn(rss->mdev,
			       "Failed to update RSS hash fields of inner indirect TIR for traffic type %d: err = %d\n",
			       tt, err);
		if (mlx5e_rss_update_tir(rss, tt, false))
			mlx5e_rss_warn(rss->mdev,
				       "Partial update of RSS hash fields happened: failed to revert indirect TIR for traffic type %d to the old values\n",
				       tt);
	}

	return err;
}

void mlx5e_rss_set_indir_uniform(struct mlx5e_rss *rss, unsigned int nch)
{
	mlx5e_rss_params_indir_init_uniform(&rss->indir, nch);
}