// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */

#include "rx_res.h"
#include "channels.h"
#include "params.h"

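/* Runtime RX resources of an mlx5e netdev: per-channel direct RQTs/TIRs, up
 * to MLX5E_MAX_NUM_RSS RSS contexts, an optional PTP RQT/TIR, and the shadow
 * state (RQ numbers, packet merge parameters) needed to rebuild them.
 */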
struct mlx5e_rx_res {
	struct mlx5_core_dev *mdev; /* primary */
	enum mlx5e_rx_res_features features;
	unsigned int max_nch;
	u32 drop_rqn;

	struct mlx5e_packet_merge_param pkt_merge_param;
	struct rw_semaphore pkt_merge_param_sem;

	struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
	bool rss_active;
	u32 *rss_rqns;
	u32 *rss_vhca_ids;
	unsigned int rss_nch;

	struct {
		struct mlx5e_rqt direct_rqt;
		struct mlx5e_tir direct_tir;
	} *channels;

	struct {
		struct mlx5e_rqt rqt;
		struct mlx5e_tir tir;
	} ptp;
};

/* API for rx_res_rss_* */

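/* Returns a pointer into rss_vhca_ids at @offset, or NULL when the
 * MLX5E_RX_RES_FEATURE_MULTI_VHCA feature is not enabled.
 */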
static u32 *get_vhca_ids(struct mlx5e_rx_res *res, int offset)
{
	bool multi_vhca = res->features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;

	return multi_vhca ? res->rss_vhca_ids + offset : NULL;
}

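/* Propagate a change in the number of channels to the indirection tables of
 * all existing RSS contexts.
 */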
void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		if (res->rss[i])
			mlx5e_rss_params_indir_modify_actual_size(res->rss[i], nch);
	}
}

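/* Create the default RSS context in slot 0, including its TIRs, and give it a
 * uniform indirection table over the first init_nch channels.
 */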
static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
				     unsigned int init_nch)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_rss *rss;

	if (WARN_ON(res->rss[0]))
		return -EINVAL;

	rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
			     &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch);
	if (IS_ERR(rss))
		return PTR_ERR(rss);

	mlx5e_rss_set_indir_uniform(rss, init_nch);

	res->rss[0] = rss;

	return 0;
}

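/* Create an additional RSS context (without TIRs) in the first free slot and
 * report its index in *rss_idx. If channels are already active, the new
 * context is enabled on the current set of RQs right away.
 */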
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_rss *rss;
	int i;

	for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
		if (!res->rss[i])
			break;

	if (i == MLX5E_MAX_NUM_RSS)
		return -ENOSPC;

	rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
			     &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch,
			     res->max_nch);
	if (IS_ERR(rss))
		return PTR_ERR(rss);

	mlx5e_rss_set_indir_uniform(rss, init_nch);
	if (res->rss_active) {
		u32 *vhca_ids = get_vhca_ids(res, 0);

		mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
	}

	res->rss[i] = rss;
	*rss_idx = i;

	return 0;
}

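/* Tear down an RSS context and clear its slot; the slot is left untouched if
 * mlx5e_rss_cleanup() fails (e.g. the context is still referenced).
 */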
static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	struct mlx5e_rss *rss = res->rss[rss_idx];
	int err;

	err = mlx5e_rss_cleanup(rss);
	if (err)
		return err;

	res->rss[rss_idx] = NULL;

	return 0;
}

int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -EINVAL;

	return __mlx5e_rx_res_rss_destroy(res, rss_idx);
}

static void mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res *res)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];
		int err;

		if (!rss)
			continue;

		err = __mlx5e_rx_res_rss_destroy(res, i);
		if (err) {
			unsigned int refcount;

			refcount = mlx5e_rss_refcnt_read(rss);
			mlx5_core_warn(res->mdev,
				       "Failed to destroy RSS context %d, refcount = %u, err = %d\n",
				       i, refcount, err);
		}
	}
}

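/* Apply the currently known set of RQs (rss_rqns/rss_vhca_ids, rss_nch) to
 * every existing RSS context and mark RSS as active.
 */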
static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
{
	int i;

	res->rss_active = true;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];
		u32 *vhca_ids;

		if (!rss)
			continue;
		vhca_ids = get_vhca_ids(res, 0);
		mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
	}
}

static void mlx5e_rx_res_rss_disable(struct mlx5e_rx_res *res)
{
	int i;

	res->rss_active = false;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];

		if (!rss)
			continue;
		mlx5e_rss_disable(rss);
	}
}

/* Updates the indirection table SW shadow, does not update the HW resources yet */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch)
{
	WARN_ON_ONCE(res->rss_active);
	mlx5e_rss_set_indir_uniform(res->rss[0], nch);
}

int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			      u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric);
}

int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			      const u32 *indir, const u8 *key, const u8 *hfunc,
			      const bool *symmetric)
{
	u32 *vhca_ids = get_vhca_ids(res, 0);
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, symmetric,
				  res->rss_rqns, vhca_ids, res->rss_nch);
}

int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
				     enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_get_hash_fields(rss, tt);
}

int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
				     enum mlx5_traffic_types tt, u8 rx_hash_fields)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields);
}

int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res)
{
	int i, cnt;

	cnt = 0;
	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
		if (res->rss[i])
			cnt++;

	return cnt;
}

int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss)
{
	int i;

	if (!rss)
		return -EINVAL;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
		if (rss == res->rss[i])
			return i;

	return -ENOENT;
}

struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
{
	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return NULL;

	return res->rss[rss_idx];
}

/* End of API rx_res_rss_* */

static void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
	kvfree(res->rss_vhca_ids);
	kvfree(res->rss_rqns);
	kvfree(res);
}

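/* Allocate the rx_res struct together with the per-channel RQN array and,
 * when multi_vhca is set, the matching vhca ID array.
 */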
static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch,
					       bool multi_vhca)
{
	struct mlx5e_rx_res *rx_res;

	rx_res = kvzalloc(sizeof(*rx_res), GFP_KERNEL);
	if (!rx_res)
		return NULL;

	rx_res->rss_rqns = kvcalloc(max_nch, sizeof(*rx_res->rss_rqns), GFP_KERNEL);
	if (!rx_res->rss_rqns) {
		kvfree(rx_res);
		return NULL;
	}

	if (multi_vhca) {
		rx_res->rss_vhca_ids = kvcalloc(max_nch, sizeof(*rx_res->rss_vhca_ids), GFP_KERNEL);
		if (!rx_res->rss_vhca_ids) {
			kvfree(rx_res->rss_rqns);
			kvfree(rx_res);
			return NULL;
		}
	}

	return rx_res;
}

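/* Create a direct RQT and a direct TIR for each of the max_nch channels. All
 * RQTs initially point at the drop RQ; on failure, everything created so far
 * is destroyed.
 */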
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err = 0;
	int ix;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	res->channels = kvcalloc(res->max_nch, sizeof(*res->channels), GFP_KERNEL);
	if (!res->channels) {
		err = -ENOMEM;
		goto out;
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
					    res->mdev, false, res->drop_rqn,
					    mlx5e_rqt_size(res->mdev, res->max_nch));
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_rqts;
		}
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
					    inner_ft_support);
		mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
		mlx5e_tir_builder_build_direct(builder);

		err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct TIR: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

	goto out;

err_destroy_direct_tirs:
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);

	ix = res->max_nch;
err_destroy_direct_rqts:
	while (--ix >= 0)
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);

	kvfree(res->channels);

out:
	mlx5e_tir_builder_free(builder);

	return err;
}

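/* Create the dedicated RQT and TIR used for PTP traffic. */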
static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn,
				    mlx5e_rqt_size(res->mdev, res->max_nch));
	if (err)
		goto out;

	/* Separated from the channels RQs, does not share pkt_merge state with them */
	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
				    mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);

	err = mlx5e_tir_init(&res->ptp.tir, builder, res->mdev, true);
	if (err)
		goto err_destroy_ptp_rqt;

	goto out;

err_destroy_ptp_rqt:
	mlx5e_rqt_destroy(&res->ptp.rqt);

out:
	mlx5e_tir_builder_free(builder);
	return err;
}

static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
{
	unsigned int ix;

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
	}

	kvfree(res->channels);
}

static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_tir_destroy(&res->ptp.tir);
	mlx5e_rqt_destroy(&res->ptp.rqt);
}

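/* Allocate and initialize all RX resources: the default RSS context, the
 * per-channel direct RQTs/TIRs and the PTP RQT/TIR. On error, the resources
 * created so far are released and an ERR_PTR is returned.
 */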
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
		    unsigned int max_nch, u32 drop_rqn,
		    const struct mlx5e_packet_merge_param *init_pkt_merge_param,
		    unsigned int init_nch)
{
	bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
	struct mlx5e_rx_res *res;
	int err;

	res = mlx5e_rx_res_alloc(mdev, max_nch, multi_vhca);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->mdev = mdev;
	res->features = features;
	res->max_nch = max_nch;
	res->drop_rqn = drop_rqn;

	res->pkt_merge_param = *init_pkt_merge_param;
	init_rwsem(&res->pkt_merge_param_sem);

	err = mlx5e_rx_res_rss_init_def(res, init_nch);
	if (err)
		goto err_rx_res_free;

	err = mlx5e_rx_res_channels_init(res);
	if (err)
		goto err_rss_destroy;

	err = mlx5e_rx_res_ptp_init(res);
	if (err)
		goto err_channels_destroy;

	return res;

err_channels_destroy:
	mlx5e_rx_res_channels_destroy(res);
err_rss_destroy:
	__mlx5e_rx_res_rss_destroy(res, 0);
err_rx_res_free:
	mlx5e_rx_res_free(res);
	return ERR_PTR(err);
}

void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_rx_res_ptp_destroy(res);
	mlx5e_rx_res_channels_destroy(res);
	mlx5e_rx_res_rss_destroy_all(res);
	mlx5e_rx_res_free(res);
}

unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res)
{
	return res->max_nch;
}

u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
}

u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_get_tirn(rss, tt, false);
}

u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_get_tirn(rss, tt, true);
}

u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
{
	WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_PTP));
	return mlx5e_tir_get_tirn(&res->ptp.tir);
}

u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}

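/* Point the direct RQT of channel ix at the RQ recorded in rss_rqns[ix]. A
 * failure to redirect is only reported with a warning.
 */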
static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
						 struct mlx5e_channels *chs,
						 unsigned int ix)
{
	u32 *vhca_id = get_vhca_ids(res, ix);
	u32 rqn = res->rss_rqns[ix];
	int err;

	err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn, vhca_id);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
			       rqn, ix, err);
}

static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
						    unsigned int ix)
{
	int err;

	err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn, NULL);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
			       res->drop_rqn, ix, err);
}

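/* Record the RQ numbers (regular or XSK) of the active channels, enable RSS
 * on them, point the direct RQTs of active channels at their RQs and the
 * remaining direct RQTs at the drop RQ, and redirect the PTP RQT if the PTP
 * feature is enabled.
 */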
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
{
	unsigned int nch, ix;
	int err;

	nch = mlx5e_channels_get_num(chs);

	for (ix = 0; ix < chs->num; ix++) {
		u32 *vhca_id = get_vhca_ids(res, ix);

		if (mlx5e_channels_is_xsk(chs, ix))
			mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
		else
			mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
	}
	res->rss_nch = chs->num;

	mlx5e_rx_res_rss_enable(res);

	for (ix = 0; ix < nch; ix++)
		mlx5e_rx_res_channel_activate_direct(res, chs, ix);
	for (ix = nch; ix < res->max_nch; ix++)
		mlx5e_rx_res_channel_deactivate_direct(res, ix);

	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
		u32 rqn;

		if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
			rqn = res->drop_rqn;

		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn, NULL);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				       rqn, err);
	}
}

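/* Disable RSS and point all direct RQTs (and the PTP RQT, if present) at the
 * drop RQ.
 */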
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
{
	unsigned int ix;
	int err;

	mlx5e_rx_res_rss_disable(res);

	for (ix = 0; ix < res->max_nch; ix++)
		mlx5e_rx_res_channel_deactivate_direct(res, ix);

	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn, NULL);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				       res->drop_rqn, err);
	}
}

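/* Switch channel ix between its XSK RQ and its regular RQ, then refresh the
 * RSS contexts and the channel's direct RQT with the new RQ number.
 */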
void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
			     unsigned int ix, bool xsk)
{
	u32 *vhca_id = get_vhca_ids(res, ix);

	if (xsk)
		mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
	else
		mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);

	mlx5e_rx_res_rss_enable(res);

	mlx5e_rx_res_channel_activate_direct(res, chs, ix);
}

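/* Update the packet merge (e.g. LRO) parameters of all RSS TIRs and all
 * direct TIRs under the pkt_merge_param_sem write lock. The first error is
 * remembered and returned, but the update is attempted on every TIR.
 */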
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
					struct mlx5e_packet_merge_param *pkt_merge_param)
{
	struct mlx5e_tir_builder *builder;
	int err, final_err;
	unsigned int ix;

	builder = mlx5e_tir_builder_alloc(true);
	if (!builder)
		return -ENOMEM;

	down_write(&res->pkt_merge_param_sem);
	res->pkt_merge_param = *pkt_merge_param;

	mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);

	final_err = 0;

	for (ix = 0; ix < MLX5E_MAX_NUM_RSS; ix++) {
		struct mlx5e_rss *rss = res->rss[ix];

		if (!rss)
			continue;

		err = mlx5e_rss_packet_merge_set_param(rss, pkt_merge_param);
		if (err)
			final_err = final_err ? : err;
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to update packet merge state of direct TIR %#x for channel %u: err = %d\n",
				       mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
			if (!final_err)
				final_err = err;
		}
	}

	up_write(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_free(builder);
	return final_err;
}

struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res)
{
	return mlx5e_rss_get_hash(res->rss[0]);
}

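/* Create a TLS TIR on top of the direct RQT of queue rxq. The packet merge
 * parameters are read under pkt_merge_param_sem to keep the new TIR
 * consistent with concurrent updates.
 */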
int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
				struct mlx5e_tir *tir)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	u32 rqtn;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);

	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);
	mlx5e_tir_builder_build_tls(builder);
	down_read(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
	err = mlx5e_tir_init(tir, builder, res->mdev, false);
	up_read(&res->pkt_merge_param_sem);

	mlx5e_tir_builder_free(builder);

	return err;
}