1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
3
4 #include "rx_res.h"
5 #include "channels.h"
6 #include "params.h"
7
/* Container for all RX-side steering resources of a netdev: the RSS
 * contexts, one direct RQT+TIR pair per channel, and the PTP RQT/TIR.
 */
struct mlx5e_rx_res {
	struct mlx5_core_dev *mdev; /* primary */
	enum mlx5e_rx_res_features features; /* feature flags fixed at creation */
	unsigned int max_nch; /* number of channels the resources are sized for */
	u32 drop_rqn; /* RQ number used to park RQTs while deactivated */

	/* SW shadow of the packet merge parameters applied to the TIRs */
	struct mlx5e_packet_merge_param pkt_merge_param;
	/* Serializes pkt_merge_param readers (TLS TIR creation) against
	 * writers (mlx5e_rx_res_packet_merge_set_param).
	 */
	struct rw_semaphore pkt_merge_param_sem;

	struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS]; /* slot 0 is the default context */
	bool rss_active; /* set by rss_enable, cleared by rss_disable */
	u32 *rss_rqns; /* RQNs currently fed into the RSS contexts (max_nch entries) */
	u32 *rss_vhca_ids; /* allocated only when MULTI_VHCA feature is set */
	unsigned int rss_nch; /* number of valid entries in rss_rqns */

	/* Per-channel direct steering objects, max_nch entries */
	struct {
		struct mlx5e_rqt direct_rqt;
		struct mlx5e_tir direct_tir;
	} *channels;

	/* Dedicated pair for the PTP RQ (used when MLX5E_RX_RES_FEATURE_PTP) */
	struct {
		struct mlx5e_rqt rqt;
		struct mlx5e_tir tir;
	} ptp;
};
33
34 /* API for rx_res_rss_* */
35
get_vhca_ids(struct mlx5e_rx_res * res,int offset)36 static u32 *get_vhca_ids(struct mlx5e_rx_res *res, int offset)
37 {
38 bool multi_vhca = res->features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
39
40 return multi_vhca ? res->rss_vhca_ids + offset : NULL;
41 }
42
/* Update the actual indirection size of every existing RSS context to @nch. */
void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];

		if (rss)
			mlx5e_rss_params_indir_modify_actual_size(rss, nch);
	}
}
52
mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res * res,unsigned int init_nch)53 static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
54 unsigned int init_nch)
55 {
56 bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
57 struct mlx5e_rss_init_params init_params;
58 struct mlx5e_rss_params rss_params;
59 struct mlx5e_rss *rss;
60
61 if (WARN_ON(res->rss[0]))
62 return -EINVAL;
63
64 init_params = (struct mlx5e_rss_init_params) {
65 .type = MLX5E_RSS_INIT_TIRS,
66 .pkt_merge_param = &res->pkt_merge_param,
67 .nch = init_nch,
68 .max_nch = res->max_nch,
69 };
70
71 rss_params = (struct mlx5e_rss_params) {
72 .inner_ft_support = inner_ft_support,
73 .drop_rqn = res->drop_rqn,
74 .self_lb_blk =
75 res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK,
76 };
77
78 rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
79 if (IS_ERR(rss))
80 return PTR_ERR(rss);
81
82 mlx5e_rss_set_indir_uniform(rss, init_params.nch);
83
84 res->rss[0] = rss;
85
86 return 0;
87 }
88
/* Create an additional RSS context (no TIRs of its own) in slot @rss_idx.
 * When RX is already active, the new context is enabled with the current
 * RQN table right away.
 */
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int init_nch)
{
	struct mlx5e_rss_init_params init_params = {
		.type = MLX5E_RSS_INIT_NO_TIRS,
		.pkt_merge_param = &res->pkt_merge_param,
		.nch = init_nch,
		.max_nch = res->max_nch,
	};
	struct mlx5e_rss_params rss_params = {
		.inner_ft_support =
			res->features & MLX5E_RX_RES_FEATURE_INNER_FT,
		.drop_rqn = res->drop_rqn,
		.self_lb_blk =
			res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK,
	};
	struct mlx5e_rss *rss;

	/* The slot must be free. */
	if (WARN_ON_ONCE(res->rss[rss_idx]))
		return -ENOSPC;

	rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
	if (IS_ERR(rss))
		return PTR_ERR(rss);

	mlx5e_rss_set_indir_uniform(rss, init_nch);
	if (res->rss_active)
		mlx5e_rss_enable(rss, res->rss_rqns, get_vhca_ids(res, 0),
				 res->rss_nch);

	res->rss[rss_idx] = rss;

	return 0;
}
128
/* Clean up the RSS context in slot @rss_idx; the slot is cleared only on
 * success so a failed cleanup leaves the context reachable.
 */
static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	int err = mlx5e_rss_cleanup(res->rss[rss_idx]);

	if (!err)
		res->rss[rss_idx] = NULL;

	return err;
}
142
/* Validate @rss_idx and destroy the RSS context it refers to.
 * Returns -EINVAL for an out-of-range index or an empty slot.
 */
int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	if (rss_idx >= MLX5E_MAX_NUM_RSS || !res->rss[rss_idx])
		return -EINVAL;

	return __mlx5e_rx_res_rss_destroy(res, rss_idx);
}
156
mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res * res)157 static void mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res *res)
158 {
159 int i;
160
161 for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
162 struct mlx5e_rss *rss = res->rss[i];
163 int err;
164
165 if (!rss)
166 continue;
167
168 err = __mlx5e_rx_res_rss_destroy(res, i);
169 if (err) {
170 unsigned int refcount;
171
172 refcount = mlx5e_rss_refcnt_read(rss);
173 mlx5_core_warn(res->mdev,
174 "Failed to destroy RSS context %d, refcount = %u, err = %d\n",
175 i, refcount, err);
176 }
177 }
178 }
179
mlx5e_rx_res_rss_enable(struct mlx5e_rx_res * res)180 static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
181 {
182 int i;
183
184 res->rss_active = true;
185
186 for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
187 struct mlx5e_rss *rss = res->rss[i];
188 u32 *vhca_ids;
189
190 if (!rss)
191 continue;
192 vhca_ids = get_vhca_ids(res, 0);
193 mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
194 }
195 }
196
mlx5e_rx_res_rss_disable(struct mlx5e_rx_res * res)197 static void mlx5e_rx_res_rss_disable(struct mlx5e_rx_res *res)
198 {
199 int i;
200
201 res->rss_active = false;
202
203 for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
204 struct mlx5e_rss *rss = res->rss[i];
205
206 if (!rss)
207 continue;
208 mlx5e_rss_disable(rss);
209 }
210 }
211
212 /* Updates the indirection table SW shadow, does not update the HW resources yet */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch)
{
	/* Only the SW shadow of the default context's indirection table is
	 * touched here, so this must not run while RSS is active.
	 */
	WARN_ON_ONCE(res->rss_active);
	mlx5e_rss_set_indir_uniform(res->rss[0], nch);
}
218
/* Read the RXFH (indirection table, key, hash function, symmetry) settings
 * of RSS context @rss_idx. Warns and returns early for an invalid index or
 * an empty slot.
 */
void mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			       u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
	struct mlx5e_rss *rss;

	rss = rss_idx < MLX5E_MAX_NUM_RSS ? res->rss[rss_idx] : NULL;
	if (WARN_ON_ONCE(!rss))
		return;

	mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric);
}
231
/* Apply new RXFH settings to RSS context @rss_idx, re-attaching it to the
 * current RQN table. Returns -EINVAL for a bad index, -ENOENT for an empty
 * slot.
 */
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			      const u32 *indir, const u8 *key, const u8 *hfunc,
			      const bool *symmetric)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, symmetric,
				  res->rss_rqns, get_vhca_ids(res, 0),
				  res->rss_nch);
}
249
/* Query the RX hash fields of RSS context @rss_idx for traffic type @tt.
 * Returns -EINVAL for a bad index, -ENOENT for an empty slot.
 */
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
				     enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];

	return rss ? mlx5e_rss_get_hash_fields(rss, tt) : -ENOENT;
}
264
/* Set the RX hash fields of RSS context @rss_idx for traffic type @tt.
 * Returns -EINVAL for a bad index, -ENOENT for an empty slot.
 */
int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
				     enum mlx5_traffic_types tt, u8 rx_hash_fields)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];

	return rss ? mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields) : -ENOENT;
}
279
mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res * res)280 int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res)
281 {
282 int i, cnt;
283
284 cnt = 0;
285 for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
286 if (res->rss[i])
287 cnt++;
288
289 return cnt;
290 }
291
mlx5e_rx_res_rss_index(struct mlx5e_rx_res * res,struct mlx5e_rss * rss)292 int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss)
293 {
294 int i;
295
296 if (!rss)
297 return -EINVAL;
298
299 for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
300 if (rss == res->rss[i])
301 return i;
302
303 return -ENOENT;
304 }
305
/* Return the RSS context in slot @rss_idx, or NULL for an out-of-range
 * index (an in-range empty slot also yields NULL).
 */
struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
{
	return rss_idx < MLX5E_MAX_NUM_RSS ? res->rss[rss_idx] : NULL;
}
313
314 /* End of API rx_res_rss_* */
315
/* Free the mlx5e_rx_res memory. kvfree() tolerates NULL, so this is safe
 * for partially-allocated state (rss_vhca_ids is NULL when the MULTI_VHCA
 * feature is off).
 */
static void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
	kvfree(res->rss_vhca_ids);
	kvfree(res->rss_rqns);
	kvfree(res);
}
322
/* Allocate the mlx5e_rx_res container plus its per-channel RQN table and,
 * when @multi_vhca is set, the vHCA ID table. Returns NULL on allocation
 * failure.
 */
static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch,
					       bool multi_vhca)
{
	struct mlx5e_rx_res *rx_res;

	rx_res = kvzalloc_obj(*rx_res);
	if (!rx_res)
		return NULL;

	rx_res->rss_rqns = kvcalloc(max_nch, sizeof(*rx_res->rss_rqns), GFP_KERNEL);
	if (!rx_res->rss_rqns)
		goto err_free;

	if (multi_vhca) {
		rx_res->rss_vhca_ids = kvcalloc(max_nch, sizeof(*rx_res->rss_vhca_ids), GFP_KERNEL);
		if (!rx_res->rss_vhca_ids)
			goto err_free;
	}

	return rx_res;

err_free:
	/* Centralized unwind: rx_res is zero-initialized and kvfree(NULL)
	 * is a no-op, so mlx5e_rx_res_free() handles every partial state.
	 */
	mlx5e_rx_res_free(rx_res);
	return NULL;
}
349
/* Create one direct RQT and one direct TIR per channel (res->max_nch
 * pairs). On failure, everything created so far is destroyed in reverse
 * order and the channels array is freed.
 */
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	bool self_lb_blk = res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK;
	struct mlx5e_tir_builder *builder;
	int err = 0;
	int ix;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	res->channels = kvzalloc_objs(*res->channels, res->max_nch);
	if (!res->channels) {
		err = -ENOMEM;
		goto out;
	}

	/* First pass: create all direct RQTs (res->drop_rqn is passed as
	 * the initial RQ).
	 */
	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
					    res->mdev, false, res->drop_rqn,
					    mlx5e_rqt_size(res->mdev, res->max_nch));
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_rqts;
		}
	}

	/* Second pass: build a TIR on top of each RQT, reusing one builder
	 * (cleared after every successful init).
	 */
	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
					    inner_ft_support);
		mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
		mlx5e_tir_builder_build_self_lb_block(builder, self_lb_blk,
						      self_lb_blk);
		mlx5e_tir_builder_build_direct(builder);

		err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct TIR: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

	goto out;

err_destroy_direct_tirs:
	/* Destroy TIRs 0..ix-1, then fall through (with ix reset to
	 * max_nch) to destroy all the RQTs created in the first pass.
	 */
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);

	ix = res->max_nch;
err_destroy_direct_rqts:
	/* Jumped to directly from the first pass: ix is the failed index,
	 * so RQTs 0..ix-1 are the ones that exist.
	 */
	while (--ix >= 0)
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);

	kvfree(res->channels);

out:
	mlx5e_tir_builder_free(builder);

	return err;
}
416
mlx5e_rx_res_ptp_init(struct mlx5e_rx_res * res)417 static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
418 {
419 bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
420 struct mlx5e_tir_builder *builder;
421 int err;
422
423 builder = mlx5e_tir_builder_alloc(false);
424 if (!builder)
425 return -ENOMEM;
426
427 err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn,
428 mlx5e_rqt_size(res->mdev, res->max_nch));
429 if (err)
430 goto out;
431
432 /* Separated from the channels RQs, does not share pkt_merge state with them */
433 mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
434 mlx5e_rqt_get_rqtn(&res->ptp.rqt),
435 inner_ft_support);
436 mlx5e_tir_builder_build_direct(builder);
437
438 err = mlx5e_tir_init(&res->ptp.tir, builder, res->mdev, true);
439 if (err)
440 goto err_destroy_ptp_rqt;
441
442 goto out;
443
444 err_destroy_ptp_rqt:
445 mlx5e_rqt_destroy(&res->ptp.rqt);
446
447 out:
448 mlx5e_tir_builder_free(builder);
449 return err;
450 }
451
/* Destroy each channel's direct TIR and RQT (TIR first — it was built on
 * the RQT's rqtn), then free the channels array.
 */
static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
{
	unsigned int ix;

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
	}

	kvfree(res->channels);
}
463
/* Destroy the PTP TIR before the RQT it was built on. */
static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_tir_destroy(&res->ptp.tir);
	mlx5e_rqt_destroy(&res->ptp.rqt);
}
469
/* Allocate and fully initialize the RX resources: default RSS context,
 * per-channel direct RQTs/TIRs, and the PTP RQT/TIR (created
 * unconditionally here; the PTP feature bit only gates its use elsewhere).
 * Returns an ERR_PTR on failure.
 */
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
		    unsigned int max_nch, u32 drop_rqn,
		    const struct mlx5e_packet_merge_param *pkt_merge_param,
		    unsigned int init_nch)
{
	bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
	struct mlx5e_rx_res *res;
	int err;

	res = mlx5e_rx_res_alloc(mdev, max_nch, multi_vhca);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->mdev = mdev;
	res->features = features;
	res->max_nch = max_nch;
	res->drop_rqn = drop_rqn;

	res->pkt_merge_param = *pkt_merge_param;
	init_rwsem(&res->pkt_merge_param_sem);

	err = mlx5e_rx_res_rss_init_def(res, init_nch);
	if (err)
		goto err_rx_res_free;

	err = mlx5e_rx_res_channels_init(res);
	if (err)
		goto err_rss_destroy;

	err = mlx5e_rx_res_ptp_init(res);
	if (err)
		goto err_channels_destroy;

	return res;

err_channels_destroy:
	mlx5e_rx_res_channels_destroy(res);
err_rss_destroy:
	/* NOTE(review): cleanup failure of the default RSS context is
	 * ignored here — nothing more can be done on this error path.
	 */
	__mlx5e_rx_res_rss_destroy(res, 0);
err_rx_res_free:
	mlx5e_rx_res_free(res);
	return ERR_PTR(err);
}
514
/* Tear down everything created by mlx5e_rx_res_create, in reverse order
 * of creation, then free the container.
 */
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_rx_res_ptp_destroy(res);
	mlx5e_rx_res_channels_destroy(res);
	mlx5e_rx_res_rss_destroy_all(res);
	mlx5e_rx_res_free(res);
}
522
/* Number of channels the RX resources were sized for at creation. */
unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res)
{
	return res->max_nch;
}
527
/* TIR number of channel @ix's direct TIR. No bounds check: @ix must be
 * below res->max_nch.
 */
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
}
532
mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res * res,enum mlx5_traffic_types tt)533 u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
534 {
535 struct mlx5e_rss *rss = res->rss[0];
536
537 return mlx5e_rss_get_tirn(rss, tt, false);
538 }
539
mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res * res,enum mlx5_traffic_types tt)540 u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
541 {
542 struct mlx5e_rss *rss = res->rss[0];
543
544 return mlx5e_rss_get_tirn(rss, tt, true);
545 }
546
/* TIR number of the PTP TIR; warns if the PTP feature was not requested. */
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
{
	WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_PTP));
	return mlx5e_tir_get_tirn(&res->ptp.tir);
}
552
/* RQT number of channel @ix's direct RQT. No bounds check: @ix must be
 * below res->max_nch.
 */
u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
557
mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res * res,struct mlx5e_channels * chs,unsigned int ix)558 static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
559 struct mlx5e_channels *chs,
560 unsigned int ix)
561 {
562 u32 *vhca_id = get_vhca_ids(res, ix);
563 u32 rqn = res->rss_rqns[ix];
564 int err;
565
566 err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn, vhca_id);
567 if (err)
568 mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
569 mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
570 rqn, ix, err);
571 }
572
mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res * res,unsigned int ix)573 static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
574 unsigned int ix)
575 {
576 int err;
577
578 err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn, NULL);
579 if (err)
580 mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
581 mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
582 res->drop_rqn, ix, err);
583 }
584
/* Point the RX steering at the RQs of @chs: fill the RQN (and vHCA ID)
 * table, enable all RSS contexts, redirect the per-channel direct RQTs,
 * and — when the PTP feature is on — the PTP RQT.
 */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
{
	unsigned int nch, ix;
	int err;

	nch = mlx5e_channels_get_num(chs);

	/* NOTE(review): this loop bounds on chs->num while the direct-RQT
	 * loop below uses nch — presumably the same value; confirm against
	 * mlx5e_channels_get_num().
	 */
	for (ix = 0; ix < chs->num; ix++) {
		u32 *vhca_id = get_vhca_ids(res, ix);

		/* An XSK-enabled channel feeds its XSK RQ into the table
		 * instead of the regular RQ.
		 */
		if (mlx5e_channels_is_xsk(chs, ix))
			mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
		else
			mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
	}
	res->rss_nch = chs->num;

	mlx5e_rx_res_rss_enable(res);

	for (ix = 0; ix < nch; ix++)
		mlx5e_rx_res_channel_activate_direct(res, chs, ix);

	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
		u32 rqn;

		/* No PTP RQ available: park the PTP RQT on the drop RQ. */
		if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
			rqn = res->drop_rqn;

		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn, NULL);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				       rqn, err);
	}
}
620
mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res * res)621 void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
622 {
623 unsigned int ix;
624 int err;
625
626 mlx5e_rx_res_rss_disable(res);
627
628 for (ix = 0; ix < res->rss_nch; ix++)
629 mlx5e_rx_res_channel_deactivate_direct(res, ix);
630
631 if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
632 err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn, NULL);
633 if (err)
634 mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
635 mlx5e_rqt_get_rqtn(&res->ptp.rqt),
636 res->drop_rqn, err);
637 }
638 }
639
/* Swap channel @ix between its XSK RQ and its regular RQ in the RQN table,
 * then refresh the RSS contexts and the channel's direct RQT.
 */
void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
			     unsigned int ix, bool xsk)
{
	u32 *vhca_id = get_vhca_ids(res, ix);
	u32 *rqn = &res->rss_rqns[ix];

	if (xsk)
		mlx5e_channels_get_xsk_rqn(chs, ix, rqn, vhca_id);
	else
		mlx5e_channels_get_regular_rqn(chs, ix, rqn, vhca_id);

	mlx5e_rx_res_rss_enable(res);

	mlx5e_rx_res_channel_activate_direct(res, chs, ix);
}
654
/* Apply new packet merge parameters to the SW shadow and to every TIR that
 * shares the pkt_merge state: all RSS contexts and all per-channel direct
 * TIRs. Runs under the pkt_merge_param_sem write lock. On partial failure
 * the update continues; the first error encountered is returned.
 */
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
					struct mlx5e_packet_merge_param *pkt_merge_param)
{
	struct mlx5e_tir_builder *builder;
	int err, final_err;
	unsigned int ix;

	builder = mlx5e_tir_builder_alloc(true);
	if (!builder)
		return -ENOMEM;

	down_write(&res->pkt_merge_param_sem);
	res->pkt_merge_param = *pkt_merge_param;

	mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);

	final_err = 0;

	for (ix = 0; ix < MLX5E_MAX_NUM_RSS; ix++) {
		struct mlx5e_rss *rss = res->rss[ix];

		if (!rss)
			continue;

		err = mlx5e_rss_packet_merge_set_param(rss, pkt_merge_param);
		if (err)
			/* Remember only the first error. */
			final_err = final_err ? : err;
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to update packet merge state of direct TIR %#x for channel %u: err = %d\n",
				       mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
			if (!final_err)
				final_err = err;
		}
	}

	up_write(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_free(builder);
	return final_err;
}
698
/* Return the hash parameters of the default RSS context (slot 0). */
struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res)
{
	return mlx5e_rss_get_hash(res->rss[0]);
}
703
/* Initialize the caller-provided @tir as a TLS TIR on top of channel
 * @rxq's direct RQT. The packet merge state is read (and the TIR created)
 * under pkt_merge_param_sem, so it cannot change mid-creation — see
 * mlx5e_rx_res_packet_merge_set_param().
 */
int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
				struct mlx5e_tir *tir)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	u32 rqtn;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);

	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);
	mlx5e_tir_builder_build_tls(builder);
	down_read(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
	err = mlx5e_tir_init(tir, builder, res->mdev, false);
	up_read(&res->pkt_merge_param_sem);

	mlx5e_tir_builder_free(builder);

	return err;
}
731