xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
3 
4 #include "rx_res.h"
5 #include "channels.h"
6 #include "params.h"
7 
/* Collection of RX steering resources shared by the netdev's channels:
 * RSS contexts, one direct RQT/TIR pair per channel, and (optionally)
 * dedicated PTP resources.
 */
struct mlx5e_rx_res {
	struct mlx5_core_dev *mdev; /* primary */
	enum mlx5e_rx_res_features features;
	unsigned int max_nch; /* upper bound on the number of channels */
	u32 drop_rqn; /* RQN to point at while RQs are inactive */

	/* Packet merge parameters applied to the direct TIRs; the rwsem
	 * serializes parameter updates (mlx5e_rx_res_packet_merge_set_param)
	 * against TIR creation that reads them (mlx5e_rx_res_tls_tir_create).
	 */
	struct mlx5e_packet_merge_param pkt_merge_param;
	struct rw_semaphore pkt_merge_param_sem;

	struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS]; /* rss[0] is the default context */
	bool rss_active; /* set while channels are activated */
	u32 *rss_rqns; /* RQNs currently backing the RSS contexts (rss_nch valid entries) */
	u32 *rss_vhca_ids; /* per-RQ VHCA IDs; allocated only with MULTI_VHCA feature */
	unsigned int rss_nch;

	/* Per-channel direct steering resources, max_nch entries */
	struct {
		struct mlx5e_rqt direct_rqt;
		struct mlx5e_tir direct_tir;
	} *channels;

	/* PTP channel resources (used with MLX5E_RX_RES_FEATURE_PTP) */
	struct {
		struct mlx5e_rqt rqt;
		struct mlx5e_tir tir;
	} ptp;
};
33 
34 /* API for rx_res_rss_* */
35 
get_vhca_ids(struct mlx5e_rx_res * res,int offset)36 static u32 *get_vhca_ids(struct mlx5e_rx_res *res, int offset)
37 {
38 	bool multi_vhca = res->features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
39 
40 	return multi_vhca ? res->rss_vhca_ids + offset : NULL;
41 }
42 
/* Propagate a new channel count to the indirection tables of every
 * existing RSS context (SW shadow update).
 */
void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];

		if (rss)
			mlx5e_rss_params_indir_modify_actual_size(rss, nch);
	}
}
52 
mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res * res,unsigned int init_nch)53 static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
54 				     unsigned int init_nch)
55 {
56 	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
57 	struct mlx5e_rss *rss;
58 
59 	if (WARN_ON(res->rss[0]))
60 		return -EINVAL;
61 
62 	rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
63 			     &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch);
64 	if (IS_ERR(rss))
65 		return PTR_ERR(rss);
66 
67 	mlx5e_rss_set_indir_uniform(rss, init_nch);
68 
69 	res->rss[0] = rss;
70 
71 	return 0;
72 }
73 
/* Create an additional RSS context at @rss_idx (without TIRs); if RX is
 * currently active, immediately point the new context at the live RQs.
 */
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int init_nch)
{
	bool has_inner_ft = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_rss *new_rss;

	if (WARN_ON_ONCE(res->rss[rss_idx]))
		return -ENOSPC;

	new_rss = mlx5e_rss_init(res->mdev, has_inner_ft, res->drop_rqn,
				 &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS,
				 init_nch, res->max_nch);
	if (IS_ERR(new_rss))
		return PTR_ERR(new_rss);

	mlx5e_rss_set_indir_uniform(new_rss, init_nch);
	if (res->rss_active)
		mlx5e_rss_enable(new_rss, res->rss_rqns, get_vhca_ids(res, 0),
				 res->rss_nch);

	res->rss[rss_idx] = new_rss;

	return 0;
}
99 
/* Clean up the RSS context at @rss_idx and clear its slot. The slot is
 * left untouched if cleanup fails (e.g. the context is still referenced).
 */
static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	int err = mlx5e_rss_cleanup(res->rss[rss_idx]);

	if (err)
		return err;

	res->rss[rss_idx] = NULL;
	return 0;
}
113 
/* Destroy the RSS context at @rss_idx. Returns -EINVAL for an
 * out-of-range or empty slot.
 */
int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	if (rss_idx >= MLX5E_MAX_NUM_RSS || !res->rss[rss_idx])
		return -EINVAL;

	return __mlx5e_rx_res_rss_destroy(res, rss_idx);
}
127 
mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res * res)128 static void mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res *res)
129 {
130 	int i;
131 
132 	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
133 		struct mlx5e_rss *rss = res->rss[i];
134 		int err;
135 
136 		if (!rss)
137 			continue;
138 
139 		err = __mlx5e_rx_res_rss_destroy(res, i);
140 		if (err) {
141 			unsigned int refcount;
142 
143 			refcount = mlx5e_rss_refcnt_read(rss);
144 			mlx5_core_warn(res->mdev,
145 				       "Failed to destroy RSS context %d, refcount = %u, err = %d\n",
146 				       i, refcount, err);
147 		}
148 	}
149 }
150 
mlx5e_rx_res_rss_enable(struct mlx5e_rx_res * res)151 static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
152 {
153 	int i;
154 
155 	res->rss_active = true;
156 
157 	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
158 		struct mlx5e_rss *rss = res->rss[i];
159 		u32 *vhca_ids;
160 
161 		if (!rss)
162 			continue;
163 		vhca_ids = get_vhca_ids(res, 0);
164 		mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
165 	}
166 }
167 
mlx5e_rx_res_rss_disable(struct mlx5e_rx_res * res)168 static void mlx5e_rx_res_rss_disable(struct mlx5e_rx_res *res)
169 {
170 	int i;
171 
172 	res->rss_active = false;
173 
174 	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
175 		struct mlx5e_rss *rss = res->rss[i];
176 
177 		if (!rss)
178 			continue;
179 		mlx5e_rss_disable(rss);
180 	}
181 }
182 
/* Updates the indirection table SW shadow, does not update the HW resources yet.
 * Applies to the default RSS context (index 0) only.
 */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch)
{
	/* Expected to be called only while RSS is inactive. */
	WARN_ON_ONCE(res->rss_active);
	mlx5e_rss_set_indir_uniform(res->rss[0], nch);
}
189 
/* Read the RXFH (indirection table, key, hash function, symmetry) of the
 * RSS context at @rss_idx. Warns and returns early on a bad index.
 */
void mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			       u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
	struct mlx5e_rss *rss;

	rss = rss_idx < MLX5E_MAX_NUM_RSS ? res->rss[rss_idx] : NULL;
	if (WARN_ON_ONCE(!rss))
		return;

	mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric);
}
202 
/* Apply new RXFH settings to the RSS context at @rss_idx, re-pointing it
 * at the currently recorded RQs.
 */
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			      const u32 *indir, const u8 *key, const u8 *hfunc,
			      const bool *symmetric)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, symmetric,
				  res->rss_rqns, get_vhca_ids(res, 0),
				  res->rss_nch);
}
220 
/* Return the RX hash field configuration of traffic type @tt for the RSS
 * context at @rss_idx, or a negative errno for a bad/empty slot.
 */
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
				     enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];

	return rss ? mlx5e_rss_get_hash_fields(rss, tt) : -ENOENT;
}
235 
/* Set the RX hash field configuration of traffic type @tt for the RSS
 * context at @rss_idx, or return a negative errno for a bad/empty slot.
 */
int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
				     enum mlx5_traffic_types tt, u8 rx_hash_fields)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];

	return rss ? mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields) : -ENOENT;
}
250 
mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res * res)251 int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res)
252 {
253 	int i, cnt;
254 
255 	cnt = 0;
256 	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
257 		if (res->rss[i])
258 			cnt++;
259 
260 	return cnt;
261 }
262 
mlx5e_rx_res_rss_index(struct mlx5e_rx_res * res,struct mlx5e_rss * rss)263 int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss)
264 {
265 	int i;
266 
267 	if (!rss)
268 		return -EINVAL;
269 
270 	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
271 		if (rss == res->rss[i])
272 			return i;
273 
274 	return -ENOENT;
275 }
276 
/* Return the RSS context at @rss_idx, or NULL for an out-of-range index
 * (the slot itself may also be NULL).
 */
struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
{
	return rss_idx < MLX5E_MAX_NUM_RSS ? res->rss[rss_idx] : NULL;
}
284 
285 /* End of API rx_res_rss_* */
286 
/* Release the memory allocated by mlx5e_rx_res_alloc(). */
static void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
	kvfree(res->rss_vhca_ids); /* NULL unless MULTI_VHCA was enabled; kvfree(NULL) is a no-op */
	kvfree(res->rss_rqns);
	kvfree(res);
}
293 
/* Allocate the mlx5e_rx_res struct and its RQN (and, with @multi_vhca,
 * VHCA ID) arrays, each sized for @max_nch channels. Returns NULL on
 * allocation failure. Freed by mlx5e_rx_res_free().
 */
static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch,
					       bool multi_vhca)
{
	struct mlx5e_rx_res *rx_res;

	rx_res = kvzalloc(sizeof(*rx_res), GFP_KERNEL);
	if (!rx_res)
		return NULL;

	rx_res->rss_rqns = kvcalloc(max_nch, sizeof(*rx_res->rss_rqns), GFP_KERNEL);
	if (!rx_res->rss_rqns)
		goto err_free_res;

	if (multi_vhca) {
		rx_res->rss_vhca_ids = kvcalloc(max_nch, sizeof(*rx_res->rss_vhca_ids),
						GFP_KERNEL);
		if (!rx_res->rss_vhca_ids)
			goto err_free_rqns;
	}

	return rx_res;

err_free_rqns:
	kvfree(rx_res->rss_rqns);
err_free_res:
	kvfree(rx_res);
	return NULL;
}
320 
/* Create one direct RQT + TIR pair per channel (max_nch of each). Every
 * RQT initially points at the drop RQ; real RQs are plugged in later by
 * mlx5e_rx_res_channels_activate(). On failure, everything created so
 * far is unwound and an errno is returned.
 */
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err = 0;
	int ix;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	res->channels = kvcalloc(res->max_nch, sizeof(*res->channels), GFP_KERNEL);
	if (!res->channels) {
		err = -ENOMEM;
		goto out;
	}

	/* Phase 1: create all direct RQTs. */
	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
					    res->mdev, false, res->drop_rqn,
					    mlx5e_rqt_size(res->mdev, res->max_nch));
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_rqts;
		}
	}

	/* Phase 2: build a direct TIR on top of each RQT, reusing one builder. */
	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
					    inner_ft_support);
		mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
		mlx5e_tir_builder_build_direct(builder);

		err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct TIR: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

	goto out;

err_destroy_direct_tirs:
	/* ix is the index that failed; destroy the TIRs before it. */
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);

	/* All max_nch RQTs exist once phase 1 completed; destroy them all. */
	ix = res->max_nch;
err_destroy_direct_rqts:
	while (--ix >= 0)
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);

	kvfree(res->channels);

out:
	mlx5e_tir_builder_free(builder);

	return err;
}
384 
/* Create the RQT + TIR pair for the PTP channel. The RQT initially
 * points at the drop RQ; it is redirected in
 * mlx5e_rx_res_channels_activate() when the PTP feature is enabled.
 */
static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn,
				    mlx5e_rqt_size(res->mdev, res->max_nch));
	if (err)
		goto out;

	/* Separated from the channels RQs, does not share pkt_merge state with them */
	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
				    mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);

	err = mlx5e_tir_init(&res->ptp.tir, builder, res->mdev, true);
	if (err)
		goto err_destroy_ptp_rqt;

	goto out;

err_destroy_ptp_rqt:
	mlx5e_rqt_destroy(&res->ptp.rqt);

out:
	mlx5e_tir_builder_free(builder);
	return err;
}
419 
mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res * res)420 static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
421 {
422 	unsigned int ix;
423 
424 	for (ix = 0; ix < res->max_nch; ix++) {
425 		mlx5e_tir_destroy(&res->channels[ix].direct_tir);
426 		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
427 	}
428 
429 	kvfree(res->channels);
430 }
431 
/* Destroy the PTP TIR and RQT created by mlx5e_rx_res_ptp_init(). */
static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_tir_destroy(&res->ptp.tir);
	mlx5e_rqt_destroy(&res->ptp.rqt);
}
437 
/* Allocate and fully initialize the RX resources: the default RSS
 * context, per-channel direct RQTs/TIRs, and the PTP pair. Returns an
 * ERR_PTR on failure; partial initialization is unwound in reverse order.
 */
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
		    unsigned int max_nch, u32 drop_rqn,
		    const struct mlx5e_packet_merge_param *init_pkt_merge_param,
		    unsigned int init_nch)
{
	bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
	struct mlx5e_rx_res *res;
	int err;

	res = mlx5e_rx_res_alloc(mdev, max_nch, multi_vhca);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->mdev = mdev;
	res->features = features;
	res->max_nch = max_nch;
	res->drop_rqn = drop_rqn;

	res->pkt_merge_param = *init_pkt_merge_param;
	init_rwsem(&res->pkt_merge_param_sem);

	err = mlx5e_rx_res_rss_init_def(res, init_nch);
	if (err)
		goto err_rx_res_free;

	err = mlx5e_rx_res_channels_init(res);
	if (err)
		goto err_rss_destroy;

	err = mlx5e_rx_res_ptp_init(res);
	if (err)
		goto err_channels_destroy;

	return res;

err_channels_destroy:
	mlx5e_rx_res_channels_destroy(res);
err_rss_destroy:
	/* Only the default RSS context (slot 0) exists at this point. */
	__mlx5e_rx_res_rss_destroy(res, 0);
err_rx_res_free:
	mlx5e_rx_res_free(res);
	return ERR_PTR(err);
}
482 
/* Tear down all RX resources in reverse order of creation. */
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_rx_res_ptp_destroy(res);
	mlx5e_rx_res_channels_destroy(res);
	mlx5e_rx_res_rss_destroy_all(res);
	mlx5e_rx_res_free(res);
}
490 
/* Return the maximum number of channels these resources were sized for. */
unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res)
{
	return res->max_nch;
}
495 
/* Return the TIRN of channel @ix's direct TIR. @ix must be < max_nch. */
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
}
500 
mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res * res,enum mlx5_traffic_types tt)501 u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
502 {
503 	struct mlx5e_rss *rss = res->rss[0];
504 
505 	return mlx5e_rss_get_tirn(rss, tt, false);
506 }
507 
mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res * res,enum mlx5_traffic_types tt)508 u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
509 {
510 	struct mlx5e_rss *rss = res->rss[0];
511 
512 	return mlx5e_rss_get_tirn(rss, tt, true);
513 }
514 
/* Return the TIRN of the PTP TIR; warns if the PTP feature is disabled. */
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
{
	WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_PTP));
	return mlx5e_tir_get_tirn(&res->ptp.tir);
}
520 
/* Return the RQTN of channel @ix's direct RQT. @ix must be < max_nch. */
u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
525 
mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res * res,struct mlx5e_channels * chs,unsigned int ix)526 static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
527 						 struct mlx5e_channels *chs,
528 						 unsigned int ix)
529 {
530 	u32 *vhca_id = get_vhca_ids(res, ix);
531 	u32 rqn = res->rss_rqns[ix];
532 	int err;
533 
534 	err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn, vhca_id);
535 	if (err)
536 		mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
537 			       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
538 			       rqn, ix, err);
539 }
540 
mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res * res,unsigned int ix)541 static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
542 						   unsigned int ix)
543 {
544 	int err;
545 
546 	err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn, NULL);
547 	if (err)
548 		mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
549 			       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
550 			       res->drop_rqn, ix, err);
551 }
552 
mlx5e_rx_res_channels_activate(struct mlx5e_rx_res * res,struct mlx5e_channels * chs)553 void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
554 {
555 	unsigned int nch, ix;
556 	int err;
557 
558 	nch = mlx5e_channels_get_num(chs);
559 
560 	for (ix = 0; ix < chs->num; ix++) {
561 		u32 *vhca_id = get_vhca_ids(res, ix);
562 
563 		if (mlx5e_channels_is_xsk(chs, ix))
564 			mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
565 		else
566 			mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
567 	}
568 	res->rss_nch = chs->num;
569 
570 	mlx5e_rx_res_rss_enable(res);
571 
572 	for (ix = 0; ix < nch; ix++)
573 		mlx5e_rx_res_channel_activate_direct(res, chs, ix);
574 
575 	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
576 		u32 rqn;
577 
578 		if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
579 			rqn = res->drop_rqn;
580 
581 		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn, NULL);
582 		if (err)
583 			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
584 				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
585 				       rqn, err);
586 	}
587 }
588 
mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res * res)589 void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
590 {
591 	unsigned int ix;
592 	int err;
593 
594 	mlx5e_rx_res_rss_disable(res);
595 
596 	for (ix = 0; ix < res->rss_nch; ix++)
597 		mlx5e_rx_res_channel_deactivate_direct(res, ix);
598 
599 	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
600 		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn, NULL);
601 		if (err)
602 			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
603 				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
604 				       res->drop_rqn, err);
605 	}
606 }
607 
/* Switch channel @ix between its XSK RQ and its regular RQ: record the
 * new RQN, refresh all RSS contexts, and redirect the channel's direct RQT.
 */
void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
			     unsigned int ix, bool xsk)
{
	u32 *vhca_id = get_vhca_ids(res, ix);

	if (xsk)
		mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
	else
		mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);

	/* Re-apply the full RQN list so every RSS context picks up the change. */
	mlx5e_rx_res_rss_enable(res);

	mlx5e_rx_res_channel_activate_direct(res, chs, ix);
}
622 
/* Apply new packet merge parameters to all TIRs: those owned by the RSS
 * contexts and the per-channel direct TIRs. Continues past failures and
 * returns the first error encountered (0 on full success).
 */
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
					struct mlx5e_packet_merge_param *pkt_merge_param)
{
	struct mlx5e_tir_builder *builder;
	int err, final_err;
	unsigned int ix;

	builder = mlx5e_tir_builder_alloc(true);
	if (!builder)
		return -ENOMEM;

	/* Write lock excludes concurrent TIR creation that reads
	 * pkt_merge_param (see mlx5e_rx_res_tls_tir_create).
	 */
	down_write(&res->pkt_merge_param_sem);
	res->pkt_merge_param = *pkt_merge_param;

	mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);

	final_err = 0;

	for (ix = 0; ix < MLX5E_MAX_NUM_RSS; ix++) {
		struct mlx5e_rss *rss = res->rss[ix];

		if (!rss)
			continue;

		err = mlx5e_rss_packet_merge_set_param(rss, pkt_merge_param);
		if (err)
			final_err = final_err ? : err; /* keep the first error */
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to update packet merge state of direct TIR %#x for channel %u: err = %d\n",
				       mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
			if (!final_err)
				final_err = err;
		}
	}

	up_write(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_free(builder);
	return final_err;
}
666 
/* Return the hash parameters of the default RSS context (index 0). */
struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res)
{
	return mlx5e_rss_get_hash(res->rss[0]);
}
671 
/* Create a TLS TIR on top of the direct RQT of channel @rxq. The caller
 * owns @tir and is responsible for destroying it. Returns 0 or an errno.
 */
int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
				struct mlx5e_tir *tir)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	u32 rqtn;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);

	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);
	mlx5e_tir_builder_build_tls(builder);
	/* Read lock: pkt_merge_param must stay stable from the moment it is
	 * read into the builder until the TIR is created in HW (see
	 * mlx5e_rx_res_packet_merge_set_param).
	 */
	down_read(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
	err = mlx5e_tir_init(tir, builder, res->mdev, false);
	up_read(&res->pkt_merge_param_sem);

	mlx5e_tir_builder_free(builder);

	return err;
}
699