// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */

#include "mlx5hws_internal.h"

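/* Dynamic reparse is supported when the RTC reparse-by-STC mode bit is set. */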
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
{
	return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
}

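/* Select the RTC reparse mode to use, based on the device capabilities. */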
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx)
{
	/* Prefer dynamic reparse, where only specific actions trigger a reparse */
	if (mlx5hws_context_cap_dynamic_reparse(ctx))
		return MLX5_IFC_RTC_REPARSE_NEVER;

	/* Otherwise fall back to the less efficient static reparse */
	return MLX5_IFC_RTC_REPARSE_ALWAYS;
}

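/*
 * Initialize the per-context caches (pattern and definer caches) and create
 * one STC pool for each flow table type.
 */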
static int hws_context_pools_init(struct mlx5hws_context *ctx)
{
	struct mlx5hws_pool_attr pool_attr = {0};
	u8 max_log_sz;
	int ret;
	int i;

	ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
	if (ret)
		return ret;

	ret = mlx5hws_definer_init_cache(&ctx->definer_cache);
	if (ret)
		goto uninit_pat_cache;

	/* Create an STC pool per FT type */
	pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
	pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
	max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
	pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);

	for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
		pool_attr.table_type = i;
		ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
		if (!ctx->stc_pool[i]) {
			mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
			ret = -ENOMEM;
			goto free_stc_pools;
		}
	}

	return 0;

free_stc_pools:
	for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
		if (ctx->stc_pool[i])
			mlx5hws_pool_destroy(ctx->stc_pool[i]);

	mlx5hws_definer_uninit_cache(ctx->definer_cache);
uninit_pat_cache:
	mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
	return ret;
}

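/* Destroy the per-table-type STC pools and the definer and pattern caches. */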
static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
{
	int i;

	for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
		if (ctx->stc_pool[i])
			mlx5hws_pool_destroy(ctx->stc_pool[i]);
	}

	mlx5hws_definer_uninit_cache(ctx->definer_cache);
	mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
}

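/* Allocate a protection domain (PD) that is private to this context. */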
static int hws_context_init_pd(struct mlx5hws_context *ctx)
{
	int ret = 0;

	ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num);
	if (ret) {
		mlx5hws_err(ctx, "Failed to allocate PD\n");
		return ret;
	}

	ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD;

	return 0;
}

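/* Release the PD only if it was privately allocated by this context. */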
static int hws_context_uninit_pd(struct mlx5hws_context *ctx)
{
	if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD)
		mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num);

	return 0;
}

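/*
 * Verify the device and FW capabilities required for HW steering and set
 * MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT only when all of them are present.
 */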
static void hws_context_check_hws_supp(struct mlx5hws_context *ctx)
{
	struct mlx5hws_cmd_query_caps *caps = ctx->caps;

	/* HWS not supported on device / FW */
	if (!caps->wqe_based_update) {
		mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n");
		return;
	}

	if (!caps->eswitch_manager) {
		mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n");
		return;
	}

	/* Current solution requires all rules to set reparse bit */
	if ((!caps->nic_ft.reparse ||
	     (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
	    !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
		mlx5hws_err(ctx, "Required HWS reparse cap not supported\n");
		return;
	}

	/* FW/HW must support 8DW STE */
	if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
		mlx5hws_err(ctx, "Required HWS STE format not supported\n");
		return;
	}

	/* Adding rules by hash and by offset are requirements */
	if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
	    !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
		mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n");
		return;
	}

	/* Support for SELECT definer ID is required */
	if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
		mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n");
		return;
	}

	ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
}

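/* Set up the HWS resources (PD, pools, send queues) when HWS is supported. */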
static int hws_context_init_hws(struct mlx5hws_context *ctx,
				struct mlx5hws_context_attr *attr)
{
	int ret;

	hws_context_check_hws_supp(ctx);

	if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
		return 0;

	ret = hws_context_init_pd(ctx);
	if (ret)
		return ret;

	ret = hws_context_pools_init(ctx);
	if (ret)
		goto uninit_pd;

	if (attr->bwc)
		ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;

	ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
	if (ret)
		goto pools_uninit;

	INIT_LIST_HEAD(&ctx->tbl_list);

	return 0;

pools_uninit:
	hws_context_pools_uninit(ctx);
uninit_pd:
	hws_context_uninit_pd(ctx);
	return ret;
}

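/* Release the HWS resources set up by hws_context_init_hws(). */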
static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
{
	if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
		return;

	mlx5hws_send_queues_close(ctx);
	hws_context_pools_uninit(ctx);
	hws_context_uninit_pd(ctx);
}

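/*
 * Open a new HWS context on the given mlx5 device: query capabilities,
 * initialize the vports and the HWS resources, and initialize the debug dump.
 */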
struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev,
					     struct mlx5hws_context_attr *attr)
{
	struct mlx5hws_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->mdev = mdev;

	mutex_init(&ctx->ctrl_lock);
	xa_init(&ctx->peer_ctx_xa);

	ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL);
	if (!ctx->caps)
		goto free_ctx;

	ret = mlx5hws_cmd_query_caps(mdev, ctx->caps);
	if (ret)
		goto free_caps;

	ret = mlx5hws_vport_init_vports(ctx);
	if (ret)
		goto free_caps;

	ret = hws_context_init_hws(ctx, attr);
	if (ret)
		goto uninit_vports;

	mlx5hws_debug_init_dump(ctx);

	return ctx;

uninit_vports:
	mlx5hws_vport_uninit_vports(ctx);
free_caps:
	kfree(ctx->caps);
free_ctx:
	xa_destroy(&ctx->peer_ctx_xa);
	mutex_destroy(&ctx->ctrl_lock);
	kfree(ctx);
	return NULL;
}

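/* Close an HWS context and free everything allocated by mlx5hws_context_open(). */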
int mlx5hws_context_close(struct mlx5hws_context *ctx)
{
	mlx5hws_debug_uninit_dump(ctx);
	hws_context_uninit_hws(ctx);
	mlx5hws_vport_uninit_vports(ctx);
	kfree(ctx->caps);
	xa_destroy(&ctx->peer_ctx_xa);
	mutex_destroy(&ctx->ctrl_lock);
	kfree(ctx);
	return 0;
}

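/* Store the peer context for the given peer vhca ID in this context's xarray. */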
void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
			      struct mlx5hws_context *peer_ctx,
			      u16 peer_vhca_id)
{
	mutex_lock(&ctx->ctrl_lock);

	if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
		pr_warn("HWS: failed storing peer vhca ID in peer xarray\n");

	mutex_unlock(&ctx->ctrl_lock);
}
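
/*
 * Minimal usage sketch of the context API above (illustrative only, not part
 * of the driver): it assumes the caller already holds a valid mlx5_core_dev
 * pointer "mdev", and the queue sizing values are arbitrary placeholders.
 *
 *	struct mlx5hws_context_attr attr = {
 *		.queues = 16,
 *		.queue_size = 256,
 *		.bwc = true,
 *	};
 *	struct mlx5hws_context *ctx;
 *
 *	ctx = mlx5hws_context_open(mdev, &attr);
 *	if (!ctx)
 *		return -EINVAL;
 *
 *	... create tables, matchers and rules via the mlx5hws API ...
 *
 *	mlx5hws_context_close(ctx);
 */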
261