xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c (revision a1c613ae4c322ddd58d5a8539dbfba2a0380a8c0)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd */
3 #include <linux/mlx5/driver.h>
4 #include "vhca_event.h"
5 #include "priv.h"
6 #include "sf.h"
7 #include "mlx5_ifc_vhca_event.h"
8 #include "ecpf.h"
9 #include "mlx5_core.h"
10 #include "eswitch.h"
11 #include "diag/sf_tracepoint.h"
12 #include "devlink.h"
13 
/* One SF hardware entry; indexed by software id within a controller table. */
struct mlx5_sf_hw {
	u32 usr_sfnum;		/* User-assigned SF number stored at allocation */
	u8 allocated: 1;	/* Entry is in use */
	u8 pending_delete: 1;	/* Freed by user; recycled on vhca state event */
};
19 
/* Per-controller table covering a contiguous range of hardware function ids. */
struct mlx5_sf_hwc_table {
	struct mlx5_sf_hw *sfs;	/* Array of max_fn entries; NULL when unused */
	int max_fn;		/* Number of entries in sfs */
	u16 start_fn_id;	/* Hardware function id of entry 0 */
};
25 
/* Index into mlx5_sf_hw_table.hwc: local controller vs external controller. */
enum mlx5_sf_hwc_index {
	MLX5_SF_HWC_LOCAL,
	MLX5_SF_HWC_EXTERNAL,
	MLX5_SF_HWC_MAX,
};
31 
/* Device-wide SF hardware id allocator state. */
struct mlx5_sf_hw_table {
	struct mlx5_core_dev *dev;
	struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
	struct notifier_block vhca_nb;	/* Registered in mlx5_sf_hw_table_create() */
	struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];	/* Per-controller tables */
};
38 
39 static struct mlx5_sf_hwc_table *
mlx5_sf_controller_to_hwc(struct mlx5_core_dev * dev,u32 controller)40 mlx5_sf_controller_to_hwc(struct mlx5_core_dev *dev, u32 controller)
41 {
42 	int idx = !!controller;
43 
44 	return &dev->priv.sf_hw_table->hwc[idx];
45 }
46 
/* Translate a software SF id to its hardware function id by offsetting from
 * the controller's base function id.
 */
u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id)
{
	const struct mlx5_sf_hwc_table *hwc =
		mlx5_sf_controller_to_hwc(dev, controller);

	return hwc->start_fn_id + sw_id;
}
54 
/* Translate a hardware function id back to the software id within @hwc.
 * Caller must ensure hw_id falls inside hwc's range (see
 * mlx5_sf_table_fn_to_hwc()).
 */
static u16 mlx5_sf_hw_to_sw_id(struct mlx5_sf_hwc_table *hwc, u16 hw_id)
{
	return hw_id - hwc->start_fn_id;
}
59 
60 static struct mlx5_sf_hwc_table *
mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table * table,u16 fn_id)61 mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
62 {
63 	int i;
64 
65 	for (i = 0; i < ARRAY_SIZE(table->hwc); i++) {
66 		if (table->hwc[i].max_fn &&
67 		    fn_id >= table->hwc[i].start_fn_id &&
68 		    fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn))
69 			return &table->hwc[i];
70 	}
71 	return NULL;
72 }
73 
/* Reserve a free entry for user SF number @usr_sfnum in the controller's
 * table. Returns the allocated software id on success, -EEXIST when the
 * number is already in use, or -ENOSPC when the table is full or absent.
 */
static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
				     u32 usr_sfnum)
{
	struct mlx5_sf_hwc_table *hwc;
	int free_idx = -1;
	int i;

	hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
	if (!hwc->sfs)
		return -ENOSPC;

	/* Single pass: remember the first free slot while verifying that
	 * usr_sfnum is not already taken by an allocated entry.
	 */
	for (i = 0; i < hwc->max_fn; i++) {
		struct mlx5_sf_hw *sf = &hwc->sfs[i];

		if (!sf->allocated) {
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		if (sf->usr_sfnum == usr_sfnum)
			return -EEXIST;
	}

	if (free_idx < 0)
		return -ENOSPC;

	hwc->sfs[free_idx].usr_sfnum = usr_sfnum;
	hwc->sfs[free_idx].allocated = true;
	return free_idx;
}
102 
/* Return software id @id of @controller to the free pool. */
static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
{
	struct mlx5_sf_hw *sf;

	sf = &mlx5_sf_controller_to_hwc(table->dev, controller)->sfs[id];
	sf->allocated = false;
	sf->pending_delete = false;
}
111 
/* Allocate a new SF: reserve a table entry, create the SF in firmware,
 * program its user-visible SF number and, for external controllers, arm
 * the firmware vhca event channel. Returns the software id on success or
 * a negative errno (-EOPNOTSUPP when SFs are unsupported).
 */
int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
	u16 hw_fn_id;
	int sw_id;
	int err;

	if (!table)
		return -EOPNOTSUPP;

	mutex_lock(&table->table_lock);
	sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
	if (sw_id < 0) {
		err = sw_id;
		goto unlock;
	}

	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, sw_id);
	err = mlx5_cmd_alloc_sf(dev, hw_fn_id);
	if (err)
		goto free_id;

	err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum);
	if (err)
		goto dealloc_sf;

	if (controller) {
		/* If this SF is for external controller, SF manager
		 * needs to arm firmware to receive the events.
		 */
		err = mlx5_vhca_event_arm(dev, hw_fn_id);
		if (err)
			goto dealloc_sf;
	}

	trace_mlx5_sf_hwc_alloc(dev, controller, hw_fn_id, usr_sfnum);
	mutex_unlock(&table->table_lock);
	return sw_id;

dealloc_sf:
	mlx5_cmd_dealloc_sf(dev, hw_fn_id);
free_id:
	mlx5_sf_hw_table_id_free(table, controller, sw_id);
unlock:
	mutex_unlock(&table->table_lock);
	return err;
}
159 
/* Synchronously destroy the SF in firmware and release its table entry. */
void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	mutex_lock(&table->table_lock);
	mlx5_cmd_dealloc_sf(dev, mlx5_sf_sw_to_hw_id(dev, controller, id));
	mlx5_sf_hw_table_id_free(table, controller, id);
	mutex_unlock(&table->table_lock);
}
171 
mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev * dev,struct mlx5_sf_hwc_table * hwc,int idx)172 static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev,
173 					 struct mlx5_sf_hwc_table *hwc, int idx)
174 {
175 	mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx);
176 	hwc->sfs[idx].allocated = false;
177 	hwc->sfs[idx].pending_delete = false;
178 	trace_mlx5_sf_hwc_free(dev, hwc->start_fn_id + idx);
179 }
180 
/* Free an SF whose vhca may still be attached. If firmware reports the vhca
 * as merely ALLOCATED, the SF is deallocated immediately; otherwise the
 * entry is marked pending_delete and recycled later, when the vhca state
 * event handler sees the SF driver has detached.
 */
void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
	struct mlx5_sf_hwc_table *hwc;
	u16 hw_fn_id;
	u8 state;

	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
	hwc = mlx5_sf_controller_to_hwc(dev, controller);
	mutex_lock(&table->table_lock);
	if (mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out)))
		goto unlock;

	state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
	if (state == MLX5_VHCA_STATE_ALLOCATED) {
		mlx5_cmd_dealloc_sf(dev, hw_fn_id);
		hwc->sfs[id].allocated = false;
	} else {
		hwc->sfs[id].pending_delete = true;
		trace_mlx5_sf_hwc_deferred_free(dev, hw_fn_id);
	}
unlock:
	mutex_unlock(&table->table_lock);
}
207 
mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev * dev,struct mlx5_sf_hwc_table * hwc)208 static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
209 					     struct mlx5_sf_hwc_table *hwc)
210 {
211 	int i;
212 
213 	for (i = 0; i < hwc->max_fn; i++) {
214 		if (hwc->sfs[i].allocated)
215 			mlx5_sf_hw_table_hwc_sf_free(dev, hwc, i);
216 	}
217 }
218 
mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table * table)219 static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
220 {
221 	mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
222 	mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
223 }
224 
/* Set up one per-controller table for @max_fn SFs whose hardware function
 * ids start at @base_id. A zero @max_fn leaves the table disabled.
 * Returns 0 on success or -ENOMEM.
 */
static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
{
	if (!max_fn)
		return 0;

	hwc->sfs = kcalloc(max_fn, sizeof(*hwc->sfs), GFP_KERNEL);
	if (!hwc->sfs)
		return -ENOMEM;

	hwc->max_fn = max_fn;
	hwc->start_fn_id = base_id;
	return 0;
}
241 
/* Release the SF entry array of one per-controller table.
 * kfree(NULL) is a no-op, so an uninitialized table is safe here.
 */
static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc)
{
	kfree(hwc->sfs);
}
246 
/* Drop the devlink resources added by mlx5_sf_hw_table_res_register(). */
static void mlx5_sf_hw_table_res_unregister(struct mlx5_core_dev *dev)
{
	devl_resources_unregister(priv_to_devlink(dev));
}
251 
/* Expose the maximum local and external SF counts as devlink resources
 * ("max_local_SFs" / "max_external_SFs"). Returns 0 or the first
 * devl_resource_register() error.
 */
static int mlx5_sf_hw_table_res_register(struct mlx5_core_dev *dev, u16 max_fn,
					 u16 max_ext_fn)
{
	struct devlink_resource_size_params size_params;
	struct devlink *devlink = priv_to_devlink(dev);
	int err;

	devlink_resource_size_params_init(&size_params, max_fn, max_fn, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	err = devl_resource_register(devlink, "max_local_SFs", max_fn, MLX5_DL_RES_MAX_LOCAL_SFS,
				     DEVLINK_RESOURCE_ID_PARENT_TOP, &size_params);
	if (err)
		return err;

	devlink_resource_size_params_init(&size_params, max_ext_fn, max_ext_fn, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	return devl_resource_register(devlink, "max_external_SFs", max_ext_fn,
				      MLX5_DL_RES_MAX_EXTERNAL_SFS, DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}
272 
/* Allocate and initialize the SF HW table for the local and (when present)
 * external controller function-id ranges. Returns 0 when vhca events or
 * SFs are unsupported (dev->priv.sf_hw_table stays NULL), or a negative
 * errno on failure.
 */
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table;
	u16 max_ext_fn = 0;
	u16 ext_base_id = 0;
	u16 base_id;
	u16 max_fn;
	int err;

	if (!mlx5_vhca_event_supported(dev))
		return 0;

	max_fn = mlx5_sf_max_functions(dev);

	err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id);
	if (err)
		return err;

	/* Resource registration failure is non-fatal; only log it. */
	if (mlx5_sf_hw_table_res_register(dev, max_fn, max_ext_fn))
		mlx5_core_dbg(dev, "failed to register max SFs resources");

	if (!max_fn && !max_ext_fn)
		return 0;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table) {
		err = -ENOMEM;
		goto alloc_err;
	}

	mutex_init(&table->table_lock);
	table->dev = dev;
	dev->priv.sf_hw_table = table;

	base_id = mlx5_sf_start_function_id(dev);
	err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_LOCAL], max_fn, base_id);
	if (err)
		goto table_err;

	err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_EXTERNAL],
					max_ext_fn, ext_base_id);
	if (err)
		goto ext_err;

	mlx5_core_dbg(dev, "SF HW table: max sfs = %d, ext sfs = %d\n", max_fn, max_ext_fn);
	return 0;

	/* Unwind in reverse order of setup. */
ext_err:
	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
table_err:
	mutex_destroy(&table->table_lock);
	kfree(table);
alloc_err:
	mlx5_sf_hw_table_res_unregister(dev);
	return err;
}
329 
mlx5_sf_hw_table_cleanup(struct mlx5_core_dev * dev)330 void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
331 {
332 	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
333 
334 	if (!table)
335 		goto res_unregister;
336 
337 	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]);
338 	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
339 	mutex_destroy(&table->table_lock);
340 	kfree(table);
341 res_unregister:
342 	mlx5_sf_hw_table_res_unregister(dev);
343 }
344 
/* vhca state change handler: when an SF that was deferred-freed transitions
 * back to ALLOCATED (its driver detached), recycle its hardware id.
 * Always returns 0 (notifier convention: event consumed/ignored).
 */
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
	const struct mlx5_vhca_state_event *event = data;
	struct mlx5_sf_hwc_table *hwc;
	struct mlx5_sf_hw *sf_hw;
	u16 sw_id;

	/* Only the ALLOCATED state is of interest here. */
	if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
		return 0;

	/* Ignore function ids outside any configured SF range. */
	hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
	if (!hwc)
		return 0;

	sw_id = mlx5_sf_hw_to_sw_id(hwc, event->function_id);
	sf_hw = &hwc->sfs[sw_id];

	mutex_lock(&table->table_lock);
	/* SF driver notified through firmware that SF is finally detached.
	 * Hence recycle the sf hardware id for reuse.
	 */
	if (sf_hw->allocated && sf_hw->pending_delete)
		mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
	mutex_unlock(&table->table_lock);
	return 0;
}
372 
/* Start processing vhca state events for the SF HW table. Returns 0 when
 * the table does not exist (SFs unsupported) or the notifier registration
 * result otherwise.
 */
int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	if (!table)
		return 0;

	table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
	return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
}
383 
/* Stop vhca event processing and deallocate any remaining SFs.
 * Counterpart of mlx5_sf_hw_table_create(); no-op when no table exists.
 */
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	if (!table)
		return;

	mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
	/* Dealloc SFs whose firmware event has been missed. */
	mlx5_sf_hw_table_dealloc_all(table);
}
395 
mlx5_sf_hw_table_supported(const struct mlx5_core_dev * dev)396 bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
397 {
398 	return !!dev->priv.sf_hw_table;
399 }
400