xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd */
3 
4 #include <linux/mlx5/driver.h>
5 #include "eswitch.h"
6 #include "priv.h"
7 #include "sf/dev/dev.h"
8 #include "mlx5_ifc_vhca_event.h"
9 #include "vhca_event.h"
10 #include "ecpf.h"
11 #define CREATE_TRACE_POINTS
12 #include "diag/sf_tracepoint.h"
13 
/* Per-SF (sub-function) bookkeeping tracked by the SF devlink port table. */
struct mlx5_sf {
	struct mlx5_devlink_port dl_port; /* Embedded devlink port representation. */
	unsigned int port_index;	/* Devlink port index derived from hw_fn_id. */
	u32 controller;			/* Controller number this SF belongs to. */
	u16 id;				/* Software SF id returned by the SF HW table. */
	u16 hw_fn_id;			/* Hardware function id of the SF. */
	u16 hw_state;			/* MLX5_VHCA_STATE_*; serialized by sf_state_lock. */
};
22 
mlx5_sf_by_dl_port(struct devlink_port * dl_port)23 static void *mlx5_sf_by_dl_port(struct devlink_port *dl_port)
24 {
25 	struct mlx5_devlink_port *mlx5_dl_port = mlx5_devlink_port_get(dl_port);
26 
27 	return container_of(mlx5_dl_port, struct mlx5_sf, dl_port);
28 }
29 
/* Per-device table of SF ports, keyed by hardware function id. */
struct mlx5_sf_table {
	struct mlx5_core_dev *dev; /* To refer from notifier context. */
	struct xarray function_ids; /* function id based lookup. */
	struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
};
35 
36 static struct mlx5_sf *
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table * table,unsigned int fn_id)37 mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
38 {
39 	return xa_load(&table->function_ids, fn_id);
40 }
41 
mlx5_sf_function_id_insert(struct mlx5_sf_table * table,struct mlx5_sf * sf)42 static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
43 {
44 	return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL);
45 }
46 
/* Remove @sf from the function-id lookup table. */
static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	xa_erase(&table->function_ids, sf->hw_fn_id);
}
51 
/* Allocate and register a new SF.
 *
 * Reserves a hardware SF id for (controller, sfnum), fills in the software
 * state (port index, hw function id, initial ALLOCATED vhca state) and makes
 * the SF reachable through the function-id xarray.
 *
 * Returns the new SF on success or an ERR_PTR on failure; partially acquired
 * resources are rolled back on the error paths.
 */
static struct mlx5_sf *
mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
	      u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
{
	unsigned int dl_port_index;
	struct mlx5_sf *sf;
	u16 hw_fn_id;
	int id_err;
	int err;

	if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
		return ERR_PTR(-EINVAL);
	}

	/* Negative return is an errno; non-negative is the allocated SF id. */
	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
	if (id_err < 0) {
		err = id_err;
		goto id_err;
	}

	sf = kzalloc_obj(*sf);
	if (!sf) {
		err = -ENOMEM;
		goto alloc_err;
	}
	sf->id = id_err;
	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
	dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
	sf->port_index = dl_port_index;
	sf->hw_fn_id = hw_fn_id;
	sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
	sf->controller = controller;

	err = mlx5_sf_function_id_insert(table, sf);
	if (err)
		goto insert_err;

	return sf;

insert_err:
	kfree(sf);
alloc_err:
	mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
id_err:
	/* -EEXIST comes from the hw id allocation when sfnum is a duplicate. */
	if (err == -EEXIST)
		NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum");
	return ERR_PTR(err);
}
101 
/* Release the SF's hardware id reservation and free the software entry.
 * Callers erase @sf from the function-id table before calling this.
 */
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
	trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
	kfree(sf);
}
108 
/* Translate a MLX5_VHCA_STATE_* value into the devlink function state.
 * ACTIVE and IN_USE map to ACTIVE; everything else is INACTIVE.
 */
static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
{
	if (hw_state == MLX5_VHCA_STATE_ACTIVE ||
	    hw_state == MLX5_VHCA_STATE_IN_USE)
		return DEVLINK_PORT_FN_STATE_ACTIVE;

	return DEVLINK_PORT_FN_STATE_INACTIVE;
}
122 
/* Translate a MLX5_VHCA_STATE_* value into the devlink operational state.
 * IN_USE and TEARDOWN_REQUEST map to ATTACHED; everything else is DETACHED.
 */
static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state)
{
	if (hw_state == MLX5_VHCA_STATE_IN_USE ||
	    hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST)
		return DEVLINK_PORT_FN_OPSTATE_ATTACHED;

	return DEVLINK_PORT_FN_OPSTATE_DETACHED;
}
136 
mlx5_sf_is_active(const struct mlx5_sf * sf)137 static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
138 {
139 	return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
140 }
141 
mlx5_devlink_sf_port_fn_state_get(struct devlink_port * dl_port,enum devlink_port_fn_state * state,enum devlink_port_fn_opstate * opstate,struct netlink_ext_ack * extack)142 int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
143 				      enum devlink_port_fn_state *state,
144 				      enum devlink_port_fn_opstate *opstate,
145 				      struct netlink_ext_ack *extack)
146 {
147 	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
148 	struct mlx5_sf_table *table = dev->priv.sf_table;
149 	struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
150 
151 	mutex_lock(&table->sf_state_lock);
152 	*state = mlx5_sf_to_devlink_state(sf->hw_state);
153 	*opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
154 	mutex_unlock(&table->sf_state_lock);
155 	return 0;
156 }
157 
/* Enable the SF's hardware function. Called with table->sf_state_lock held
 * (via mlx5_sf_state_set()).
 *
 * Already-active SFs are a no-op. Any state other than ALLOCATED means a
 * previous teardown has not been confirmed yet, so activation is refused
 * with -EBUSY. Returns 0 on success or a negative errno.
 */
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
			    struct netlink_ext_ack *extack)
{
	struct mlx5_vport *vport;
	int err;

	if (mlx5_sf_is_active(sf))
		return 0;
	if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
		NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
		return -EBUSY;
	}

	vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
	/* Program the SF default max IO EQs only when the user has not set a
	 * value and the device supports 24-bit EQ numbers.
	 */
	if (!vport->max_eqs_set && MLX5_CAP_GEN_2(dev, max_num_eqs_24b)) {
		err = mlx5_devlink_port_fn_max_io_eqs_set_sf_default(&sf->dl_port.dl_port,
								     extack);
		if (err)
			return err;
	}
	err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
	if (err)
		return err;

	sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
	trace_mlx5_sf_activate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
	return 0;
}
186 
mlx5_sf_deactivate(struct mlx5_core_dev * dev,struct mlx5_sf * sf)187 static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
188 {
189 	int err;
190 
191 	if (!mlx5_sf_is_active(sf))
192 		return 0;
193 
194 	err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id);
195 	if (err)
196 		return err;
197 
198 	sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
199 	trace_mlx5_sf_deactivate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
200 	return 0;
201 }
202 
mlx5_sf_state_set(struct mlx5_core_dev * dev,struct mlx5_sf_table * table,struct mlx5_sf * sf,enum devlink_port_fn_state state,struct netlink_ext_ack * extack)203 static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
204 			     struct mlx5_sf *sf,
205 			     enum devlink_port_fn_state state,
206 			     struct netlink_ext_ack *extack)
207 {
208 	int err = 0;
209 
210 	mutex_lock(&table->sf_state_lock);
211 	if (state == mlx5_sf_to_devlink_state(sf->hw_state))
212 		goto out;
213 	if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
214 		err = mlx5_sf_activate(dev, sf, extack);
215 	else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
216 		err = mlx5_sf_deactivate(dev, sf);
217 	else
218 		err = -EINVAL;
219 out:
220 	mutex_unlock(&table->sf_state_lock);
221 	return err;
222 }
223 
mlx5_devlink_sf_port_fn_state_set(struct devlink_port * dl_port,enum devlink_port_fn_state state,struct netlink_ext_ack * extack)224 int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
225 				      enum devlink_port_fn_state state,
226 				      struct netlink_ext_ack *extack)
227 {
228 	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
229 	struct mlx5_sf_table *table = dev->priv.sf_table;
230 	struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
231 
232 	return mlx5_sf_state_set(dev, table, sf, state, extack);
233 }
234 
/* Create a new SF port: allocate the SF entry and load its eswitch vport.
 * On success, *dl_port points at the SF's embedded devlink port. On vport
 * load failure the SF is unregistered and freed before returning the error.
 */
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
		       const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack,
		       struct devlink_port **dl_port)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_sf *sf;
	int err;

	sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
	if (IS_ERR(sf))
		return PTR_ERR(sf);

	err = mlx5_eswitch_load_sf_vport(esw, sf->hw_fn_id, MLX5_VPORT_UC_ADDR_CHANGE,
					 &sf->dl_port, new_attr->controller, new_attr->sfnum);
	if (err)
		goto esw_err;
	*dl_port = &sf->dl_port.dl_port;
	trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum);
	return 0;

esw_err:
	mlx5_sf_function_id_erase(table, sf);
	mlx5_sf_free(table, sf);
	return err;
}
261 
/* Validate user-supplied attributes for a new devlink port.
 * Only the PCI_SF flavour with a user-chosen sfnum on this device's PF is
 * accepted; user-defined port indices are rejected, and a non-zero external
 * controller requires the device to be the ECPF eswitch manager.
 * Returns 0 on success, -EOPNOTSUPP with an extack message otherwise.
 */
static int
mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack)
{
	if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition");
		return -EOPNOTSUPP;
	}
	if (new_attr->port_index_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Driver does not support user defined port index assignment");
		return -EOPNOTSUPP;
	}
	if (!new_attr->sfnum_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "User must provide unique sfnum. Driver does not support auto assignment");
		return -EOPNOTSUPP;
	}
	if (new_attr->controller_valid && new_attr->controller &&
	    !mlx5_core_is_ecpf_esw_manager(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
		return -EOPNOTSUPP;
	}
	if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
		return -EOPNOTSUPP;
	}
	return 0;
}
291 
mlx5_sf_table_supported(const struct mlx5_core_dev * dev)292 static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
293 {
294 	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
295 	       mlx5_sf_hw_table_supported(dev);
296 }
297 
mlx5_devlink_sf_port_new(struct devlink * devlink,const struct devlink_port_new_attrs * new_attr,struct netlink_ext_ack * extack,struct devlink_port ** dl_port)298 int mlx5_devlink_sf_port_new(struct devlink *devlink,
299 			     const struct devlink_port_new_attrs *new_attr,
300 			     struct netlink_ext_ack *extack,
301 			     struct devlink_port **dl_port)
302 {
303 	struct mlx5_core_dev *dev = devlink_priv(devlink);
304 	struct mlx5_sf_table *table = dev->priv.sf_table;
305 	int err;
306 
307 	err = mlx5_sf_new_check_attr(dev, new_attr, extack);
308 	if (err)
309 		return err;
310 
311 	if (!mlx5_sf_table_supported(dev)) {
312 		NL_SET_ERR_MSG_MOD(extack, "SF ports are not supported.");
313 		return -EOPNOTSUPP;
314 	}
315 
316 	if (!is_mdev_switchdev_mode(dev)) {
317 		NL_SET_ERR_MSG_MOD(extack,
318 				   "SF ports are only supported in eswitch switchdev mode.");
319 		return -EOPNOTSUPP;
320 	}
321 
322 	return mlx5_sf_add(dev, table, new_attr, extack, dl_port);
323 }
324 
/* Unregister @sf and release its resources based on its vhca state, under
 * sf_state_lock:
 * - ALLOCATED: never enabled; free the HW id and entry immediately.
 * - active (ACTIVE/IN_USE): disable the HCA and defer the HW id free until
 *   firmware confirms detachment (see comment below).
 * - otherwise (e.g. TEARDOWN_REQUEST): defer the HW id free likewise.
 * Also clears the vport's user-set max_eqs flag.
 */
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	struct mlx5_vport *vport;

	mutex_lock(&table->sf_state_lock);
	vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
	vport->max_eqs_set = false;

	mlx5_sf_function_id_erase(table, sf);

	if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
		mlx5_sf_free(table, sf);
	} else if (mlx5_sf_is_active(sf)) {
		/* Even if its active, it is treated as in_use because by the time,
		 * it is disabled here, it may getting used. So it is safe to
		 * always look for the event to ensure that it is recycled only after
		 * firmware gives confirmation that it is detached by the driver.
		 */
		mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
	} else {
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
	}

	mutex_unlock(&table->sf_state_lock);
}
353 
mlx5_sf_del(struct mlx5_sf_table * table,struct mlx5_sf * sf)354 static void mlx5_sf_del(struct mlx5_sf_table *table, struct mlx5_sf *sf)
355 {
356 	struct mlx5_eswitch *esw = table->dev->priv.eswitch;
357 
358 	mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
359 	mlx5_sf_dealloc(table, sf);
360 }
361 
mlx5_devlink_sf_port_del(struct devlink * devlink,struct devlink_port * dl_port,struct netlink_ext_ack * extack)362 int mlx5_devlink_sf_port_del(struct devlink *devlink,
363 			     struct devlink_port *dl_port,
364 			     struct netlink_ext_ack *extack)
365 {
366 	struct mlx5_core_dev *dev = devlink_priv(devlink);
367 	struct mlx5_sf_table *table = dev->priv.sf_table;
368 	struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
369 
370 	mlx5_sf_del(table, sf);
371 	return 0;
372 }
373 
mlx5_sf_state_update_check(const struct mlx5_sf * sf,u8 new_state)374 static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
375 {
376 	if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE)
377 		return true;
378 
379 	if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE)
380 		return true;
381 
382 	if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST &&
383 	    new_state == MLX5_VHCA_STATE_ALLOCATED)
384 		return true;
385 
386 	return false;
387 }
388 
/* VHCA state event handler: mirror firmware-reported state transitions into
 * sf->hw_state under sf_state_lock. Events for function ids not tracked in
 * the table are ignored; illegal transitions (per
 * mlx5_sf_state_update_check()) leave hw_state unchanged but are still
 * traced.
 */
static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
						 priv.sf_table_vhca_nb);
	struct mlx5_sf_table *table = dev->priv.sf_table;
	const struct mlx5_vhca_state_event *event = data;
	bool update = false;
	struct mlx5_sf *sf;

	if (!table)
		return 0;

	mutex_lock(&table->sf_state_lock);
	sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
	if (!sf)
		goto unlock;

	/* When driver is attached or detached to a function, an event
	 * notifies such state change.
	 */
	update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
	if (update)
		sf->hw_state = event->new_vhca_state;
	trace_mlx5_sf_update_state(dev, sf->port_index, sf->controller,
				   sf->hw_fn_id, sf->hw_state);
unlock:
	mutex_unlock(&table->sf_state_lock);
	return 0;
}
418 
/* Tear down every SF in the table. mlx5_sf_del() erases the current entry
 * from the xarray (via mlx5_sf_dealloc()); xa_for_each() tolerates removal
 * of the entry it is positioned on.
 */
static void mlx5_sf_del_all(struct mlx5_sf_table *table)
{
	unsigned long index;
	struct mlx5_sf *sf;

	xa_for_each(&table->function_ids, index, sf)
		mlx5_sf_del(table, sf);
}
427 
mlx5_sf_esw_event(struct notifier_block * nb,unsigned long event,void * data)428 static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
429 {
430 	struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
431 						 priv.sf_table_esw_nb);
432 	const struct mlx5_esw_event_info *mode = data;
433 
434 	if (!dev->priv.sf_table)
435 		return 0;
436 
437 	switch (mode->new_mode) {
438 	case MLX5_ESWITCH_LEGACY:
439 		mlx5_sf_del_all(dev->priv.sf_table);
440 		break;
441 	default:
442 		break;
443 	}
444 
445 	return 0;
446 }
447 
/* Driver event handler for SF peer devlink registration: associates the
 * peer devlink instance from the event context with the matching SF's
 * devlink port. Returns NOTIFY_OK when the SF was found (the set result is
 * reported back via event_ctx->err), NOTIFY_DONE otherwise.
 */
static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
						 priv.sf_table_mdev_nb);
	struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data;
	struct mlx5_sf_table *table = dev->priv.sf_table;
	int ret = NOTIFY_DONE;
	struct mlx5_sf *sf;

	if (!table || event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
		return NOTIFY_DONE;

	mutex_lock(&table->sf_state_lock);
	sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
	if (!sf)
		goto out;

	event_ctx->err = devl_port_fn_devlink_set(&sf->dl_port.dl_port,
						  event_ctx->devlink);

	ret = NOTIFY_OK;
out:
	mutex_unlock(&table->sf_state_lock);
	return ret;
}
473 
/* Register the eswitch, vhca and mdev notifiers used by the SF table.
 * Skipped on SF devices themselves (mlx5_core_is_sf()). On failure,
 * previously registered notifiers are unregistered in reverse order.
 * Returns 0 on success or a negative errno.
 */
int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev)
{
	int err;

	if (mlx5_core_is_sf(dev))
		return 0;

	dev->priv.sf_table_esw_nb.notifier_call = mlx5_sf_esw_event;
	err = mlx5_esw_event_notifier_register(dev, &dev->priv.sf_table_esw_nb);
	if (err)
		return err;

	dev->priv.sf_table_vhca_nb.notifier_call = mlx5_sf_vhca_event;
	err = mlx5_vhca_event_notifier_register(dev,
						&dev->priv.sf_table_vhca_nb);
	if (err)
		goto vhca_err;

	dev->priv.sf_table_mdev_nb.notifier_call = mlx5_sf_mdev_event;
	err = mlx5_blocking_notifier_register(dev, &dev->priv.sf_table_mdev_nb);
	if (err)
		goto mdev_err;

	return 0;
mdev_err:
	mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
vhca_err:
	mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
	return err;
}
504 
mlx5_sf_table_init(struct mlx5_core_dev * dev)505 int mlx5_sf_table_init(struct mlx5_core_dev *dev)
506 {
507 	struct mlx5_sf_table *table;
508 
509 	if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
510 		return 0;
511 
512 	table = kzalloc_obj(*table);
513 	if (!table)
514 		return -ENOMEM;
515 
516 	mutex_init(&table->sf_state_lock);
517 	table->dev = dev;
518 	xa_init(&table->function_ids);
519 	dev->priv.sf_table = table;
520 
521 	return 0;
522 }
523 
/* Unregister the SF table notifiers, in reverse order of
 * mlx5_sf_notifiers_init(). No-op on SF devices, mirroring init.
 */
void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev)
{
	if (mlx5_core_is_sf(dev))
		return;

	mlx5_blocking_notifier_unregister(dev, &dev->priv.sf_table_mdev_nb);
	mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
	mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
}
533 
mlx5_sf_table_cleanup(struct mlx5_core_dev * dev)534 void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
535 {
536 	struct mlx5_sf_table *table = dev->priv.sf_table;
537 
538 	if (!table)
539 		return;
540 
541 	mutex_destroy(&table->sf_state_lock);
542 	WARN_ON(!xa_empty(&table->function_ids));
543 	kfree(table);
544 }
545 
mlx5_sf_table_empty(const struct mlx5_core_dev * dev)546 bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
547 {
548 	struct mlx5_sf_table *table = dev->priv.sf_table;
549 
550 	if (!table)
551 		return true;
552 
553 	return xa_empty(&table->function_ids);
554 }
555