// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd */

#include <linux/mlx5/driver.h>
#include "eswitch.h"
#include "priv.h"
#include "sf/dev/dev.h"
#include "mlx5_ifc_vhca_event.h"
#include "vhca_event.h"
#include "ecpf.h"
#define CREATE_TRACE_POINTS
#include "diag/sf_tracepoint.h"

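/* State of a single SF (sub-function) port. dl_port embeds the devlink
 * port, so the SF can be recovered from a devlink_port via container_of.
 * hw_state caches the last VHCA state reported by firmware events.
 */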
struct mlx5_sf {
	struct mlx5_devlink_port dl_port;
	unsigned int port_index;
	u32 controller;
	u16 id;
	u16 hw_fn_id;
	u16 hw_state;
};

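/* Map a generic devlink port back to its owning SF. */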
static struct mlx5_sf *mlx5_sf_by_dl_port(struct devlink_port *dl_port)
{
	struct mlx5_devlink_port *mlx5_dl_port = mlx5_devlink_port_get(dl_port);

	return container_of(mlx5_dl_port, struct mlx5_sf, dl_port);
}

struct mlx5_sf_table {
	struct mlx5_core_dev *dev; /* To refer from notifier context. */
	struct xarray function_ids; /* function id based lookup. */
	struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
	struct notifier_block esw_nb;
	struct notifier_block vhca_nb;
	struct notifier_block mdev_nb;
};

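/* SFs are keyed in the xarray by their HW function id, which is the
 * identifier carried by the vhca state and peer devlink notifications.
 */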
static struct mlx5_sf *
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
{
	return xa_load(&table->function_ids, fn_id);
}

static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL);
}

static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	xa_erase(&table->function_ids, sf->hw_fn_id);
}

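/* Allocate an SF: reserve an id in the SF HW table, derive the HW function
 * id and devlink port index from it, and publish the SF in the function id
 * xarray. Returns the new SF or an ERR_PTR; -EEXIST (sfnum already in use)
 * is reported back to the user through extack.
 */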
static struct mlx5_sf *
mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
	      u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
{
	unsigned int dl_port_index;
	struct mlx5_sf *sf;
	u16 hw_fn_id;
	int id_err;
	int err;

	if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
		return ERR_PTR(-EINVAL);
	}

	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
	if (id_err < 0) {
		err = id_err;
		goto id_err;
	}

	sf = kzalloc(sizeof(*sf), GFP_KERNEL);
	if (!sf) {
		err = -ENOMEM;
		goto alloc_err;
	}
	sf->id = id_err;
	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
	dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
	sf->port_index = dl_port_index;
	sf->hw_fn_id = hw_fn_id;
	sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
	sf->controller = controller;

	err = mlx5_sf_function_id_insert(table, sf);
	if (err)
		goto insert_err;

	return sf;

insert_err:
	kfree(sf);
alloc_err:
	mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
id_err:
	if (err == -EEXIST)
		NL_SET_ERR_MSG_MOD(extack, "SF already exists. Choose a different sfnum");
	return ERR_PTR(err);
}

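/* Release the HW table id and free the SF. The caller must already have
 * removed the SF from the function id xarray.
 */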
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
	trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
	kfree(sf);
}

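/* Map the firmware VHCA state to devlink state: ACTIVE and IN_USE count
 * as administratively active; IN_USE and TEARDOWN_REQUEST mean a driver
 * is (still) attached, which devlink reports as the ATTACHED opstate.
 */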
static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
{
	switch (hw_state) {
	case MLX5_VHCA_STATE_ACTIVE:
	case MLX5_VHCA_STATE_IN_USE:
		return DEVLINK_PORT_FN_STATE_ACTIVE;
	case MLX5_VHCA_STATE_INVALID:
	case MLX5_VHCA_STATE_ALLOCATED:
	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
	default:
		return DEVLINK_PORT_FN_STATE_INACTIVE;
	}
}

static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state)
{
	switch (hw_state) {
	case MLX5_VHCA_STATE_IN_USE:
	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
		return DEVLINK_PORT_FN_OPSTATE_ATTACHED;
	case MLX5_VHCA_STATE_INVALID:
	case MLX5_VHCA_STATE_ALLOCATED:
	case MLX5_VHCA_STATE_ACTIVE:
	default:
		return DEVLINK_PORT_FN_OPSTATE_DETACHED;
	}
}

static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
{
	return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
}

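/* devlink port function state get callback. The lock ensures a coherent
 * snapshot of hw_state against concurrent vhca event updates.
 */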
int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
				      enum devlink_port_fn_state *state,
				      enum devlink_port_fn_opstate *opstate,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
	struct mlx5_sf_table *table = dev->priv.sf_table;
	struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);

	mutex_lock(&table->sf_state_lock);
	*state = mlx5_sf_to_devlink_state(sf->hw_state);
	*opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
	mutex_unlock(&table->sf_state_lock);
	return 0;
}

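/* Activate an SF by enabling its HCA. Activation is only legal from the
 * ALLOCATED state; a deactivated SF whose driver has not yet detached is
 * rejected with -EBUSY. Must be called with sf_state_lock held.
 */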
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
			    struct netlink_ext_ack *extack)
{
	struct mlx5_vport *vport;
	int err;

	if (mlx5_sf_is_active(sf))
		return 0;
	if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
		NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
		return -EBUSY;
	}

	vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
	if (!vport->max_eqs_set && MLX5_CAP_GEN_2(dev, max_num_eqs_24b)) {
		err = mlx5_devlink_port_fn_max_io_eqs_set_sf_default(&sf->dl_port.dl_port,
								     extack);
		if (err)
			return err;
	}
	err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
	if (err)
		return err;

	sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
	trace_mlx5_sf_activate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
	return 0;
}

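/* Deactivate an SF by disabling its HCA. The state moves to
 * TEARDOWN_REQUEST until firmware confirms driver detach through a vhca
 * state event. Must be called with sf_state_lock held.
 */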
static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
{
	int err;

	if (!mlx5_sf_is_active(sf))
		return 0;

	err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id);
	if (err)
		return err;

	sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
	trace_mlx5_sf_deactivate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
	return 0;
}

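/* Transition an SF to the requested devlink state under sf_state_lock.
 * A request matching the current state is a no-op.
 */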
static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
			     struct mlx5_sf *sf,
			     enum devlink_port_fn_state state,
			     struct netlink_ext_ack *extack)
{
	int err = 0;

	mutex_lock(&table->sf_state_lock);
	if (state == mlx5_sf_to_devlink_state(sf->hw_state))
		goto out;
	if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
		err = mlx5_sf_activate(dev, sf, extack);
	else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
		err = mlx5_sf_deactivate(dev, sf);
	else
		err = -EINVAL;
out:
	mutex_unlock(&table->sf_state_lock);
	return err;
}

int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
				      enum devlink_port_fn_state state,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
	struct mlx5_sf_table *table = dev->priv.sf_table;
	struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);

	return mlx5_sf_state_set(dev, table, sf, state, extack);
}

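/* Create an SF and load its representor vport on the eswitch. On eswitch
 * load failure the SF is unpublished and freed again.
 */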
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
		       const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack,
		       struct devlink_port **dl_port)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_sf *sf;
	int err;

	sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
	if (IS_ERR(sf))
		return PTR_ERR(sf);

	err = mlx5_eswitch_load_sf_vport(esw, sf->hw_fn_id, MLX5_VPORT_UC_ADDR_CHANGE,
					 &sf->dl_port, new_attr->controller, new_attr->sfnum);
	if (err)
		goto esw_err;
	*dl_port = &sf->dl_port.dl_port;
	trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum);
	return 0;

esw_err:
	mlx5_sf_function_id_erase(table, sf);
	mlx5_sf_free(table, sf);
	return err;
}

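/* Validate user supplied attributes for a new SF port: only the PCI SF
 * flavour is supported, the port index cannot be user chosen, a unique
 * sfnum is mandatory, and an external controller is allowed only when
 * this device is the ECPF eswitch manager.
 */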
static int
mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack)
{
	if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition");
		return -EOPNOTSUPP;
	}
	if (new_attr->port_index_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Driver does not support user defined port index assignment");
		return -EOPNOTSUPP;
	}
	if (!new_attr->sfnum_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "User must provide unique sfnum. Driver does not support auto assignment");
		return -EOPNOTSUPP;
	}
	if (new_attr->controller_valid && new_attr->controller &&
	    !mlx5_core_is_ecpf_esw_manager(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
		return -EOPNOTSUPP;
	}
	if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
		return -EOPNOTSUPP;
	}
	return 0;
}

static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
{
	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
	       mlx5_sf_hw_table_supported(dev);
}

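/* devlink port new callback. SF ports can only be added when the SF table
 * is supported and the eswitch is in switchdev mode.
 */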
int mlx5_devlink_sf_port_new(struct devlink *devlink,
			     const struct devlink_port_new_attrs *new_attr,
			     struct netlink_ext_ack *extack,
			     struct devlink_port **dl_port)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_sf_table *table = dev->priv.sf_table;
	int err;

	err = mlx5_sf_new_check_attr(dev, new_attr, extack);
	if (err)
		return err;

	if (!mlx5_sf_table_supported(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "SF ports are not supported.");
		return -EOPNOTSUPP;
	}

	if (!is_mdev_switchdev_mode(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "SF ports are only supported in eswitch switchdev mode.");
		return -EOPNOTSUPP;
	}

	return mlx5_sf_add(dev, table, new_attr, extack, dl_port);
}

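/* Tear down an SF. An SF that was never activated is freed immediately;
 * otherwise, freeing of the HW id is deferred until firmware confirms
 * driver detach through a vhca state event.
 */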
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	struct mlx5_vport *vport;

	mutex_lock(&table->sf_state_lock);
	vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
	vport->max_eqs_set = false;

	mlx5_sf_function_id_erase(table, sf);

	if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
		mlx5_sf_free(table, sf);
	} else if (mlx5_sf_is_active(sf)) {
		/* Even if it is active, treat it as in use, because by the
		 * time it is disabled here it may still be getting used. So
		 * always wait for the event to ensure it is recycled only
		 * after firmware confirms that the driver has detached it.
		 */
		mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
	} else {
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
	}

	mutex_unlock(&table->sf_state_lock);
}

static void mlx5_sf_del(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	struct mlx5_eswitch *esw = table->dev->priv.eswitch;

	mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
	mlx5_sf_dealloc(table, sf);
}

int mlx5_devlink_sf_port_del(struct devlink *devlink,
			     struct devlink_port *dl_port,
			     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_sf_table *table = dev->priv.sf_table;
	struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);

	mlx5_sf_del(table, sf);
	return 0;
}

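/* Only a few VHCA state transitions are meaningful to track:
 * ACTIVE <-> IN_USE (driver attach/detach) and TEARDOWN_REQUEST ->
 * ALLOCATED (teardown completed). All other transitions are ignored.
 */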
static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
{
	if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE)
		return true;

	if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE)
		return true;

	if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST &&
	    new_state == MLX5_VHCA_STATE_ALLOCATED)
		return true;

	return false;
}

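/* vhca event handler: cache the new VHCA state of an SF reported by
 * firmware, if the transition is one the table cares about.
 */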
static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
	const struct mlx5_vhca_state_event *event = data;
	bool update = false;
	struct mlx5_sf *sf;

	mutex_lock(&table->sf_state_lock);
	sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
	if (!sf)
		goto unlock;

	/* When a driver attaches to or detaches from a function, an event
	 * notifies of such a state change.
	 */
	update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
	if (update)
		sf->hw_state = event->new_vhca_state;
	trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
				   sf->hw_fn_id, sf->hw_state);
unlock:
	mutex_unlock(&table->sf_state_lock);
	return 0;
}

static void mlx5_sf_del_all(struct mlx5_sf_table *table)
{
	unsigned long index;
	struct mlx5_sf *sf;

	xa_for_each(&table->function_ids, index, sf)
		mlx5_sf_del(table, sf);
}

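/* eswitch mode change handler: all SF ports are deleted when the eswitch
 * switches back to legacy mode.
 */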
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
	const struct mlx5_esw_event_info *mode = data;

	switch (mode->new_mode) {
	case MLX5_ESWITCH_LEGACY:
		mlx5_sf_del_all(table);
		break;
	default:
		break;
	}

	return 0;
}

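/* Peer devlink event handler: associate the SF's peer devlink instance
 * (reported by the SF device side) with the SF's devlink port.
 */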
static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb);
	struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data;
	int ret = NOTIFY_DONE;
	struct mlx5_sf *sf;

	if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
		return NOTIFY_DONE;

	mutex_lock(&table->sf_state_lock);
	sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
	if (!sf)
		goto out;

	event_ctx->err = devl_port_fn_devlink_set(&sf->dl_port.dl_port,
						  event_ctx->devlink);

	ret = NOTIFY_OK;
out:
	mutex_unlock(&table->sf_state_lock);
	return ret;
}

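/* Allocate the SF table and register the eswitch, vhca and mdev event
 * notifiers. Quietly succeeds without a table when SFs or vhca events
 * are unsupported on this device.
 */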
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table;
	int err;

	if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
		return 0;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	mutex_init(&table->sf_state_lock);
	table->dev = dev;
	xa_init(&table->function_ids);
	dev->priv.sf_table = table;
	table->esw_nb.notifier_call = mlx5_sf_esw_event;
	err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
	if (err)
		goto reg_err;

	table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
	err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
	if (err)
		goto vhca_err;

	table->mdev_nb.notifier_call = mlx5_sf_mdev_event;
	mlx5_blocking_notifier_register(dev, &table->mdev_nb);

	return 0;

vhca_err:
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
reg_err:
	mutex_destroy(&table->sf_state_lock);
	kfree(table);
	dev->priv.sf_table = NULL;
	return err;
}

void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table = dev->priv.sf_table;

	if (!table)
		return;

	mlx5_blocking_notifier_unregister(dev, &table->mdev_nb);
	mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
	mutex_destroy(&table->sf_state_lock);
	WARN_ON(!xa_empty(&table->function_ids));
	kfree(table);
}