// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */

#include <devlink.h>

#include "mlx5_core.h"
#include "fw_reset.h"
#include "fs_core.h"
#include "eswitch.h"
#include "esw/qos.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
#include "lib/nv_param.h"

static int mlx5_devlink_flash_update(struct devlink *devlink,
				     struct devlink_flash_update_params *params,
				     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	return mlx5_firmware_flash(dev, params->fw, extack);
}

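/*
 * Firmware versions are reported by the device as a packed 32-bit word:
 * major in bits 31:24, minor in bits 23:16 and subminor in bits 15:0.
 * The helpers below extract the individual fields.
 */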
static u8 mlx5_fw_ver_major(u32 version)
{
	return (version >> 24) & 0xff;
}

static u8 mlx5_fw_ver_minor(u32 version)
{
	return (version >> 16) & 0xff;
}

static u16 mlx5_fw_ver_subminor(u32 version)
{
	return version & 0xffff;
}

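/*
 * Report serial numbers read from the PCI VPD: the read-only "SN" keyword
 * is reported as the board serial number and the vendor-specific "V3"
 * keyword as the device serial number. Missing VPD data is not treated as
 * an error.
 */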
static int mlx5_devlink_serial_numbers_put(struct mlx5_core_dev *dev,
					   struct devlink_info_req *req,
					   struct netlink_ext_ack *extack)
{
	struct pci_dev *pdev = dev->pdev;
	unsigned int vpd_size, kw_len;
	char *str, *end;
	u8 *vpd_data;
	int err = 0;
	int start;

	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
	if (IS_ERR(vpd_data))
		return 0;

	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					     PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
	if (start >= 0) {
		str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL);
		if (!str) {
			err = -ENOMEM;
			goto end;
		}
		end = strchrnul(str, ' ');
		*end = '\0';
		err = devlink_info_board_serial_number_put(req, str);
		kfree(str);
		if (err)
			goto end;
	}

	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "V3", &kw_len);
	if (start >= 0) {
		str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL);
		if (!str) {
			err = -ENOMEM;
			goto end;
		}
		err = devlink_info_serial_number_put(req, str);
		kfree(str);
		if (err)
			goto end;
	}

end:
	kfree(vpd_data);
	return err;
}

#define DEVLINK_FW_STRING_LEN 32

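/*
 * Fill the "devlink dev info" response for a PF, e.g. (illustrative
 * address and values):
 *
 *   $ devlink dev info pci/0000:08:00.0
 *     versions:
 *       fixed:
 *         fw.psid MT_0000000001
 *       running:
 *         fw.version 22.36.1010
 *       stored:
 *         fw.version 22.36.1010
 */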
static int
mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	char version_str[DEVLINK_FW_STRING_LEN];
	u32 running_fw, stored_fw;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	err = mlx5_devlink_serial_numbers_put(dev, req, extack);
	if (err)
		return err;

	err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
	if (err)
		return err;

	err = mlx5_fw_version_query(dev, &running_fw, &stored_fw);
	if (err)
		return err;

	snprintf(version_str, sizeof(version_str), "%d.%d.%04d",
		 mlx5_fw_ver_major(running_fw), mlx5_fw_ver_minor(running_fw),
		 mlx5_fw_ver_subminor(running_fw));
	err = devlink_info_version_running_put(req, "fw.version", version_str);
	if (err)
		return err;
	err = devlink_info_version_running_put(req,
					       DEVLINK_INFO_VERSION_GENERIC_FW,
					       version_str);
	if (err)
		return err;

	/* No pending FW version; report the running version as the stored one. */
	if (stored_fw == 0)
		stored_fw = running_fw;

	snprintf(version_str, sizeof(version_str), "%d.%d.%04d",
		 mlx5_fw_ver_major(stored_fw), mlx5_fw_ver_minor(stored_fw),
		 mlx5_fw_ver_subminor(stored_fw));
	err = devlink_info_version_stored_put(req, "fw.version", version_str);
	if (err)
		return err;
	return devlink_info_version_stored_put(req,
					       DEVLINK_INFO_VERSION_GENERIC_FW,
					       version_str);
}

static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u8 reset_level, reset_type, net_port_alive;
	int err;

	err = mlx5_fw_reset_query(dev, &reset_level, &reset_type);
	if (err)
		return err;
	if (!(reset_level & MLX5_MFRL_REG_RESET_LEVEL3)) {
		NL_SET_ERR_MSG_MOD(extack, "FW activate requires reboot");
		return -EINVAL;
	}

	net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE);
	err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive, extack);
	if (err)
		return err;

	err = mlx5_fw_reset_wait_reset_done(dev);
	if (err)
		return err;

	mlx5_sync_reset_unload_flow(dev, true);
	err = mlx5_health_wait_pci_up(dev);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");

	return err;
}

static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
					      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u8 reset_level;
	int err;

	err = mlx5_fw_reset_query(dev, &reset_level, NULL);
	if (err)
		return err;
	if (!(reset_level & MLX5_MFRL_REG_RESET_LEVEL0)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "FW upgrade to the stored FW can't be done by FW live patching");
		return -EINVAL;
	}

	return mlx5_fw_reset_set_live_patch(dev);
}

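/*
 * The reload flows below are driven from user space, e.g. (illustrative
 * PCI address):
 *
 *   # Re-initialize the driver:
 *   $ devlink dev reload pci/0000:08:00.0 action driver_reinit
 *
 *   # Activate the stored FW via full reset, or via live patch when allowed:
 *   $ devlink dev reload pci/0000:08:00.0 action fw_activate
 *   $ devlink dev reload pci/0000:08:00.0 action fw_activate limit no_reset
 */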
static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
				    enum devlink_reload_action action,
				    enum devlink_reload_limit limit,
				    struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct pci_dev *pdev = dev->pdev;
	int ret = 0;

	if (mlx5_dev_is_lightweight(dev)) {
		if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
			return -EOPNOTSUPP;
		mlx5_unload_one_light(dev);
		return 0;
	}

	if (mlx5_core_is_mp_slave(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported for multi port slave");
		return -EOPNOTSUPP;
	}

	if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE &&
	    !dev->priv.fw_reset) {
		NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
		return -EOPNOTSUPP;
	}

	if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
		NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		mlx5_unload_one_devl_locked(dev, false);
		break;
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
			ret = mlx5_devlink_trigger_fw_live_patch(devlink, extack);
		else
			ret = mlx5_devlink_reload_fw_activate(devlink, extack);
		break;
	default:
		/* Unsupported action should not get to this function */
		WARN_ON(1);
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
				  enum devlink_reload_limit limit, u32 *actions_performed,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int ret = 0;

	*actions_performed = BIT(action);
	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		if (mlx5_dev_is_lightweight(dev)) {
			mlx5_fw_reporters_create(dev);
			return mlx5_init_one_devl_locked(dev);
		}
		ret = mlx5_load_one_devl_locked(dev, false);
		break;
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
			break;
		/* On fw_activate, the driver is also reloaded and reinit is performed */
		*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
		ret = mlx5_load_one_devl_locked(dev, true);
		if (ret)
			return ret;
		ret = mlx5_fw_reset_verify_fw_complete(dev, extack);
		break;
	default:
		/* Unsupported action should not get to this function */
		WARN_ON(1);
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
{
	struct mlx5_devlink_trap *dl_trap;

	list_for_each_entry(dl_trap, &dev->priv.traps, list)
		if (dl_trap->trap.id == trap_id)
			return dl_trap;

	return NULL;
}

static int mlx5_devlink_trap_init(struct devlink *devlink, const struct devlink_trap *trap,
				  void *trap_ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_devlink_trap *dl_trap;

	dl_trap = kzalloc(sizeof(*dl_trap), GFP_KERNEL);
	if (!dl_trap)
		return -ENOMEM;

	dl_trap->trap.id = trap->id;
	dl_trap->trap.action = DEVLINK_TRAP_ACTION_DROP;
	dl_trap->item = trap_ctx;

	if (mlx5_find_trap_by_id(dev, trap->id)) {
		kfree(dl_trap);
		mlx5_core_err(dev, "Devlink trap: Trap 0x%x already found", trap->id);
		return -EEXIST;
	}

	list_add_tail(&dl_trap->list, &dev->priv.traps);
	return 0;
}

static void mlx5_devlink_trap_fini(struct devlink *devlink, const struct devlink_trap *trap,
				   void *trap_ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_devlink_trap *dl_trap;

	dl_trap = mlx5_find_trap_by_id(dev, trap->id);
	if (!dl_trap) {
		mlx5_core_err(dev, "Devlink trap: Missing trap id 0x%x", trap->id);
		return;
	}
	list_del(&dl_trap->list);
	kfree(dl_trap);
}

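/*
 * Traps are created with DROP action; user space switches a trap to TRAP
 * action with the standard devlink command, e.g. (illustrative address):
 *
 *   $ devlink trap set pci/0000:08:00.0 trap ingress_vlan_filter action trap
 */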
static int mlx5_devlink_trap_action_set(struct devlink *devlink,
					const struct devlink_trap *trap,
					enum devlink_trap_action action,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_devlink_trap_event_ctx trap_event_ctx;
	enum devlink_trap_action action_orig;
	struct mlx5_devlink_trap *dl_trap;
	int err;

	if (is_mdev_switchdev_mode(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode");
		return -EOPNOTSUPP;
	}

	dl_trap = mlx5_find_trap_by_id(dev, trap->id);
	if (!dl_trap) {
		mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
		return -EINVAL;
	}

	if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP)
		return -EOPNOTSUPP;

	if (action == dl_trap->trap.action)
		return 0;

	action_orig = dl_trap->trap.action;
	dl_trap->trap.action = action;
	trap_event_ctx.trap = &dl_trap->trap;
	trap_event_ctx.err = 0;
	err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
						&trap_event_ctx);
	if (err == NOTIFY_BAD)
		dl_trap->trap.action = action_orig;

	return trap_event_ctx.err;
}

static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
	.eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
	.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
	.rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set,
	.rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set,
	.rate_leaf_tc_bw_set = mlx5_esw_devlink_rate_leaf_tc_bw_set,
	.rate_node_tc_bw_set = mlx5_esw_devlink_rate_node_tc_bw_set,
	.rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set,
	.rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set,
	.rate_node_new = mlx5_esw_devlink_rate_node_new,
	.rate_node_del = mlx5_esw_devlink_rate_node_del,
	.rate_leaf_parent_set = mlx5_esw_devlink_rate_leaf_parent_set,
	.rate_node_parent_set = mlx5_esw_devlink_rate_node_parent_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
	.port_new = mlx5_devlink_sf_port_new,
#endif
	.flash_update = mlx5_devlink_flash_update,
	.info_get = mlx5_devlink_info_get,
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
	.reload_down = mlx5_devlink_reload_down,
	.reload_up = mlx5_devlink_reload_up,
	.trap_init = mlx5_devlink_trap_init,
	.trap_fini = mlx5_devlink_trap_fini,
	.trap_action_set = mlx5_devlink_trap_action_set,
};

void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
			      struct devlink_port *dl_port)
{
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_devlink_trap *dl_trap;

	dl_trap = mlx5_find_trap_by_id(dev, trap_id);
	if (!dl_trap) {
		mlx5_core_err(dev, "Devlink trap: Report on invalid trap id 0x%x", trap_id);
		return;
	}

	if (dl_trap->trap.action != DEVLINK_TRAP_ACTION_TRAP) {
		mlx5_core_dbg(dev, "Devlink trap: Trap id %d has action %d", trap_id,
			      dl_trap->trap.action);
		return;
	}
	devlink_trap_report(devlink, skb, dl_trap->item, dl_port, NULL);
}

int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev)
{
	struct mlx5_devlink_trap *dl_trap;
	int count = 0;

	list_for_each_entry(dl_trap, &dev->priv.traps, list)
		if (dl_trap->trap.action == DEVLINK_TRAP_ACTION_TRAP)
			count++;

	return count;
}

int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
				  enum devlink_trap_action *action)
{
	struct mlx5_devlink_trap *dl_trap;

	dl_trap = mlx5_find_trap_by_id(dev, trap_id);
	if (!dl_trap) {
		mlx5_core_err(dev, "Devlink trap: Get action on invalid trap id 0x%x",
			      trap_id);
		return -EINVAL;
	}

	*action = dl_trap->trap.action;
	return 0;
}

struct devlink *mlx5_devlink_alloc(struct device *dev)
{
	return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev),
			     dev);
}

void mlx5_devlink_free(struct devlink *devlink)
{
	devlink_free(devlink);
}

static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	bool new_state = val.vbool;

	if (new_state && !MLX5_CAP_GEN(dev, roce) &&
	    !(MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
		return -EOPNOTSUPP;
	}
	if (mlx5_core_is_mp_slave(dev) || mlx5_lag_is_active(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multi port slave/Lag device can't configure RoCE");
		return -EOPNOTSUPP;
	}

	return 0;
}

#ifdef CONFIG_MLX5_ESWITCH
static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id,
						 union devlink_param_value val,
						 struct netlink_ext_ack *extack)
{
	int group_num = val.vu32;

	if (group_num < 1 || group_num > 1024) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported group number, supported range is 1-1024");
		return -EOPNOTSUPP;
	}

	return 0;
}
#endif

static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
					  union devlink_param_value val,
					  struct netlink_ext_ack *extack)
{
	return (val.vu32 >= 64 && val.vu32 <= 4096) ? 0 : -EINVAL;
}

static int
mlx5_devlink_hairpin_num_queues_validate(struct devlink *devlink, u32 id,
					 union devlink_param_value val,
					 struct netlink_ext_ack *extack)
{
	return val.vu32 ? 0 : -EINVAL;
}

static int
mlx5_devlink_hairpin_queue_size_validate(struct devlink *devlink, u32 id,
					 union devlink_param_value val,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u32 val32 = val.vu32;

	if (!is_power_of_2(val32)) {
		NL_SET_ERR_MSG_MOD(extack, "Value is not power of two");
		return -EINVAL;
	}

	if (val32 > BIT(MLX5_CAP_GEN(dev, log_max_hairpin_num_packets))) {
		NL_SET_ERR_MSG_FMT_MOD(
			extack, "Maximum hairpin queue size is %lu",
			BIT(MLX5_CAP_GEN(dev, log_max_hairpin_num_packets)));
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_num_doorbells_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *mdev = devlink_priv(devlink);
	u32 val32 = val.vu32;
	u32 max_num_channels;

	max_num_channels = mlx5e_get_max_num_channels(mdev);
	if (val32 > max_num_channels) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "Requested num_doorbells (%u) exceeds maximum number of channels (%u)",
				       val32, max_num_channels);
		return -EINVAL;
	}

	return 0;
}

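/*
 * Default hairpin sizing: one hairpin queue per 50Gbps share of the maximum
 * link speed (e.g. a 200Gbps port defaults to 4 hairpin queues), with the
 * default queue size bounded by the log_max_hairpin_num_packets capability.
 */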
static void mlx5_devlink_hairpin_params_init_values(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;
	u32 link_speed = 0;
	u64 link_speed64;

	/* Set one hairpin queue pair per each 50Gbps share of the link */
	mlx5_port_max_linkspeed(dev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);

	value.vu32 = link_speed64;
	devl_param_driverinit_value_set(
		devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, value);

	value.vu32 =
		BIT(min_t(u32, 16 - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(dev),
			  MLX5_CAP_GEN(dev, log_max_hairpin_num_packets)));
	devl_param_driverinit_value_set(
		devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE, value);
}

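/*
 * All parameters below use the driverinit cmode, so a new value only takes
 * effect after a devlink reload, e.g. (illustrative address):
 *
 *   $ devlink dev param set pci/0000:08:00.0 name enable_roce \
 *             value false cmode driverinit
 *   $ devlink dev reload pci/0000:08:00.0
 */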
static const struct devlink_param mlx5_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_enable_roce_validate),
#ifdef CONFIG_MLX5_ESWITCH
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
			     "fdb_large_groups", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			     NULL, NULL,
			     mlx5_devlink_large_group_num_validate),
#endif
	DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_eq_depth_validate),
	DEVLINK_PARAM_GENERIC(EVENT_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_eq_depth_validate),
};

static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;

	value.vbool = MLX5_CAP_GEN(dev, roce) && !mlx5_dev_is_lightweight(dev);
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
					value);

#ifdef CONFIG_MLX5_ESWITCH
	value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
	devl_param_driverinit_value_set(devlink,
					MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
					value);
#endif

	value.vu32 = MLX5_COMP_EQ_SIZE;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
					value);

	value.vu32 = MLX5_NUM_ASYNC_EQE;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
					value);
}

static const struct devlink_param mlx5_devlink_eth_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_ETH, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, NULL),
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES,
			     "hairpin_num_queues", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			     mlx5_devlink_hairpin_num_queues_validate),
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE,
			     "hairpin_queue_size", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			     mlx5_devlink_hairpin_queue_size_validate),
	DEVLINK_PARAM_GENERIC(NUM_DOORBELLS,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			      mlx5_devlink_num_doorbells_validate),
};

static int mlx5_devlink_eth_params_register(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;
	int err;

	if (!mlx5_eth_supported(dev))
		return 0;

	err = devl_params_register(devlink, mlx5_devlink_eth_params,
				   ARRAY_SIZE(mlx5_devlink_eth_params));
	if (err)
		return err;

	value.vbool = !mlx5_dev_is_lightweight(dev);
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
					value);

	mlx5_devlink_hairpin_params_init_values(devlink);

	value.vu32 = MLX5_DEFAULT_NUM_DOORBELLS;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS,
					value);
	return 0;
}

static void mlx5_devlink_eth_params_unregister(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!mlx5_eth_supported(dev))
		return;

	devl_params_unregister(devlink, mlx5_devlink_eth_params,
			       ARRAY_SIZE(mlx5_devlink_eth_params));
}

#define MLX5_PCIE_CONG_THRESH_MAX 10000
#define MLX5_PCIE_CONG_THRESH_DEF_LOW 7500
#define MLX5_PCIE_CONG_THRESH_DEF_HIGH 9000

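/*
 * PCIe congestion thresholds are expressed in hundredths of a percent
 * (10000 == 100%), so the defaults above correspond to 75% (low) and
 * 90% (high).
 */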
static int
mlx5_devlink_pcie_cong_thresh_validate(struct devlink *devl, u32 id,
				       union devlink_param_value val,
				       struct netlink_ext_ack *extack)
{
	if (val.vu16 > MLX5_PCIE_CONG_THRESH_MAX) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Value %u > max supported (%u)",
				       val.vu16, MLX5_PCIE_CONG_THRESH_MAX);

		return -EINVAL;
	}

	switch (id) {
	case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW:
	case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH:
	case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW:
	case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static void mlx5_devlink_pcie_cong_init_values(struct devlink *devlink)
{
	union devlink_param_value value;
	u32 id;

	value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_LOW;
	id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW;
	devl_param_driverinit_value_set(devlink, id, value);

	value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_HIGH;
	id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH;
	devl_param_driverinit_value_set(devlink, id, value);

	value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_LOW;
	id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW;
	devl_param_driverinit_value_set(devlink, id, value);

	value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_HIGH;
	id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH;
	devl_param_driverinit_value_set(devlink, id, value);
}

static const struct devlink_param mlx5_devlink_pcie_cong_params[] = {
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
			     "pcie_cong_inbound_low", DEVLINK_PARAM_TYPE_U16,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			     mlx5_devlink_pcie_cong_thresh_validate),
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
			     "pcie_cong_inbound_high", DEVLINK_PARAM_TYPE_U16,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			     mlx5_devlink_pcie_cong_thresh_validate),
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
			     "pcie_cong_outbound_low", DEVLINK_PARAM_TYPE_U16,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			     mlx5_devlink_pcie_cong_thresh_validate),
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
			     "pcie_cong_outbound_high", DEVLINK_PARAM_TYPE_U16,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			     mlx5_devlink_pcie_cong_thresh_validate),
};

static int mlx5_devlink_pcie_cong_params_register(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	if (!mlx5_pcie_cong_event_supported(dev))
		return 0;

	err = devl_params_register(devlink, mlx5_devlink_pcie_cong_params,
				   ARRAY_SIZE(mlx5_devlink_pcie_cong_params));
	if (err)
		return err;

	mlx5_devlink_pcie_cong_init_values(devlink);

	return 0;
}

static void mlx5_devlink_pcie_cong_params_unregister(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!mlx5_pcie_cong_event_supported(dev))
		return;

	devl_params_unregister(devlink, mlx5_devlink_pcie_cong_params,
			       ARRAY_SIZE(mlx5_devlink_pcie_cong_params));
}

static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	bool new_state = val.vbool;

	if (new_state && !mlx5_rdma_supported(dev))
		return -EOPNOTSUPP;
	return 0;
}

static const struct devlink_param mlx5_devlink_rdma_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_enable_rdma_validate),
};

static int mlx5_devlink_rdma_params_register(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;
	int err;

	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return 0;

	err = devl_params_register(devlink, mlx5_devlink_rdma_params,
				   ARRAY_SIZE(mlx5_devlink_rdma_params));
	if (err)
		return err;

	value.vbool = !mlx5_dev_is_lightweight(dev);
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
					value);
	return 0;
}

static void mlx5_devlink_rdma_params_unregister(struct devlink *devlink)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return;

	devl_params_unregister(devlink, mlx5_devlink_rdma_params,
			       ARRAY_SIZE(mlx5_devlink_rdma_params));
}

static const struct devlink_param mlx5_devlink_vnet_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_VNET, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, NULL),
};

static int mlx5_devlink_vnet_params_register(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;
	int err;

	if (!mlx5_vnet_supported(dev))
		return 0;

	err = devl_params_register(devlink, mlx5_devlink_vnet_params,
				   ARRAY_SIZE(mlx5_devlink_vnet_params));
	if (err)
		return err;

	value.vbool = !mlx5_dev_is_lightweight(dev);
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
					value);
	return 0;
}

static void mlx5_devlink_vnet_params_unregister(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!mlx5_vnet_supported(dev))
		return;

	devl_params_unregister(devlink, mlx5_devlink_vnet_params,
			       ARRAY_SIZE(mlx5_devlink_vnet_params));
}

static int mlx5_devlink_auxdev_params_register(struct devlink *devlink)
{
	int err;

	err = mlx5_devlink_eth_params_register(devlink);
	if (err)
		return err;

	err = mlx5_devlink_rdma_params_register(devlink);
	if (err)
		goto rdma_err;

	err = mlx5_devlink_vnet_params_register(devlink);
	if (err)
		goto vnet_err;
	return 0;

vnet_err:
	mlx5_devlink_rdma_params_unregister(devlink);
rdma_err:
	mlx5_devlink_eth_params_unregister(devlink);
	return err;
}

static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink)
{
	mlx5_devlink_vnet_params_unregister(devlink);
	mlx5_devlink_rdma_params_unregister(devlink);
	mlx5_devlink_eth_params_unregister(devlink);
}

static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (val.vu32 == 0) {
		NL_SET_ERR_MSG_MOD(extack, "max_macs value must be greater than 0");
		return -EINVAL;
	}

	if (!is_power_of_2(val.vu32)) {
		NL_SET_ERR_MSG_MOD(extack, "Only power of 2 values are supported for max_macs");
		return -EINVAL;
	}

	if (ilog2(val.vu32) >
	    MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list)) {
		NL_SET_ERR_MSG_MOD(extack, "max_macs value is out of the supported range");
		return -EINVAL;
	}

	return 0;
}

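/*
 * The generic max_macs param accepts only power-of-two values up to the
 * device capability, e.g. (illustrative address):
 *
 *   $ devlink dev param set pci/0000:08:00.0 name max_macs \
 *             value 128 cmode driverinit
 */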
static const struct devlink_param mlx5_devlink_max_uc_list_params[] = {
	DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_max_uc_list_validate),
};

static int mlx5_devlink_max_uc_list_params_register(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	union devlink_param_value value;
	int err;

	if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
		return 0;

	err = devl_params_register(devlink, mlx5_devlink_max_uc_list_params,
				   ARRAY_SIZE(mlx5_devlink_max_uc_list_params));
	if (err)
		return err;

	value.vu32 = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list);
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
					value);
	return 0;
}

static void
mlx5_devlink_max_uc_list_params_unregister(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
		return;

	devl_params_unregister(devlink, mlx5_devlink_max_uc_list_params,
			       ARRAY_SIZE(mlx5_devlink_max_uc_list_params));
}

#define MLX5_TRAP_DROP(_id, _group_id) \
	DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
			     DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
			     DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT)

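/*
 * Generic L2 drop traps currently exposed by mlx5: packets dropped by the
 * ingress VLAN filter and by the destination MAC filter. Both start with
 * DROP action and report the ingress port as metadata.
 */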
static const struct devlink_trap mlx5_traps_arr[] = {
	MLX5_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
	MLX5_TRAP_DROP(DMAC_FILTER, L2_DROPS),
};

static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
	DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
};

int mlx5_devlink_traps_register(struct devlink *devlink)
{
	struct mlx5_core_dev *core_dev = devlink_priv(devlink);
	int err;

	err = devl_trap_groups_register(devlink, mlx5_trap_groups_arr,
					ARRAY_SIZE(mlx5_trap_groups_arr));
	if (err)
		return err;

	err = devl_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
				  &core_dev->priv);
	if (err)
		goto err_trap_group;
	return 0;

err_trap_group:
	devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
				    ARRAY_SIZE(mlx5_trap_groups_arr));
	return err;
}

void mlx5_devlink_traps_unregister(struct devlink *devlink)
{
	devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
	devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
				    ARRAY_SIZE(mlx5_trap_groups_arr));
}

int mlx5_devlink_params_register(struct devlink *devlink)
{
	int err;

	/* Only driverinit params should be registered here. Runtime params
	 * should be registered by the code whose behaviour they configure.
	 */

	err = devl_params_register(devlink, mlx5_devlink_params,
				   ARRAY_SIZE(mlx5_devlink_params));
	if (err)
		return err;

	mlx5_devlink_set_params_init_values(devlink);

	err = mlx5_devlink_auxdev_params_register(devlink);
	if (err)
		goto auxdev_reg_err;

	err = mlx5_devlink_max_uc_list_params_register(devlink);
	if (err)
		goto max_uc_list_err;

	err = mlx5_devlink_pcie_cong_params_register(devlink);
	if (err)
		goto pcie_cong_err;

	err = mlx5_nv_param_register_dl_params(devlink);
	if (err)
		goto nv_param_err;

	return 0;

nv_param_err:
	mlx5_devlink_pcie_cong_params_unregister(devlink);
pcie_cong_err:
	mlx5_devlink_max_uc_list_params_unregister(devlink);
max_uc_list_err:
	mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
	devl_params_unregister(devlink, mlx5_devlink_params,
			       ARRAY_SIZE(mlx5_devlink_params));
	return err;
}

void mlx5_devlink_params_unregister(struct devlink *devlink)
{
	mlx5_nv_param_unregister_dl_params(devlink);
	mlx5_devlink_pcie_cong_params_unregister(devlink);
	mlx5_devlink_max_uc_list_params_unregister(devlink);
	mlx5_devlink_auxdev_params_unregister(devlink);
	devl_params_unregister(devlink, mlx5_devlink_params,
			       ARRAY_SIZE(mlx5_devlink_params));
}