/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/netdev_lock.h>
#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_ethtool.h"
#include "bnxt_ulp.h"
#include "bnxt_ptp.h"
#include "bnxt_coredump.h"
#include "bnxt_nvm_defs.h"

static void __bnxt_fw_recover(struct bnxt *bp)
{
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
	    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
		bnxt_fw_reset(bp);
	else
		bnxt_fw_exception(bp);
}

static int
bnxt_dl_flash_update(struct devlink *dl,
		     struct devlink_flash_update_params *params,
		     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	int rc;

	devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
	rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack);
	if (!rc)
		devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0);
	else
		devlink_flash_update_status_notify(dl, "Flashing failed", NULL, 0, 0);
	return rc;
}

static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
		return -EOPNOTSUPP;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT);
	if (remote_reset)
		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS);

	return hwrm_req_send(bp, req);
}

static char *bnxt_health_severity_str(enum bnxt_health_severity severity)
{
	switch (severity) {
	case SEVERITY_NORMAL: return "normal";
	case SEVERITY_WARNING: return "warning";
	case SEVERITY_RECOVERABLE: return "recoverable";
	case SEVERITY_FATAL: return "fatal";
	default: return "unknown";
	}
}

static char *bnxt_health_remedy_str(enum bnxt_health_remedy remedy)
{
	switch (remedy) {
	case REMEDY_DEVLINK_RECOVER: return "devlink recover";
	case REMEDY_POWER_CYCLE_DEVICE: return "device power cycle";
	case REMEDY_POWER_CYCLE_HOST: return "host power cycle";
	case REMEDY_FW_UPDATE: return "update firmware";
	case REMEDY_HW_REPLACE: return "replace hardware";
	default: return "unknown";
	}
}

static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter,
			    struct devlink_fmsg *fmsg,
			    struct netlink_ext_ack *extack)
{
	struct bnxt *bp = devlink_health_reporter_priv(reporter);
	struct bnxt_fw_health *h = bp->fw_health;
	u32 fw_status, fw_resets;

	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		devlink_fmsg_string_pair_put(fmsg, "Status", "recovering");
		return 0;
	}

	if (!h->status_reliable) {
		devlink_fmsg_string_pair_put(fmsg, "Status", "unknown");
		return 0;
	}

	mutex_lock(&h->lock);
	fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
	if (BNXT_FW_IS_BOOTING(fw_status)) {
		devlink_fmsg_string_pair_put(fmsg, "Status", "initializing");
	} else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) {
		if (!h->severity) {
			h->severity = SEVERITY_FATAL;
			h->remedy = REMEDY_POWER_CYCLE_DEVICE;
			h->diagnoses++;
			devlink_health_report(h->fw_reporter,
					      "FW error diagnosed", h);
		}
		devlink_fmsg_string_pair_put(fmsg, "Status", "error");
		devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status);
	} else {
		devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
	}

	devlink_fmsg_string_pair_put(fmsg, "Severity",
				     bnxt_health_severity_str(h->severity));

	if (h->severity) {
		devlink_fmsg_string_pair_put(fmsg, "Remedy",
					     bnxt_health_remedy_str(h->remedy));
		if (h->remedy == REMEDY_DEVLINK_RECOVER)
			devlink_fmsg_string_pair_put(fmsg, "Impact",
						     "traffic+ntuple_cfg");
	}

	mutex_unlock(&h->lock);
	if (!h->resets_reliable)
		return 0;

	fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
	devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets);
	devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests);
	devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals);
	devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries);
	devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities);
	devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses);
	return 0;
}

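/* devlink health "fw" reporter .dump callback.  Captures a live firmware
 * coredump and returns it in the fmsg as a "core" nest holding the raw
 * "data" and its "size".  Dumping is not supported when invoked from the
 * devlink_health_report() context.
 */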
static int bnxt_fw_dump(struct devlink_health_reporter *reporter,
			struct devlink_fmsg *fmsg, void *priv_ctx,
			struct netlink_ext_ack *extack)
{
	struct bnxt *bp = devlink_health_reporter_priv(reporter);
	u32 dump_len;
	void *data;
	int rc;

	/* TODO: no firmware dump support in devlink_health_report() context */
	if (priv_ctx)
		return -EOPNOTSUPP;

	dump_len = bnxt_get_coredump_length(bp, BNXT_DUMP_LIVE);
	if (!dump_len)
		return -EIO;

	data = vmalloc(dump_len);
	if (!data)
		return -ENOMEM;

	rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len);
	if (!rc) {
		devlink_fmsg_pair_nest_start(fmsg, "core");
		devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len);
		devlink_fmsg_u32_pair_put(fmsg, "size", dump_len);
		devlink_fmsg_pair_nest_end(fmsg);
	}

	vfree(data);
	return rc;
}

static int bnxt_fw_recover(struct devlink_health_reporter *reporter,
			   void *priv_ctx,
			   struct netlink_ext_ack *extack)
{
	struct bnxt *bp = devlink_health_reporter_priv(reporter);

	if (bp->fw_health->severity == SEVERITY_FATAL)
		return -ENODEV;

	set_bit(BNXT_STATE_RECOVER, &bp->state);
	__bnxt_fw_recover(bp);

	return -EINPROGRESS;
}

static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
	.name = "fw",
	.diagnose = bnxt_fw_diagnose,
	.dump = bnxt_fw_dump,
	.recover = bnxt_fw_recover,
};

static struct devlink_health_reporter *
__bnxt_dl_reporter_create(struct bnxt *bp,
			  const struct devlink_health_reporter_ops *ops)
{
	struct devlink_health_reporter *reporter;

	reporter = devlink_health_reporter_create(bp->dl, ops, bp);
	if (IS_ERR(reporter)) {
		netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
			    ops->name, PTR_ERR(reporter));
		return NULL;
	}

	return reporter;
}

void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;

	if (fw_health && !fw_health->fw_reporter)
		fw_health->fw_reporter = __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops);
}

void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;

	if (fw_health && fw_health->fw_reporter) {
		devlink_health_reporter_destroy(fw_health->fw_reporter);
		fw_health->fw_reporter = NULL;
	}
}

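/* Report a firmware error through the devlink health "fw" reporter.  If the
 * reporter has not been created, or if devlink_health_report() does not
 * carry out recovery (-ECANCELED), recover the firmware directly instead.
 */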
void bnxt_devlink_health_fw_report(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	int rc;

	if (!fw_health)
		return;

	if (!fw_health->fw_reporter) {
		__bnxt_fw_recover(bp);
		return;
	}

	mutex_lock(&fw_health->lock);
	fw_health->severity = SEVERITY_RECOVERABLE;
	fw_health->remedy = REMEDY_DEVLINK_RECOVER;
	mutex_unlock(&fw_health->lock);
	rc = devlink_health_report(fw_health->fw_reporter, "FW error reported",
				   fw_health);
	if (rc == -ECANCELED)
		__bnxt_fw_recover(bp);
}

void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u8 state;

	mutex_lock(&fw_health->lock);
	if (healthy) {
		fw_health->severity = SEVERITY_NORMAL;
		state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
	} else {
		fw_health->severity = SEVERITY_FATAL;
		fw_health->remedy = REMEDY_POWER_CYCLE_DEVICE;
		state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
	}
	mutex_unlock(&fw_health->lock);
	devlink_health_reporter_state_update(fw_health->fw_reporter, state);
}

void bnxt_dl_health_fw_recovery_done(struct bnxt *bp)
{
	struct bnxt_dl *dl = devlink_priv(bp->dl);

	devlink_health_reporter_recovery_done(bp->fw_health->fw_reporter);
	bnxt_hwrm_remote_dev_reset_set(bp, dl->remote_reset);
}

static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
			    struct netlink_ext_ack *extack);

static void
bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
			     struct hwrm_fw_livepatch_output *resp)
{
	int err = ((struct hwrm_err_output *)resp)->cmd_err;

	switch (err) {
	case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE:
		netdev_err(bp->dev, "Illegal live patch opcode");
		NL_SET_ERR_MSG_MOD(extack, "Invalid opcode");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED:
		NL_SET_ERR_MSG_MOD(extack, "Live patch operation not supported");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED:
		NL_SET_ERR_MSG_MOD(extack, "Live patch not found");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED:
		NL_SET_ERR_MSG_MOD(extack,
				   "Live patch deactivation failed. Firmware not patched.");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL:
		NL_SET_ERR_MSG_MOD(extack, "Live patch not authenticated");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER:
		NL_SET_ERR_MSG_MOD(extack, "Incompatible live patch");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE:
		NL_SET_ERR_MSG_MOD(extack, "Live patch has invalid size");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED:
		NL_SET_ERR_MSG_MOD(extack, "Live patch already applied");
		break;
	default:
		netdev_err(bp->dev, "Unexpected live patch error: %d\n", err);
		NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch");
		break;
	}
}

/* Live patch status in NVM */
#define BNXT_LIVEPATCH_NOT_INSTALLED	0
#define BNXT_LIVEPATCH_INSTALLED	FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
#define BNXT_LIVEPATCH_REMOVED		FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
#define BNXT_LIVEPATCH_MASK		(FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
					 FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
#define BNXT_LIVEPATCH_ACTIVATED	BNXT_LIVEPATCH_MASK

#define BNXT_LIVEPATCH_STATE(flags)	((flags) & BNXT_LIVEPATCH_MASK)

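/* Walk all firmware live patch targets: activate patches that are installed
 * in NVM but not yet active, and deactivate patches that have been removed
 * from NVM.  Backs the devlink reload fw_activate action with the no_reset
 * limit.
 */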
static int
bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
{
	struct hwrm_fw_livepatch_query_output *query_resp;
	struct hwrm_fw_livepatch_query_input *query_req;
	struct hwrm_fw_livepatch_output *patch_resp;
	struct hwrm_fw_livepatch_input *patch_req;
	u16 flags, live_patch_state;
	bool activated = false;
	u32 installed = 0;
	u8 target;
	int rc;

	if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support live patch");
		return -EOPNOTSUPP;
	}

	rc = hwrm_req_init(bp, query_req, HWRM_FW_LIVEPATCH_QUERY);
	if (rc)
		return rc;
	query_resp = hwrm_req_hold(bp, query_req);

	rc = hwrm_req_init(bp, patch_req, HWRM_FW_LIVEPATCH);
	if (rc) {
		hwrm_req_drop(bp, query_req);
		return rc;
	}
	patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
	patch_resp = hwrm_req_hold(bp, patch_req);

	for (target = 1; target <= FW_LIVEPATCH_REQ_FW_TARGET_LAST; target++) {
		query_req->fw_target = target;
		rc = hwrm_req_send(bp, query_req);
		if (rc) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to query packages");
			break;
		}

		flags = le16_to_cpu(query_resp->status_flags);
		live_patch_state = BNXT_LIVEPATCH_STATE(flags);

		if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
			continue;

		if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
			activated = true;
			continue;
		}

		if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
			patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
		else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
			patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;

		patch_req->fw_target = target;
		rc = hwrm_req_send(bp, patch_req);
		if (rc) {
			bnxt_dl_livepatch_report_err(bp, extack, patch_resp);
			break;
		}
		installed++;
	}

	if (!rc && !installed) {
		if (activated) {
			NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
			rc = -EEXIST;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "No live patches found");
			rc = -ENOENT;
		}
	}
	hwrm_req_drop(bp, query_req);
	hwrm_req_drop(bp, patch_req);
	return rc;
}

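/* devlink reload_down handler.  For driver_reinit, quiesce the ULPs, close
 * the netdev and release firmware resources; for fw_activate, either apply
 * live patches (no_reset limit) or request a graceful firmware reset and
 * let bnxt_dl_reload_up() wait for it to complete.
 */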
static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
			       enum devlink_reload_action action,
			       enum devlink_reload_limit limit,
			       struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	int rc = 0;

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
		bnxt_ulp_stop(bp);
		rtnl_lock();
		netdev_lock(bp->dev);
		if (bnxt_sriov_cfg(bp)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "reload is unsupported while VFs are allocated or being configured");
			netdev_unlock(bp->dev);
			rtnl_unlock();
			bnxt_ulp_start(bp, 0);
			return -EOPNOTSUPP;
		}
		if (bp->dev->reg_state == NETREG_UNREGISTERED) {
			netdev_unlock(bp->dev);
			rtnl_unlock();
			bnxt_ulp_start(bp, 0);
			return -ENODEV;
		}
		if (netif_running(bp->dev))
			bnxt_close_nic(bp, true, true);
		bnxt_vf_reps_free(bp);
		rc = bnxt_hwrm_func_drv_unrgtr(bp);
		if (rc) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
			if (netif_running(bp->dev))
				netif_close(bp->dev);
			netdev_unlock(bp->dev);
			rtnl_unlock();
			break;
		}
		bnxt_cancel_reservations(bp, false);
		bnxt_free_ctx_mem(bp, false);
		break;
	}
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
			return bnxt_dl_livepatch_activate(bp, extack);
		if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET) {
			NL_SET_ERR_MSG_MOD(extack, "Device not capable, requires reboot");
			return -EOPNOTSUPP;
		}
		if (!bnxt_hwrm_reset_permitted(bp)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Reset denied by firmware, it may be inhibited by remote driver");
			return -EPERM;
		}
		rtnl_lock();
		netdev_lock(bp->dev);
		if (bp->dev->reg_state == NETREG_UNREGISTERED) {
			netdev_unlock(bp->dev);
			rtnl_unlock();
			return -ENODEV;
		}
		if (netif_running(bp->dev))
			set_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		rc = bnxt_hwrm_firmware_reset(bp->dev,
					      FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
					      FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
					      FW_RESET_REQ_FLAGS_RESET_GRACEFUL |
					      FW_RESET_REQ_FLAGS_FW_ACTIVATION);
		if (rc) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
			clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
			netdev_unlock(bp->dev);
			rtnl_unlock();
		}
		break;
	}
	default:
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action action,
			     enum devlink_reload_limit limit, u32 *actions_performed,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	int rc = 0;

	netdev_assert_locked(bp->dev);

	*actions_performed = 0;
	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
		bnxt_fw_init_one(bp);
		bnxt_vf_reps_alloc(bp);
		if (netif_running(bp->dev))
			rc = bnxt_open_nic(bp, true, true);
		if (!rc) {
			bnxt_reenable_sriov(bp);
			bnxt_ptp_reapply_pps(bp);
		}
		break;
	}
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
		unsigned long start = jiffies;
		unsigned long timeout = start + BNXT_DFLT_FW_RST_MAX_DSECS * HZ / 10;

		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
			break;
		if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
			timeout = start + bp->fw_health->normal_func_wait_dsecs * HZ / 10;
		if (!netif_running(bp->dev))
			NL_SET_ERR_MSG_MOD(extack,
					   "Device is closed, not waiting for reset notice that will never come");
		netdev_unlock(bp->dev);
		rtnl_unlock();
		while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
			if (time_after(jiffies, timeout)) {
				NL_SET_ERR_MSG_MOD(extack, "Activation incomplete");
				rc = -ETIMEDOUT;
				break;
			}
			if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
				NL_SET_ERR_MSG_MOD(extack, "Activation aborted");
				rc = -ENODEV;
				break;
			}
			msleep(50);
		}
		rtnl_lock();
		netdev_lock(bp->dev);
		if (!rc)
			*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}

	if (!rc) {
		bnxt_print_device_info(bp);
		if (netif_running(bp->dev)) {
			mutex_lock(&bp->link_lock);
			bnxt_report_link(bp);
			mutex_unlock(&bp->link_lock);
		}
		*actions_performed |= BIT(action);
	} else if (netif_running(bp->dev)) {
		netif_close(bp->dev);
	}
	netdev_unlock(bp->dev);
	rtnl_unlock();
	if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
		bnxt_ulp_start(bp, rc);
	return rc;
}

static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
{
	bool rc = false;
	u32 datalen;
	u16 index;
	u8 *buf;

	if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, NULL, &datalen) || !datalen) {
		NL_SET_ERR_MSG_MOD(extack, "nvm test vpd entry error");
		return false;
	}

	buf = kzalloc(datalen, GFP_KERNEL);
	if (!buf) {
		NL_SET_ERR_MSG_MOD(extack, "insufficient memory for nvm test");
		return false;
	}

	if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
		NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
		goto done;
	}

	if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
			     BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
		NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
		goto done;
	}

	rc = true;

done:
	kfree(buf);
	return rc;
}

static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
				   struct netlink_ext_ack *extack)
{
	return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
}

static enum devlink_selftest_status bnxt_dl_selftest_run(struct devlink *dl,
							 unsigned int id,
							 struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);

	if (id == DEVLINK_ATTR_SELFTEST_ID_FLASH)
		return bnxt_nvm_test(bp, extack) ?
				DEVLINK_SELFTEST_STATUS_PASS :
				DEVLINK_SELFTEST_STATUS_FAIL;

	return DEVLINK_SELFTEST_STATUS_SKIP;
}

static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
	.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
	.eswitch_mode_get = bnxt_dl_eswitch_mode_get,
#endif /* CONFIG_BNXT_SRIOV */
	.info_get = bnxt_dl_info_get,
	.flash_update = bnxt_dl_flash_update,
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
	.reload_down = bnxt_dl_reload_down,
	.reload_up = bnxt_dl_reload_up,
	.selftest_check = bnxt_dl_selftest_check,
	.selftest_run = bnxt_dl_selftest_run,
};

static const struct devlink_ops bnxt_vf_dl_ops;

enum bnxt_dl_param_id {
	BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
};

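/* NVM-backed devlink parameters: { devlink param id, NVM option offset,
 * NVM config directory type, field width in NVM (bits), value width
 * exposed to devlink (bytes) }.
 */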
static const struct bnxt_dl_nvm_param nvm_params[] = {
	{DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
	 BNXT_NVM_SHARED_CFG, 1, 1},
	{DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
	 BNXT_NVM_SHARED_CFG, 1, 1},
	{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
	 NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
	{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
	 NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
	{DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, NVM_OFF_SUPPORT_RDMA,
	 BNXT_NVM_FUNC_CFG, 1, 1},
	{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
	 BNXT_NVM_SHARED_CFG, 1, 1},
};

union bnxt_nvm_data {
	u8	val8;
	__le32	val32;
};

static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst,
				  union devlink_param_value *src,
				  int nvm_num_bits, int dl_num_bytes)
{
	u32 val32 = 0;

	if (nvm_num_bits == 1) {
		dst->val8 = src->vbool;
		return;
	}
	if (dl_num_bytes == 4)
		val32 = src->vu32;
	else if (dl_num_bytes == 2)
		val32 = (u32)src->vu16;
	else if (dl_num_bytes == 1)
		val32 = (u32)src->vu8;
	dst->val32 = cpu_to_le32(val32);
}

static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
				    union bnxt_nvm_data *src,
				    int nvm_num_bits, int dl_num_bytes)
{
	u32 val32;

	if (nvm_num_bits == 1) {
		dst->vbool = src->val8;
		return;
	}
	val32 = le32_to_cpu(src->val32);
	if (dl_num_bytes == 4)
		dst->vu32 = val32;
	else if (dl_num_bytes == 2)
		dst->vu16 = (u16)val32;
	else if (dl_num_bytes == 1)
		dst->vu8 = (u8)val32;
}

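/* Read the stored NVM configuration version.  P5_PLUS chips store three
 * one-byte version components that are read individually and packed into
 * *nvm_cfg_ver; earlier chips return the version as a single raw word.
 */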
static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
{
	struct hwrm_nvm_get_variable_input *req;
	u16 bytes = BNXT_NVM_CFG_VER_BYTES;
	u16 bits = BNXT_NVM_CFG_VER_BITS;
	union devlink_param_value ver;
	union bnxt_nvm_data *data;
	dma_addr_t data_dma_addr;
	int rc, i = 2;
	u16 dim = 1;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
	if (rc)
		return rc;

	data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
	if (!data) {
		rc = -ENOMEM;
		goto exit;
	}

	/* earlier devices present as an array of raw bytes */
	if (!BNXT_CHIP_P5_PLUS(bp)) {
		dim = 0;
		i = 0;
		bits *= 3;  /* array of 3 version components */
		bytes *= 4; /* copy whole word */
	}

	hwrm_req_hold(bp, req);
	req->dest_data_addr = cpu_to_le64(data_dma_addr);
	req->data_len = cpu_to_le16(bits);
	req->option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
	req->dimensions = cpu_to_le16(dim);

	while (i >= 0) {
		req->index_0 = cpu_to_le16(i--);
		rc = hwrm_req_send_silent(bp, req);
		if (rc)
			goto exit;
		bnxt_copy_from_nvm_data(&ver, data, bits, bytes);

		if (BNXT_CHIP_P5_PLUS(bp)) {
			*nvm_cfg_ver <<= 8;
			*nvm_cfg_ver |= ver.vu8;
		} else {
			*nvm_cfg_ver = ver.vu32;
		}
	}

exit:
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
			    enum bnxt_dl_version_type type, const char *key,
			    char *buf)
{
	if (!strlen(buf))
		return 0;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
	    (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
	     !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE)))
		return 0;

	switch (type) {
	case BNXT_VERSION_FIXED:
		return devlink_info_version_fixed_put(req, key, buf);
	case BNXT_VERSION_RUNNING:
		return devlink_info_version_running_put(req, key, buf);
	case BNXT_VERSION_STORED:
		return devlink_info_version_stored_put(req, key, buf);
	}
	return 0;
}

#define BNXT_FW_SRT_PATCH	"fw.srt.patch"
#define BNXT_FW_CRT_PATCH	"fw.crt.patch"

static int bnxt_dl_livepatch_info_put(struct bnxt *bp,
				      struct devlink_info_req *req,
				      const char *key)
{
	struct hwrm_fw_livepatch_query_input *query;
	struct hwrm_fw_livepatch_query_output *resp;
	u16 flags;
	int rc;

	if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH)
		return 0;

	rc = hwrm_req_init(bp, query, HWRM_FW_LIVEPATCH_QUERY);
	if (rc)
		return rc;

	if (!strcmp(key, BNXT_FW_SRT_PATCH))
		query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW;
	else if (!strcmp(key, BNXT_FW_CRT_PATCH))
		query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW;
	else
		goto exit;

	resp = hwrm_req_hold(bp, query);
	rc = hwrm_req_send(bp, query);
	if (rc)
		goto exit;

	flags = le16_to_cpu(resp->status_flags);
	if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) {
		resp->active_ver[sizeof(resp->active_ver) - 1] = '\0';
		rc = devlink_info_version_running_put(req, key, resp->active_ver);
		if (rc)
			goto exit;
	}

	if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) {
		resp->install_ver[sizeof(resp->install_ver) - 1] = '\0';
		rc = devlink_info_version_stored_put(req, key, resp->install_ver);
		if (rc)
			goto exit;
	}

exit:
	hwrm_req_drop(bp, query);
	return rc;
}

#define HWRM_FW_VER_STR_LEN	16

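/* devlink .info_get callback: reports serial numbers, fixed board/ASIC
 * identifiers, the running firmware versions from the VER_GET response and
 * the stored (pending) firmware versions from NVM, including any SRT/CRT
 * live patch versions.
 */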
static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
			    struct netlink_ext_ack *extack)
{
	struct hwrm_nvm_get_dev_info_output nvm_dev_info;
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	struct hwrm_ver_get_output *ver_resp;
	char mgmt_ver[FW_VER_STR_LEN];
	char roce_ver[FW_VER_STR_LEN];
	char ncsi_ver[FW_VER_STR_LEN];
	char buf[32];
	u32 ver = 0;
	int rc;

	if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_DSN_VALID)) {
		sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
			bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
			bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]);
		rc = devlink_info_serial_number_put(req, buf);
		if (rc)
			return rc;
	}

	if (strlen(bp->board_serialno)) {
		rc = devlink_info_board_serial_number_put(req, bp->board_serialno);
		if (rc)
			return rc;
	}

	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
			      DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
			      bp->board_partno);
	if (rc)
		return rc;

	sprintf(buf, "%X", bp->chip_num);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
			      DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
	if (rc)
		return rc;

	ver_resp = &bp->ver_resp;
	sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
			      DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
	if (rc)
		return rc;

	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
			      bp->nvm_cfg_ver);
	if (rc)
		return rc;

	buf[0] = 0;
	strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW, buf);
	if (rc)
		return rc;

	if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &ver)) {
		sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xff, (ver >> 8) & 0xff,
			ver & 0xff);
		rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
				      DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
				      buf);
		if (rc)
			return rc;
	}

	if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
		snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
			 ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);

		snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
			 ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);

		snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
			 ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
	} else {
		snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
			 ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);

		snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
			 ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);

		snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
			 ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
	}
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
	if (rc)
		return rc;

	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
			      bp->hwrm_ver_supp);
	if (rc)
		return rc;

	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
	if (rc)
		return rc;

	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
	if (rc)
		return rc;

	rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info);
	if (rc ||
	    !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID)) {
		if (!bnxt_get_pkginfo(bp->dev, buf, sizeof(buf)))
			return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
						DEVLINK_INFO_VERSION_GENERIC_FW,
						buf);
		return 0;
	}

	buf[0] = 0;
	strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
			      DEVLINK_INFO_VERSION_GENERIC_FW, buf);
	if (rc)
		return rc;

	snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
		 nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor,
		 nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
			      DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
	if (rc)
		return rc;

	snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
		 nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor,
		 nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
			      DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
	if (rc)
		return rc;

	snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
		 nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
		 nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
			      DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
	if (rc)
		return rc;

	if (BNXT_CHIP_P5_PLUS(bp)) {
		rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
		if (rc)
			return rc;
	}
	return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
}

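/* Send a prepared HWRM_NVM_GET_VARIABLE or HWRM_NVM_SET_VARIABLE request
 * for one NVM-backed devlink parameter, converting between the devlink
 * value and its NVM representation.  The request passed in by the caller
 * is always released here.
 */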
static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
			       const struct bnxt_dl_nvm_param *nvm, void *msg,
			       union devlink_param_value *val)
{
	struct hwrm_nvm_get_variable_input *req = msg;
	struct hwrm_err_output *resp;
	union bnxt_nvm_data *data;
	dma_addr_t data_dma_addr;
	int idx = 0, rc;

	if (nvm->dir_type == BNXT_NVM_PORT_CFG)
		idx = bp->pf.port_id;
	else if (nvm->dir_type == BNXT_NVM_FUNC_CFG)
		idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;

	data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
	if (!data) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	req->dest_data_addr = cpu_to_le64(data_dma_addr);
	req->data_len = cpu_to_le16(nvm->nvm_num_bits);
	req->option_num = cpu_to_le16(nvm->offset);
	req->index_0 = cpu_to_le16(idx);
	if (idx)
		req->dimensions = cpu_to_le16(1);

	resp = hwrm_req_hold(bp, req);
	if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
		bnxt_copy_to_nvm_data(data, val, nvm->nvm_num_bits,
				      nvm->dl_num_bytes);
		rc = hwrm_req_send(bp, msg);
	} else {
		rc = hwrm_req_send_silent(bp, msg);
		if (!rc) {
			bnxt_copy_from_nvm_data(val, data,
						nvm->nvm_num_bits,
						nvm->dl_num_bytes);
		} else {
			if (resp->cmd_err ==
			    NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
				rc = -EOPNOTSUPP;
		}
	}
	hwrm_req_drop(bp, req);
	if (rc == -EACCES)
		netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
	return rc;
}

static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
			     union devlink_param_value *val)
{
	const struct bnxt_dl_nvm_param *nvm_param;
	int i;

	for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
		nvm_param = &nvm_params[i];
		if (nvm_param->id == param_id)
			return __bnxt_hwrm_nvm_req(bp, nvm_param, msg, val);
	}
	return -EOPNOTSUPP;
}

static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
				 struct devlink_param_gset_ctx *ctx)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	struct hwrm_nvm_get_variable_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
	if (rc)
		return rc;

	rc = bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
	if (!rc && id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
		ctx->val.vbool = !ctx->val.vbool;

	return rc;
}

static int
bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
		      struct devlink_param_gset_ctx *ctx,
		      struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	struct hwrm_nvm_set_variable_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_SET_VARIABLE);
	if (rc)
		return rc;

	if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
		ctx->val.vbool = !ctx->val.vbool;

	return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}

static int bnxt_dl_roce_validate(struct devlink *dl, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	const struct bnxt_dl_nvm_param nvm_roce_cap = {0, NVM_OFF_RDMA_CAPABLE,
						       BNXT_NVM_SHARED_CFG, 1, 1};
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	struct hwrm_nvm_get_variable_input *req;
	union devlink_param_value roce_cap;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
	if (rc)
		return rc;

	if (__bnxt_hwrm_nvm_req(bp, &nvm_roce_cap, req, &roce_cap)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to verify if device is RDMA Capable");
		return -EINVAL;
	}
	if (!roce_cap.vbool) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support RDMA");
		return -EINVAL;
	}
	return 0;
}

static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	int max_val = -1;

	if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX)
		max_val = BNXT_MSIX_VEC_MAX;

	if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN)
		max_val = BNXT_MSIX_VEC_MIN_MAX;

	if (val.vu32 > max_val) {
		NL_SET_ERR_MSG_MOD(extack, "MSIX value is exceeding the range");
		return -EINVAL;
	}

	return 0;
}

static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
				     struct devlink_param_gset_ctx *ctx)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);

	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
		return -EOPNOTSUPP;

	ctx->val.vbool = bnxt_dl_get_remote_reset(dl);
	return 0;
}

static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
				     struct devlink_param_gset_ctx *ctx,
				     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	int rc;

	rc = bnxt_hwrm_remote_dev_reset_set(bp, ctx->val.vbool);
	if (rc)
		return rc;

	bnxt_dl_set_remote_reset(dl, ctx->val.vbool);
	return rc;
}

static const struct devlink_param bnxt_dl_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      NULL),
	DEVLINK_PARAM_GENERIC(IGNORE_ARI,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      NULL),
	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      bnxt_dl_msix_validate),
	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      bnxt_dl_msix_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      bnxt_dl_roce_validate),
	DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
			     "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			     bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			     NULL),
	/* keep REMOTE_DEV_RESET last, it is excluded based on caps */
	DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET,
			      BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      bnxt_remote_dev_reset_get,
			      bnxt_remote_dev_reset_set, NULL),
};

static int bnxt_dl_params_register(struct bnxt *bp)
{
	int num_params = ARRAY_SIZE(bnxt_dl_params);
	int rc;

	if (bp->hwrm_spec_code < 0x10600)
		return 0;

	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
		num_params--;

	rc = devlink_params_register(bp->dl, bnxt_dl_params, num_params);
	if (rc)
		netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
			    rc);
	return rc;
}

static void bnxt_dl_params_unregister(struct bnxt *bp)
{
	int num_params = ARRAY_SIZE(bnxt_dl_params);

	if (bp->hwrm_spec_code < 0x10600)
		return;

	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
		num_params--;

	devlink_params_unregister(bp->dl, bnxt_dl_params, num_params);
}

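/* Allocate and register the devlink instance: full ops for the PF, empty
 * ops for VFs, plus the physical devlink port and NVM-backed parameters on
 * the PF.  bnxt_dl_unregister() tears these down again.
 */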
int bnxt_dl_register(struct bnxt *bp)
{
	const struct devlink_ops *devlink_ops;
	struct devlink_port_attrs attrs = {};
	struct bnxt_dl *bp_dl;
	struct devlink *dl;
	int rc;

	if (BNXT_PF(bp))
		devlink_ops = &bnxt_dl_ops;
	else
		devlink_ops = &bnxt_vf_dl_ops;

	dl = devlink_alloc(devlink_ops, sizeof(struct bnxt_dl), &bp->pdev->dev);
	if (!dl) {
		netdev_warn(bp->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	bp->dl = dl;
	bp_dl = devlink_priv(dl);
	bp_dl->bp = bp;
	bnxt_dl_set_remote_reset(dl, true);

	/* Add switchdev eswitch mode setting, if SRIOV supported */
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
	    bp->hwrm_spec_code > 0x10803)
		bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;

	if (!BNXT_PF(bp))
		goto out;

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
	attrs.phys.port_number = bp->pf.port_id;
	memcpy(attrs.switch_id.id, bp->dsn, sizeof(bp->dsn));
	attrs.switch_id.id_len = sizeof(bp->dsn);
	devlink_port_attrs_set(&bp->dl_port, &attrs);
	rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
	if (rc) {
		netdev_err(bp->dev, "devlink_port_register failed\n");
		goto err_dl_free;
	}

	rc = bnxt_dl_params_register(bp);
	if (rc)
		goto err_dl_port_unreg;

out:
	devlink_register(dl);
	return 0;

err_dl_port_unreg:
	devlink_port_unregister(&bp->dl_port);
err_dl_free:
	devlink_free(dl);
	return rc;
}

void bnxt_dl_unregister(struct bnxt *bp)
{
	struct devlink *dl = bp->dl;

	devlink_unregister(dl);
	if (BNXT_PF(bp)) {
		bnxt_dl_params_unregister(bp);
		devlink_port_unregister(&bp->dl_port);
	}
	devlink_free(dl);
}