// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/vmalloc.h>

#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
#include "port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
#include "ice_sf_eth.h"

/* context for devlink info version reporting */
struct ice_info_ctx {
	char buf[128];
	struct ice_orom_info pending_orom;
	struct ice_nvm_info pending_nvm;
	struct ice_netlist_info pending_netlist;
	struct ice_hw_dev_caps dev_caps;
};

/* The following functions are used to format specific strings for various
 * devlink info versions. The ctx parameter is used to provide the storage
 * buffer, as well as any ancillary information calculated when the info
 * request was made.
 *
 * If a version does not exist, for example when attempting to get the
 * inactive version of flash when there is no pending update, the function
 * should leave the buffer in the ctx structure empty.
 */

static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	u8 dsn[8];

	/* Copy the DSN into an array in Big Endian format */
	put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);

	snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}

static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;
	int status;

	status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
	if (status)
		/* We failed to locate the PBA, so just skip this entry */
		dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
			status);
}

static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
}

static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
		 hw->api_min_ver, hw->api_patch);
}

static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
}

static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &pf->hw.flash.orom;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 orom->major, orom->build, orom->patch);
}

static void
ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
			  struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &ctx->pending_orom;

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
		snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
			 orom->major, orom->build, orom->patch);
}

static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
}

static void
ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
			 struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
			 nvm->major, nvm->minor);
}

static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void
ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
}

static void
ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
		 pkg->major, pkg->minor, pkg->update, pkg->draft);
}

static void
ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
}

static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	/* The netlist version fields are BCD formatted */
	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
		 netlist->major, netlist->minor,
		 netlist->type >> 16, netlist->type & 0xFFFF,
		 netlist->rev, netlist->cust_ver);
}

static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

static void
ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
			     struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	/* The netlist version fields are BCD formatted */
	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
			 netlist->major, netlist->minor,
			 netlist->type >> 16, netlist->type & 0xFFFF,
			 netlist->rev, netlist->cust_ver);
}

static void
ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
			       struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

static void ice_info_cgu_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	u32 id, cfg_ver, fw_ver;

	if (!ice_is_feature_supported(pf, ICE_F_CGU))
		return;
	if (ice_aq_get_cgu_info(&pf->hw, &id, &cfg_ver, &fw_ver))
		return;
	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", id, cfg_ver, fw_ver);
}

static void ice_info_cgu_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	if (!ice_is_feature_supported(pf, ICE_F_CGU))
		return;
	snprintf(ctx->buf, sizeof(ctx->buf), "%u", pf->hw.cgu_part_number);
}

#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }

/* The combined() macro inserts both the running entry as well as a stored
 * entry. The running entry will always report the version from the active
 * handler. The stored entry will first try the pending handler, and fall
 * back to the active handler if the pending function does not report a
 * version. The pending handler should check the status of a pending update
 * for the relevant flash component. It should only fill in the buffer in the
 * case where a valid pending version is available. This ensures that the
 * related stored and running versions remain in sync, and that stored
 * versions are correctly reported as expected.
 */
#define combined(key, active, pending) \
	running(key, active), \
	stored(key, pending, active)
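
/* For illustration only: with the helper macros above, an entry such as
 *
 *	combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver)
 *
 * expands to a running/stored pair along the lines of:
 *
 *	{ ICE_VERSION_RUNNING, "fw.psid.api", ice_info_nvm_ver, NULL },
 *	{ ICE_VERSION_STORED, "fw.psid.api", ice_info_pending_nvm_ver,
 *	  ice_info_nvm_ver }
 */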

enum ice_version_type {
	ICE_VERSION_FIXED,
	ICE_VERSION_RUNNING,
	ICE_VERSION_STORED,
};

static const struct ice_devlink_version {
	enum ice_version_type type;
	const char *key;
	void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
	void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
	fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
	running("fw.mgmt.api", ice_info_fw_api),
	running("fw.mgmt.build", ice_info_fw_build),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver, ice_info_pending_orom_ver),
	combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack, ice_info_pending_eetrack),
	running("fw.app.name", ice_info_ddp_pkg_name),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
	running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
	combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
	combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
	fixed("cgu.id", ice_info_cgu_id),
	running("fw.cgu", ice_info_cgu_fw_build),
};
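
/* For illustration only: the table above is what backs "devlink dev info".
 * A report might look roughly like this (hypothetical PCI address and
 * version values):
 *
 *	$ devlink dev info pci/0000:16:00.0
 *	pci/0000:16:00.0:
 *	  driver ice
 *	  serial_number 00-01-02-03-04-05-06-07
 *	  versions:
 *	      fixed:
 *	        board.id K12345-000
 *	      running:
 *	        fw.mgmt 7.3.2
 *	        fw.psid.api 3.25
 *	      stored:
 *	        fw.psid.api 3.25
 */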

/**
 * ice_devlink_info_get - .info_get devlink handler
 * @devlink: devlink instance structure
 * @req: the devlink info request
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .info_get operation. Reports information about the
 * device.
 *
 * Return: zero on success or an error code on failure.
 */
static int ice_devlink_info_get(struct devlink *devlink,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_info_ctx *ctx;
	size_t i;
	int err;

	err = ice_wait_for_reset(pf, 10 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
		return err;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* discover capabilities first */
	err = ice_discover_dev_caps(hw, &ctx->dev_caps);
	if (err) {
		dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
			err, libie_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
		goto out_free_ctx;
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
		err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending Option ROM */
			ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
		err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
		if (err) {
			dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending NVM */
			ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
		err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending netlist */
			ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
		}
	}

	ice_info_get_dsn(pf, ctx);

	err = devlink_info_serial_number_put(req, ctx->buf);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
		goto out_free_ctx;
	}

	for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
		enum ice_version_type type = ice_devlink_versions[i].type;
		const char *key = ice_devlink_versions[i].key;

		memset(ctx->buf, 0, sizeof(ctx->buf));

		ice_devlink_versions[i].getter(pf, ctx);

		/* If the default getter doesn't report a version, use the
		 * fallback function. This is primarily useful in the case of
		 * "stored" versions that want to report the same value as the
		 * running version in the normal case of no pending update.
		 */
		if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
			ice_devlink_versions[i].fallback(pf, ctx);

		/* Do not report missing versions */
		if (ctx->buf[0] == '\0')
			continue;

		switch (type) {
		case ICE_VERSION_FIXED:
			err = devlink_info_version_fixed_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_RUNNING:
			err = devlink_info_version_running_put_ext(req, key,
								   ctx->buf,
								   DEVLINK_INFO_VERSION_TYPE_COMPONENT);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_STORED:
			err = devlink_info_version_stored_put_ext(req, key,
								  ctx->buf,
								  DEVLINK_INFO_VERSION_TYPE_COMPONENT);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
				goto out_free_ctx;
			}
			break;
		}
	}

out_free_ctx:
	kfree(ctx);
	return err;
}

/**
 * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Allow the user to activate new Embedded Management Processor firmware by
 * issuing a device specific EMP reset. Called in response to
 * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
 *
 * Note that teardown and rebuild of the driver state happen automatically as
 * part of an interrupt and watchdog task. This is because all physical
 * functions on the device must be able to reset when an EMP reset occurs from
 * any source.
 */
static int
ice_devlink_reload_empr_start(struct ice_pf *pf,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u8 pending;
	int err;

	err = ice_get_pending_updates(pf, &pending, extack);
	if (err)
		return err;

	/* pending is a bitmask of which flash banks have a pending update,
	 * including the main NVM bank, the Option ROM bank, and the netlist
	 * bank. If any of these bits are set, then there is a pending update
	 * waiting to be activated.
	 */
	if (!pending) {
		NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
		return -ECANCELED;
	}

	if (pf->fw_emp_reset_disabled) {
		NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
		return -ECANCELED;
	}

	dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");

	err = ice_aq_nvm_update_empr(hw);
	if (err) {
		dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
			err, libie_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
		return err;
	}

	return 0;
}

/**
 * ice_devlink_reinit_down - unload given PF
 * @pf: pointer to the PF struct
 */
static void ice_devlink_reinit_down(struct ice_pf *pf)
{
	/* No need to take devl_lock, it's already taken by devlink API */
	ice_unload(pf);
	rtnl_lock();
	ice_vsi_decfg(ice_get_main_vsi(pf));
	rtnl_unlock();
	ice_deinit_pf(pf);
	ice_deinit_dev(pf);
}

/**
 * ice_devlink_reload_down - prepare for reload
 * @devlink: pointer to the devlink instance to reload
 * @netns_change: if true, the network namespace is changing
 * @action: the action to perform
 * @limit: limits on what reload should do, such as not resetting
 * @extack: netlink extended ACK structure
 */
static int
ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
			enum devlink_reload_action action,
			enum devlink_reload_limit limit,
			struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		if (ice_is_eswitch_mode_switchdev(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Go to legacy mode before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_is_adq_active(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Turn off ADQ before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_has_vfs(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Remove all VFs before doing reinit");
			return -EOPNOTSUPP;
		}
		ice_devlink_reinit_down(pf);
		return 0;
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		return ice_devlink_reload_empr_start(pf, extack);
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}

/**
 * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Wait for driver to finish rebuilding after EMP reset is completed. This
 * includes time to wait for both the actual device reset as well as the time
 * for the driver's rebuild to complete.
 */
static int
ice_devlink_reload_empr_finish(struct ice_pf *pf,
			       struct netlink_ext_ack *extack)
{
	int err;

	err = ice_wait_for_reset(pf, 60 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
		return err;
	}

	return 0;
}
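
/* For illustration only: a pending update staged by "devlink dev flash" is
 * typically activated from userspace through devlink reload, e.g.
 * (hypothetical PCI address):
 *
 *	$ devlink dev reload pci/0000:16:00.0 action fw_activate
 *
 * which reaches ice_devlink_reload_empr_start() on the way down and
 * ice_devlink_reload_empr_finish() once the EMP reset completes.
 */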

/**
 * ice_get_tx_topo_user_sel - Read user's choice from flash
 * @pf: pointer to pf structure
 * @layers: value read from flash will be saved here
 *
 * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV.
 *
 * Return: zero when read was successful, negative values otherwise.
 */
static int ice_get_tx_topo_user_sel(struct ice_pf *pf, u8 *layers)
{
	struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
	struct ice_hw *hw = &pf->hw;
	int err;

	err = ice_acquire_nvm(hw, ICE_RES_READ);
	if (err)
		return err;

	err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
			      sizeof(usr_sel), &usr_sel, true, true, NULL);
	if (err)
		goto exit_release_res;

	if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL)
		*layers = ICE_SCHED_5_LAYERS;
	else
		*layers = ICE_SCHED_9_LAYERS;

exit_release_res:
	ice_release_nvm(hw);

	return err;
}

/**
 * ice_update_tx_topo_user_sel - Save user's preference in flash
 * @pf: pointer to pf structure
 * @layers: value to be saved in flash
 *
 * Variable "layers" defines user's preference about number of layers in Tx
 * Scheduler Topology Tree. This choice is stored in the PFA TLV field and is
 * picked up by the driver during the next initialization.
 *
 * Return: zero when save was successful, negative values otherwise.
 */
static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers)
{
	struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
	struct ice_hw *hw = &pf->hw;
	int err;

	err = ice_acquire_nvm(hw, ICE_RES_WRITE);
	if (err)
		return err;

	err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
			      sizeof(usr_sel), &usr_sel, true, true, NULL);
	if (err)
		goto exit_release_res;

	if (layers == ICE_SCHED_5_LAYERS)
		usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL;
	else
		usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL;

	err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2,
				      sizeof(usr_sel.data), &usr_sel.data,
				      true, NULL, NULL);
exit_release_res:
	ice_release_nvm(hw);

	return err;
}

/**
 * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID to get
 * @ctx: context to store the parameter value
 *
 * Return: zero on success and negative value on failure.
 */
static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);
	int err;

	err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8);
	if (err)
		return err;

	return 0;
}
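
/* For illustration only: tx_scheduling_layers is exposed as a permanent
 * (flash-backed) devlink parameter, so a typical flow looks like
 * (hypothetical PCI address):
 *
 *	$ devlink dev param set pci/0000:16:00.0 \
 *		name tx_scheduling_layers value 5 cmode permanent
 *
 * followed by a PCI slot power cycle to apply the new topology.
 */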

/**
 * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID to set
 * @ctx: context to get the parameter value
 * @extack: netlink extended ACK structure
 *
 * Return: zero on success and negative value on failure.
 */
static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx,
					   struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	int err;

	err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8);
	if (err)
		return err;

	NL_SET_ERR_MSG_MOD(extack,
			   "Tx scheduling layers have been changed on this device. You must power-cycle the PCI slot for the change to take effect.");

	return 0;
}

/**
 * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers
 * parameter value
 * @devlink: unused pointer to devlink instance
 * @id: the parameter ID to validate
 * @val: value to validate
 * @extack: netlink extended ACK structure
 *
 * Supported values are:
 * - 5 - five layers Tx Scheduler Topology Tree
 * - 9 - nine layers Tx Scheduler Topology Tree
 *
 * Return: zero when passed parameter value is supported. Negative value on
 * error.
 */
static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id,
						union devlink_param_value val,
						struct netlink_ext_ack *extack)
{
	if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Wrong number of tx scheduler layers provided.");
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree
 * @pf: pf struct
 *
 * This function tears down the tree exported during VF creation.
 */
void ice_tear_down_devlink_rate_tree(struct ice_pf *pf)
{
	struct devlink *devlink;
	struct ice_vf *vf;
	unsigned int bkt;

	devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		if (vf->devlink_port.devlink_rate)
			devl_rate_leaf_destroy(&vf->devlink_port);
	}
	mutex_unlock(&pf->vfs.table_lock);

	devl_rate_nodes_destroy(devlink);
	devl_unlock(devlink);
}

/**
 * ice_enable_custom_tx - try to enable custom Tx feature
 * @pf: pf struct
 *
 * This function tries to enable the custom Tx feature; it cannot be enabled
 * while DCB or ADQ is active.
 */
static bool ice_enable_custom_tx(struct ice_pf *pf)
{
	struct ice_port_info *pi = ice_get_main_vsi(pf)->port_info;
	struct device *dev = ice_pf_to_dev(pf);

	if (pi->is_custom_tx_enabled)
		/* already enabled, return true */
		return true;

	if (ice_is_adq_active(pf)) {
		dev_err(dev, "ADQ active, can't modify Tx scheduler tree\n");
		return false;
	}

	if (ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB active, can't modify Tx scheduler tree\n");
		return false;
	}

	pi->is_custom_tx_enabled = true;

	return true;
}

/**
 * ice_traverse_tx_tree - traverse Tx scheduler tree
 * @devlink: devlink struct
 * @node: current node, used for recursion
 * @tc_node: tc_node struct, that is treated as a root
 * @pf: pf struct
 *
 * This function traverses the Tx scheduler tree and exports the
 * entire structure to devlink-rate.
 */
static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
				 struct ice_sched_node *tc_node, struct ice_pf *pf)
{
	struct devlink_rate *rate_node = NULL;
	struct ice_dynamic_port *sf;
	struct ice_vf *vf;
	int i;

	if (node->rate_node)
		/* already added, skip to the next */
		goto traverse_children;

	if (node->parent == tc_node) {
		/* create root node */
		rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
	} else if (node->vsi_handle &&
		   pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
		   pf->vsi[node->vsi_handle]->vf) {
		vf = pf->vsi[node->vsi_handle]->vf;
		if (!vf->devlink_port.devlink_rate)
			/* leaf nodes don't have children,
			 * so we don't set rate_node
			 */
			devl_rate_leaf_create(&vf->devlink_port, node,
					      node->parent->rate_node);
	} else if (node->vsi_handle &&
		   pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
		   pf->vsi[node->vsi_handle]->sf) {
		sf = pf->vsi[node->vsi_handle]->sf;
		if (!sf->devlink_port.devlink_rate)
			/* leaf nodes don't have children,
			 * so we don't set rate_node
			 */
			devl_rate_leaf_create(&sf->devlink_port, node,
					      node->parent->rate_node);
	} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
		   node->parent->rate_node) {
		rate_node = devl_rate_node_create(devlink, node, node->name,
						  node->parent->rate_node);
	}

	if (rate_node && !IS_ERR(rate_node))
		node->rate_node = rate_node;

traverse_children:
	for (i = 0; i < node->num_children; i++)
		ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
}

/**
 * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate
 * @devlink: devlink struct
 * @vsi: main vsi struct
 *
 * This function finds a root node, then calls ice_traverse_tx_tree, which
 * traverses the tree and exports its contents to devlink rate.
 */
int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
{
	struct ice_port_info *pi = vsi->port_info;
	struct ice_sched_node *tc_node;
	struct ice_pf *pf = vsi->back;
	int i;

	tc_node = pi->root->children[0];
	mutex_lock(&pi->sched_lock);
	for (i = 0; i < tc_node->num_children; i++)
		ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
	mutex_unlock(&pi->sched_lock);

	return 0;
}

static void ice_clear_rate_nodes(struct ice_sched_node *node)
{
	node->rate_node = NULL;

	for (int i = 0; i < node->num_children; i++)
		ice_clear_rate_nodes(node->children[i]);
}

/**
 * ice_devlink_rate_clear_tx_topology - clear node->rate_node
 * @vsi: main vsi struct
 *
 * Clear rate_node to clean up creation of Tx topology.
 */
void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi)
{
	struct ice_port_info *pi = vsi->port_info;

	mutex_lock(&pi->sched_lock);
	ice_clear_rate_nodes(pi->root->children[0]);
	mutex_unlock(&pi->sched_lock);
}
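
/* For illustration only: once the topology is exported, the tree can be
 * shaped from userspace through devlink-rate, e.g. (hypothetical PCI
 * address and node names):
 *
 *	$ devlink port function rate add pci/0000:16:00.0/node_custom \
 *		parent node_0
 *	$ devlink port function rate set pci/0000:16:00.0/node_custom \
 *		tx_share 100Mbps tx_max 200Mbps
 */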

/**
 * ice_set_object_tx_share - sets node scheduling parameter
 * @pi: port info struct instance
 * @node: node struct instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets ICE_MIN_BW scheduling BW limit.
 */
static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node,
				   u64 bw, struct netlink_ext_ack *extack)
{
	int status;

	mutex_lock(&pi->sched_lock);
	/* convert bytes per second to kilobits per second (1 kbit/s = 125 B/s) */
	node->tx_share = div_u64(bw, 125);
	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share");

	return status;
}

/**
 * ice_set_object_tx_max - sets node scheduling parameter
 * @pi: port info struct instance
 * @node: node struct instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets ICE_MAX_BW scheduling BW limit.
 */
static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node,
				 u64 bw, struct netlink_ext_ack *extack)
{
	int status;

	mutex_lock(&pi->sched_lock);
	/* convert bytes per second to kilobits per second (1 kbit/s = 125 B/s) */
	node->tx_max = div_u64(bw, 125);
	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max");

	return status;
}

/**
 * ice_set_object_tx_priority - sets node scheduling parameter
 * @pi: port info struct instance
 * @node: node struct instance
 * @priority: value representing priority for strict priority arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets priority of node among siblings.
 */
static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node,
				      u32 priority, struct netlink_ext_ack *extack)
{
	int status;

	if (priority >= 8) {
		NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_priority = priority;
	status = ice_sched_set_node_priority(pi, node, node->tx_priority);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority");

	return status;
}

/**
 * ice_set_object_tx_weight - sets node scheduling parameter
 * @pi: port info struct instance
 * @node: node struct instance
 * @weight: value representing relative weight for WFQ arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets node weight for WFQ algorithm.
 */
static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node,
				    u32 weight, struct netlink_ext_ack *extack)
{
	int status;

	if (weight > 200 || weight < 1) {
		NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_weight = weight;
	status = ice_sched_set_node_weight(pi, node, node->tx_weight);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight");

	return status;
}

/**
 * ice_get_pi_from_dev_rate - get port info from devlink_rate
 * @rate_node: devlink rate struct instance
 *
 * This function returns the corresponding port_info struct of a devlink_rate.
 */
static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node)
{
	struct ice_pf *pf = devlink_priv(rate_node->devlink);

	return ice_get_main_vsi(pf)->port_info;
}

static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
				     struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node;
	struct ice_port_info *pi;

	pi = ice_get_pi_from_dev_rate(rate_node);

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	/* preallocate memory for ice_sched_node */
	node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	*priv = node;

	return 0;
}

static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
				     struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node, *tc_node;
	struct ice_port_info *pi;

	pi = ice_get_pi_from_dev_rate(rate_node);
	tc_node = pi->root->children[0];
	node = priv;

	if (!rate_node->parent || !node || tc_node == node || !extack)
		return 0;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	/* cannot delete a node that has children */
	if (node->num_children)
		return -EINVAL;

	mutex_lock(&pi->sched_lock);
	ice_free_sched_node(pi, node);
	mutex_unlock(&pi->sched_lock);

	return 0;
}

static int ice_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
					    u64 tx_max, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_leaf),
				     node, tx_max, extack);
}

static int ice_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
					      u64 tx_share, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_leaf), node,
				       tx_share, extack);
}

static int ice_devlink_rate_leaf_tx_priority_set(struct devlink_rate *rate_leaf, void *priv,
						 u32 tx_priority, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_leaf), node,
					  tx_priority, extack);
}

static int ice_devlink_rate_leaf_tx_weight_set(struct devlink_rate *rate_leaf, void *priv,
					       u32 tx_weight, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_leaf), node,
					tx_weight, extack);
}

static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
					    u64 tx_max, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_node),
				     node, tx_max, extack);
}

static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
					      u64 tx_share, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_node),
				       node, tx_share, extack);
}

static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node, void *priv,
						 u32 tx_priority, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_node),
					  node, tx_priority, extack);
}

static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, void *priv,
					       u32 tx_weight, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_node),
					node, tx_weight, extack);
}

static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
				  struct devlink_rate *parent,
				  void *priv, void *parent_priv,
				  struct netlink_ext_ack *extack)
{
	struct ice_port_info *pi = ice_get_pi_from_dev_rate(devlink_rate);
	struct ice_sched_node *tc_node, *node, *parent_node;
	u16 num_nodes_added;
	u32 first_node_teid;
	u32 node_teid;
	int status;

	tc_node = pi->root->children[0];
	node = priv;

	if (!extack)
		return 0;

	if (!ice_enable_custom_tx(devlink_priv(devlink_rate->devlink)))
		return -EBUSY;

	if (!parent) {
		if (!node || tc_node == node || node->num_children)
			return -EINVAL;

		mutex_lock(&pi->sched_lock);
		ice_free_sched_node(pi, node);
		mutex_unlock(&pi->sched_lock);

		return 0;
	}

	parent_node = parent_priv;

	/* if the node doesn't exist, create it */
	if (!node->parent) {
		mutex_lock(&pi->sched_lock);
		status = ice_sched_add_elems(pi, tc_node, parent_node,
					     parent_node->tx_sched_layer + 1,
					     1, &num_nodes_added, &first_node_teid,
					     &node);
		mutex_unlock(&pi->sched_lock);

		if (status) {
NL_SET_ERR_MSG_MOD(extack, "Can't add a new node"); 1185 return status; 1186 } 1187 1188 if (devlink_rate->tx_share) 1189 ice_set_object_tx_share(pi, node, devlink_rate->tx_share, extack); 1190 if (devlink_rate->tx_max) 1191 ice_set_object_tx_max(pi, node, devlink_rate->tx_max, extack); 1192 if (devlink_rate->tx_priority) 1193 ice_set_object_tx_priority(pi, node, devlink_rate->tx_priority, extack); 1194 if (devlink_rate->tx_weight) 1195 ice_set_object_tx_weight(pi, node, devlink_rate->tx_weight, extack); 1196 } else { 1197 node_teid = le32_to_cpu(node->info.node_teid); 1198 mutex_lock(&pi->sched_lock); 1199 status = ice_sched_move_nodes(pi, parent_node, 1, &node_teid); 1200 mutex_unlock(&pi->sched_lock); 1201 1202 if (status) 1203 NL_SET_ERR_MSG_MOD(extack, "Can't move existing node to a new parent"); 1204 } 1205 1206 return status; 1207 } 1208 1209 static void ice_set_min_max_msix(struct ice_pf *pf) 1210 { 1211 struct devlink *devlink = priv_to_devlink(pf); 1212 union devlink_param_value val; 1213 int err; 1214 1215 err = devl_param_driverinit_value_get(devlink, 1216 DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, 1217 &val); 1218 if (!err) 1219 pf->msix.min = val.vu32; 1220 1221 err = devl_param_driverinit_value_get(devlink, 1222 DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, 1223 &val); 1224 if (!err) 1225 pf->msix.max = val.vu32; 1226 } 1227 1228 /** 1229 * ice_devlink_reinit_up - do reinit of the given PF 1230 * @pf: pointer to the PF struct 1231 */ 1232 static int ice_devlink_reinit_up(struct ice_pf *pf) 1233 { 1234 struct ice_vsi *vsi = ice_get_main_vsi(pf); 1235 struct device *dev = ice_pf_to_dev(pf); 1236 bool need_dev_deinit = false; 1237 int err; 1238 1239 err = ice_init_hw(&pf->hw); 1240 if (err) { 1241 dev_err(dev, "ice_init_hw failed: %d\n", err); 1242 return err; 1243 } 1244 1245 /* load MSI-X values */ 1246 ice_set_min_max_msix(pf); 1247 1248 err = ice_init_dev(pf); 1249 if (err) 1250 goto unroll_hw_init; 1251 1252 err = ice_init_pf(pf); 1253 if (err) { 1254 dev_err(dev, "ice_init_pf failed: %d\n", err); 1255 goto unroll_dev_init; 1256 } 1257 1258 vsi->flags = ICE_VSI_FLAG_INIT; 1259 1260 rtnl_lock(); 1261 err = ice_vsi_cfg(vsi); 1262 rtnl_unlock(); 1263 if (err) 1264 goto unroll_pf_init; 1265 1266 /* No need to take devl_lock, it's already taken by devlink API */ 1267 err = ice_load(pf); 1268 if (err) 1269 goto err_load; 1270 1271 return 0; 1272 1273 err_load: 1274 rtnl_lock(); 1275 ice_vsi_decfg(vsi); 1276 rtnl_unlock(); 1277 unroll_pf_init: 1278 ice_deinit_pf(pf); 1279 unroll_dev_init: 1280 need_dev_deinit = true; 1281 unroll_hw_init: 1282 ice_deinit_hw(&pf->hw); 1283 if (need_dev_deinit) 1284 ice_deinit_dev(pf); 1285 return err; 1286 } 1287 1288 /** 1289 * ice_devlink_reload_up - do reload up after reinit 1290 * @devlink: pointer to the devlink instance reloading 1291 * @action: the action requested 1292 * @limit: limits imposed by userspace, such as not resetting 1293 * @actions_performed: on return, indicate what actions actually performed 1294 * @extack: netlink extended ACK structure 1295 */ 1296 static int 1297 ice_devlink_reload_up(struct devlink *devlink, 1298 enum devlink_reload_action action, 1299 enum devlink_reload_limit limit, 1300 u32 *actions_performed, 1301 struct netlink_ext_ack *extack) 1302 { 1303 struct ice_pf *pf = devlink_priv(devlink); 1304 1305 switch (action) { 1306 case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: 1307 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); 1308 return ice_devlink_reinit_up(pf); 1309 case 
		*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
		return ice_devlink_reload_empr_finish(pf, extack);
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}

static const struct devlink_ops ice_devlink_ops = {
	.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down = ice_devlink_reload_down,
	.reload_up = ice_devlink_reload_up,
	.eswitch_mode_get = ice_eswitch_mode_get,
	.eswitch_mode_set = ice_eswitch_mode_set,
	.info_get = ice_devlink_info_get,
	.flash_update = ice_devlink_flash_update,

	.rate_node_new = ice_devlink_rate_node_new,
	.rate_node_del = ice_devlink_rate_node_del,

	.rate_leaf_tx_max_set = ice_devlink_rate_leaf_tx_max_set,
	.rate_leaf_tx_share_set = ice_devlink_rate_leaf_tx_share_set,
	.rate_leaf_tx_priority_set = ice_devlink_rate_leaf_tx_priority_set,
	.rate_leaf_tx_weight_set = ice_devlink_rate_leaf_tx_weight_set,

	.rate_node_tx_max_set = ice_devlink_rate_node_tx_max_set,
	.rate_node_tx_share_set = ice_devlink_rate_node_tx_share_set,
	.rate_node_tx_priority_set = ice_devlink_rate_node_tx_priority_set,
	.rate_node_tx_weight_set = ice_devlink_rate_node_tx_weight_set,

	.rate_leaf_parent_set = ice_devlink_set_parent,
	.rate_node_parent_set = ice_devlink_set_parent,

	.port_new = ice_devlink_port_new,
};

static const struct devlink_ops ice_sf_devlink_ops;

static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct iidc_rdma_core_dev_info *cdev;

	cdev = pf->cdev_info;
	if (!cdev)
		return -ENODEV;

	ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2);

	return 0;
}

static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
				       struct devlink_param_gset_ctx *ctx,
				       struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct iidc_rdma_core_dev_info *cdev;
	bool roce_ena = ctx->val.vbool;
	int ret;

	cdev = pf->cdev_info;
	if (!cdev)
		return -ENODEV;

	if (!roce_ena) {
		ice_unplug_aux_dev(pf);
		cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
		return 0;
	}

	cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;

	return ret;
}

static int
ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct iidc_rdma_core_dev_info *cdev;

	cdev = pf->cdev_info;
	if (!cdev)
		return -ENODEV;

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) {
		NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int
ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct iidc_rdma_core_dev_info *cdev;

	cdev = pf->cdev_info;
	if (!cdev)
		return -ENODEV;

	ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP);

	return 0;
}

static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
				     struct devlink_param_gset_ctx *ctx,
				     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct iidc_rdma_core_dev_info *cdev;
	bool iw_ena = ctx->val.vbool;
	int ret;

	cdev = pf->cdev_info;
	if (!cdev)
		return -ENODEV;

	if (!iw_ena) {
		ice_unplug_aux_dev(pf);
		cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
		return 0;
	}

	cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_IWARP;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;

	return ret;
}

static int
ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
			       union devlink_param_value val,
			       struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) {
		NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
		return -EOPNOTSUPP;
	}

	return 0;
}

#define DEVLINK_LOCAL_FWD_DISABLED_STR "disabled"
#define DEVLINK_LOCAL_FWD_ENABLED_STR "enabled"
#define DEVLINK_LOCAL_FWD_PRIORITIZED_STR "prioritized"

/**
 * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode.
 * @mode: local forwarding mode used in port_info struct.
 *
 * Return: Mode respective string or "Invalid".
 */
static const char *
ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode)
{
	switch (mode) {
	case ICE_LOCAL_FWD_MODE_ENABLED:
		return DEVLINK_LOCAL_FWD_ENABLED_STR;
	case ICE_LOCAL_FWD_MODE_PRIORITIZED:
		return DEVLINK_LOCAL_FWD_PRIORITIZED_STR;
	case ICE_LOCAL_FWD_MODE_DISABLED:
		return DEVLINK_LOCAL_FWD_DISABLED_STR;
	}

	return "Invalid";
}

/**
 * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name.
 * @mode_str: local forwarding mode string.
 *
 * Return: Mode value or negative number if invalid.
 */
static int ice_devlink_local_fwd_str_to_mode(const char *mode_str)
{
	if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR))
		return ICE_LOCAL_FWD_MODE_ENABLED;
	else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR))
		return ICE_LOCAL_FWD_MODE_PRIORITIZED;
	else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR))
		return ICE_LOCAL_FWD_MODE_DISABLED;

	return -EINVAL;
}
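
/* For illustration only: local_forwarding is exposed as a runtime string
 * devlink parameter taking the values above, e.g. (hypothetical PCI
 * address):
 *
 *	$ devlink dev param set pci/0000:16:00.0 \
 *		name local_forwarding value prioritized cmode runtime
 */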

/**
 * ice_devlink_local_fwd_get - Get local_fwd parameter.
 * @devlink: Pointer to the devlink instance.
 * @id: The parameter ID to get.
 * @ctx: Context to store the parameter value.
 *
 * Return: Zero.
 */
static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
				     struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct ice_port_info *pi;
	const char *mode_str;

	pi = pf->hw.port_info;
	mode_str = ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode);
	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", mode_str);

	return 0;
}

/**
 * ice_devlink_local_fwd_set - Set local_fwd parameter.
 * @devlink: Pointer to the devlink instance.
 * @id: The parameter ID to set.
 * @ctx: Context to get the parameter value.
 * @extack: Netlink extended ACK structure.
 *
 * Return: Zero.
 */
static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id,
				     struct devlink_param_gset_ctx *ctx,
				     struct netlink_ext_ack *extack)
{
	int new_local_fwd_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr);
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_port_info *pi;

	pi = pf->hw.port_info;
	if (pi->local_fwd_mode != new_local_fwd_mode) {
		pi->local_fwd_mode = new_local_fwd_mode;
		dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr);
		ice_schedule_reset(pf, ICE_RESET_CORER);
	}

	return 0;
}

/**
 * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value.
 * @devlink: Unused pointer to devlink instance.
 * @id: The parameter ID to validate.
 * @val: Value to validate.
 * @extack: Netlink extended ACK structure.
 *
 * Supported values are:
 * "enabled" - local_fwd is enabled, "disabled" - local_fwd is disabled,
 * "prioritized" - local_fwd traffic is prioritized in scheduling.
 *
 * Return: Zero when passed parameter value is supported. Negative value on
 * error.
 */
static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
					  union devlink_param_value val,
					  struct netlink_ext_ack *extack)
{
	if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported.");
		return -EINVAL;
	}

	return 0;
}

static int
ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors)
		return -EINVAL;

	return 0;
}

static int
ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	if (val.vu32 < ICE_MIN_MSIX)
		return -EINVAL;

	return 0;
}

static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
					    union devlink_param_value val,
					    struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	bool new_state = val.vbool;

	if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	return 0;
}

enum ice_param_id {
	ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
	ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
};

static const struct devlink_param ice_dvl_rdma_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      ice_devlink_enable_roce_get,
			      ice_devlink_enable_roce_set,
			      ice_devlink_enable_roce_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      ice_devlink_enable_iw_get,
			      ice_devlink_enable_iw_set,
			      ice_devlink_enable_iw_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, ice_devlink_enable_rdma_validate),
};

static const struct devlink_param ice_dvl_msix_params[] = {
	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, ice_devlink_msix_max_pf_validate),
	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, ice_devlink_msix_min_pf_validate),
};

static const struct devlink_param ice_dvl_sched_params[] = {
	DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
			     "tx_scheduling_layers",
			     DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			     ice_devlink_tx_sched_layers_get,
			     ice_devlink_tx_sched_layers_set,
			     ice_devlink_tx_sched_layers_validate),
	DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
			     "local_forwarding", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     ice_devlink_local_fwd_get,
			     ice_devlink_local_fwd_set,
			     ice_devlink_local_fwd_validate),
};

static void ice_devlink_free(void *devlink_ptr)
{
	devlink_free((struct devlink *)devlink_ptr);
}

/**
 * ice_allocate_pf - Allocate devlink and return PF structure pointer
 * @dev: the device to allocate for
 *
 * Allocate a devlink instance for this device and return the private area as
 * the PF structure. The devlink memory is kept track of through devres by
 * adding an action to remove it when unwinding.
 */
struct ice_pf *ice_allocate_pf(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev);
	if (!devlink)
		return NULL;

	/* Add an action to teardown the devlink when unwinding the driver */
	if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
		return NULL;

	return devlink_priv(devlink);
}

/**
 * ice_allocate_sf - Allocate devlink and return SF structure pointer
 * @dev: the device to allocate for
 * @pf: pointer to the PF structure
 *
 * Allocate a devlink instance for SF.
 *
 * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error
 */
struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf)
{
	struct devlink *devlink;
	int err;

	devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv),
				dev);
	if (!devlink)
		return ERR_PTR(-ENOMEM);

	err = devl_nested_devlink_set(priv_to_devlink(pf), devlink);
	if (err) {
		devlink_free(devlink);
		return ERR_PTR(err);
	}

	return devlink_priv(devlink);
}

/**
 * ice_devlink_register - Register devlink interface for this PF
 * @pf: the PF to register the devlink for.
 *
 * Register the devlink instance associated with this physical function.
 */
void ice_devlink_register(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	devl_register(devlink);
}

/**
 * ice_devlink_unregister - Unregister devlink resources for this PF.
 * @pf: the PF structure to cleanup
 *
 * Releases resources used by devlink and cleans up associated memory.
 */
void ice_devlink_unregister(struct ice_pf *pf)
{
	devl_unregister(priv_to_devlink(pf));
}

int ice_devlink_register_params(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	union devlink_param_value value;
	struct ice_hw *hw = &pf->hw;
	int status;

	status = devl_params_register(devlink, ice_dvl_rdma_params,
				      ARRAY_SIZE(ice_dvl_rdma_params));
	if (status)
		return status;

	status = devl_params_register(devlink, ice_dvl_msix_params,
				      ARRAY_SIZE(ice_dvl_msix_params));
	if (status)
		goto unregister_rdma_params;

	if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
		status = devl_params_register(devlink, ice_dvl_sched_params,
					      ARRAY_SIZE(ice_dvl_sched_params));
	if (status)
		goto unregister_msix_params;

	value.vu32 = pf->msix.max;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
					value);
	value.vu32 = pf->msix.min;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
					value);

	value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
					value);

	return 0;

unregister_msix_params:
	devl_params_unregister(devlink, ice_dvl_msix_params,
			       ARRAY_SIZE(ice_dvl_msix_params));
unregister_rdma_params:
	devl_params_unregister(devlink, ice_dvl_rdma_params,
			       ARRAY_SIZE(ice_dvl_rdma_params));
	return status;
}

void ice_devlink_unregister_params(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct ice_hw *hw = &pf->hw;

	devl_params_unregister(devlink, ice_dvl_rdma_params,
			       ARRAY_SIZE(ice_dvl_rdma_params));
	devl_params_unregister(devlink, ice_dvl_msix_params,
			       ARRAY_SIZE(ice_dvl_msix_params));

	if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
		devl_params_unregister(devlink, ice_dvl_sched_params,
				       ARRAY_SIZE(ice_dvl_sched_params));
}

#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)

static const struct devlink_region_ops ice_nvm_region_ops;
static const struct devlink_region_ops ice_sram_region_ops;

/**
 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to snapshot
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to a DEVLINK_CMD_REGION_NEW for either
 * the nvm-flash or shadow-ram region.
 *
 * It captures a snapshot of the NVM or Shadow RAM flash contents. This
 * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink
 * interface.
 *
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)

static const struct devlink_region_ops ice_nvm_region_ops;
static const struct devlink_region_ops ice_sram_region_ops;

/**
 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to snapshot
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to a DEVLINK_CMD_REGION_NEW for either
 * the nvm-flash or shadow-ram region.
 *
 * It captures a snapshot of the NVM or Shadow RAM flash contents. This
 * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink
 * interface.
 *
 * Return: zero on success (in which case @data points at the snapshot
 * buffer), or a negative error code on failure.
 */
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
				    const struct devlink_region_ops *ops,
				    struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool read_shadow_ram;
	u8 *nvm_data, *tmp;
	u32 nvm_size, left;
	u32 i, num_blks;
	int status;

	if (ops == &ice_nvm_region_ops) {
		read_shadow_ram = false;
		nvm_size = hw->flash.flash_size;
	} else if (ops == &ice_sram_region_ops) {
		read_shadow_ram = true;
		nvm_size = hw->flash.sr_words * 2u;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function");
		return -EOPNOTSUPP;
	}

	nvm_data = vzalloc(nvm_size);
	if (!nvm_data)
		return -ENOMEM;

	num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
	tmp = nvm_data;
	left = nvm_size;

	/* Some systems take longer to read the NVM than others, which can
	 * cause the firmware to reclaim the NVM lock before the entire NVM
	 * has been read. Avoid this by breaking the read into smaller chunks
	 * that are less likely to time out. This adds some overhead, since
	 * more AQ commands are issued, but it is reliable.
	 */
	for (i = 0; i < num_blks; i++) {
		u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left);

		status = ice_acquire_nvm(hw, ICE_RES_READ);
		if (status) {
			dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
				status, hw->adminq.sq_last_status);
			NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
			vfree(nvm_data);
			return -EIO;
		}

		status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
					   &read_sz, tmp, read_shadow_ram);
		if (status) {
			dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
				read_sz, status, hw->adminq.sq_last_status);
			NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
			ice_release_nvm(hw);
			vfree(nvm_data);
			return -EIO;
		}
		ice_release_nvm(hw);

		tmp += read_sz;
		left -= read_sz;
	}

	*data = nvm_data;

	return 0;
}
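/* Example (illustrative): capturing, dumping, and deleting an NVM snapshot
 * via the devlink CLI exercises the snapshot handler above. The PCI address
 * and snapshot ID are hypothetical:
 *
 *   $ devlink region new pci/0000:01:00.0/nvm-flash snapshot 1
 *   $ devlink region dump pci/0000:01:00.0/nvm-flash snapshot 1
 *   $ devlink region del pci/0000:01:00.0/nvm-flash snapshot 1
 */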
/**
 * ice_devlink_nvm_read - Read a portion of NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to read
 * @extack: extended ACK response structure
 * @offset: the offset to start at
 * @size: the amount to read
 * @data: the data buffer to read into
 *
 * This function is called in response to DEVLINK_CMD_REGION_READ to directly
 * read a section of the NVM contents.
 *
 * It reads from either the nvm-flash or shadow-ram region contents.
 *
 * Return: zero on success, or a negative error code on failure.
 */
static int ice_devlink_nvm_read(struct devlink *devlink,
				const struct devlink_region_ops *ops,
				struct netlink_ext_ack *extack,
				u64 offset, u32 size, u8 *data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool read_shadow_ram;
	u64 nvm_size;
	int status;

	if (ops == &ice_nvm_region_ops) {
		read_shadow_ram = false;
		nvm_size = hw->flash.flash_size;
	} else if (ops == &ice_sram_region_ops) {
		read_shadow_ram = true;
		nvm_size = hw->flash.sr_words * 2u;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unexpected region in read function");
		return -EOPNOTSUPP;
	}

	if (offset + size > nvm_size) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
		return -ERANGE;
	}

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		return -EIO;
	}

	status = ice_read_flat_nvm(hw, (u32)offset, &size, data,
				   read_shadow_ram);
	if (status) {
		dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
			size, status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
		ice_release_nvm(hw);
		return -EIO;
	}
	ice_release_nvm(hw);

	return 0;
}
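/* Example (illustrative): a devlink CLI with direct-read support can invoke
 * the read handler above without taking a snapshot first. The PCI address,
 * offset, and length are hypothetical:
 *
 *   $ devlink region read pci/0000:01:00.0/shadow-ram address 0 length 256
 */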
/**
 * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to DEVLINK_CMD_REGION_NEW for the
 * device-caps devlink region. It captures a snapshot of the device
 * capabilities reported by firmware.
 *
 * Return: zero on success (in which case @data points at the snapshot
 * buffer), or a negative error code on failure.
 */
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
			     const struct devlink_region_ops *ops,
			     struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	void *devcaps;
	int status;

	devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
	if (!devcaps)
		return -ENOMEM;

	status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (status) {
		dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
		vfree(devcaps);
		return status;
	}

	*data = (u8 *)devcaps;

	return 0;
}

static const struct devlink_region_ops ice_nvm_region_ops = {
	.name = "nvm-flash",
	.destructor = vfree,
	.snapshot = ice_devlink_nvm_snapshot,
	.read = ice_devlink_nvm_read,
};

static const struct devlink_region_ops ice_sram_region_ops = {
	.name = "shadow-ram",
	.destructor = vfree,
	.snapshot = ice_devlink_nvm_snapshot,
	.read = ice_devlink_nvm_read,
};

static const struct devlink_region_ops ice_devcaps_region_ops = {
	.name = "device-caps",
	.destructor = vfree,
	.snapshot = ice_devlink_devcaps_snapshot,
};

/**
 * ice_devlink_init_regions - Initialize devlink regions
 * @pf: the PF device structure
 *
 * Create the devlink regions used to provide access to the contents of the
 * device flash memory and the device capabilities.
 */
void ice_devlink_init_regions(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct device *dev = ice_pf_to_dev(pf);
	u64 nvm_size, sram_size;

	nvm_size = pf->hw.flash.flash_size;
	pf->nvm_region = devl_region_create(devlink, &ice_nvm_region_ops, 1,
					    nvm_size);
	if (IS_ERR(pf->nvm_region)) {
		dev_err(dev, "failed to create NVM devlink region, err %ld\n",
			PTR_ERR(pf->nvm_region));
		pf->nvm_region = NULL;
	}

	sram_size = pf->hw.flash.sr_words * 2u;
	pf->sram_region = devl_region_create(devlink, &ice_sram_region_ops,
					     1, sram_size);
	if (IS_ERR(pf->sram_region)) {
		dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
			PTR_ERR(pf->sram_region));
		pf->sram_region = NULL;
	}

	pf->devcaps_region = devl_region_create(devlink,
						&ice_devcaps_region_ops, 10,
						ICE_AQ_MAX_BUF_LEN);
	if (IS_ERR(pf->devcaps_region)) {
		dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
			PTR_ERR(pf->devcaps_region));
		pf->devcaps_region = NULL;
	}
}

/**
 * ice_devlink_destroy_regions - Destroy devlink regions
 * @pf: the PF device structure
 *
 * Remove previously created regions for this PF.
 */
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
	if (pf->nvm_region)
		devl_region_destroy(pf->nvm_region);

	if (pf->sram_region)
		devl_region_destroy(pf->sram_region);

	if (pf->devcaps_region)
		devl_region_destroy(pf->devcaps_region);
}
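/* Example (illustrative): listing the regions created in
 * ice_devlink_init_regions() and snapshotting the device capabilities. The
 * PCI address and snapshot ID are hypothetical:
 *
 *   $ devlink region show
 *   $ devlink region new pci/0000:01:00.0/device-caps snapshot 1
 *   $ devlink region dump pci/0000:01:00.0/device-caps snapshot 1
 */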