// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/vmalloc.h>

#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
#include "port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
#include "ice_sf_eth.h"

/* context for devlink info version reporting */
struct ice_info_ctx {
	char buf[128];
	struct ice_orom_info pending_orom;
	struct ice_nvm_info pending_nvm;
	struct ice_netlist_info pending_netlist;
	struct ice_hw_dev_caps dev_caps;
};

/* The following functions are used to format specific strings for various
 * devlink info versions. The ctx parameter is used to provide the storage
 * buffer, as well as any ancillary information calculated when the info
 * request was made.
 *
 * If a version does not exist, for example when attempting to get the
 * inactive version of flash when there is no pending update, the function
 * should leave the buffer in the ctx structure empty.
 */

static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	u8 dsn[8];

	/* Copy the DSN into an array in Big Endian format */
	put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);

	snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}

static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;
	int status;

	status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
	if (status)
		/* We failed to locate the PBA, so just skip this entry */
		dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
			status);
}

static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
}

static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
		 hw->api_min_ver, hw->api_patch);
}

static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
}

static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &pf->hw.flash.orom;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 orom->major, orom->build, orom->patch);
}

static void
ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
			  struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &ctx->pending_orom;

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
		snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
			 orom->major, orom->build, orom->patch);
}

static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
}

static void
ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
			 struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
			 nvm->major, nvm->minor);
}

static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
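	/* The EETRACK ID is a 32-bit value formatted as "0x%08x"; a
	 * hypothetical value of 0x80001234 would show up as
	 * "fw.bundle_id 0x80001234" in "devlink dev info" output
	 * (illustrative example only).
	 */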
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void
ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
}

static void
ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
		 pkg->major, pkg->minor, pkg->update, pkg->draft);
}

static void
ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
}

static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	/* The netlist version fields are BCD formatted */
	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
		 netlist->major, netlist->minor,
		 netlist->type >> 16, netlist->type & 0xFFFF,
		 netlist->rev, netlist->cust_ver);
}

static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

static void
ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
			     struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	/* The netlist version fields are BCD formatted */
	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
			 netlist->major, netlist->minor,
			 netlist->type >> 16, netlist->type & 0xFFFF,
			 netlist->rev, netlist->cust_ver);
}

static void
ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
			       struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

static void ice_info_cgu_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	u32 id, cfg_ver, fw_ver;

	if (!ice_is_feature_supported(pf, ICE_F_CGU))
		return;
	if (ice_aq_get_cgu_info(&pf->hw, &id, &cfg_ver, &fw_ver))
		return;
	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", id, cfg_ver, fw_ver);
}

static void ice_info_cgu_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	if (!ice_is_feature_supported(pf, ICE_F_CGU))
		return;
	snprintf(ctx->buf, sizeof(ctx->buf), "%u", pf->hw.cgu_part_number);
}

#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }

/* The combined() macro inserts both the running entry as well as a stored
 * entry. The running entry will always report the version from the active
 * handler.
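 * For example (illustrative expansion),
 *   combined("fw.undi", ice_info_orom_ver, ice_info_pending_orom_ver)
 * produces:
 *   running("fw.undi", ice_info_orom_ver),
 *   stored("fw.undi", ice_info_pending_orom_ver, ice_info_orom_ver)
 *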
 * The stored entry will first try the pending handler, and fall back
 * to the active handler if the pending function does not report a version.
 * The pending handler should check the status of a pending update for the
 * relevant flash component. It should only fill in the buffer in the case
 * where a valid pending version is available. This ensures that the related
 * stored and running versions remain in sync, and that stored versions are
 * correctly reported.
 */
#define combined(key, active, pending) \
	running(key, active), \
	stored(key, pending, active)

enum ice_version_type {
	ICE_VERSION_FIXED,
	ICE_VERSION_RUNNING,
	ICE_VERSION_STORED,
};

static const struct ice_devlink_version {
	enum ice_version_type type;
	const char *key;
	void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
	void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
	fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
	running("fw.mgmt.api", ice_info_fw_api),
	running("fw.mgmt.build", ice_info_fw_build),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver, ice_info_pending_orom_ver),
	combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack, ice_info_pending_eetrack),
	running("fw.app.name", ice_info_ddp_pkg_name),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
	running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
	combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
	combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
	fixed("cgu.id", ice_info_cgu_id),
	running("fw.cgu", ice_info_cgu_fw_build),
};

/**
 * ice_devlink_info_get - .info_get devlink handler
 * @devlink: devlink instance structure
 * @req: the devlink info request
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .info_get operation. Reports information about the
 * device.
 *
 * Return: zero on success or an error code on failure.
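 *
 * Illustrative output (hypothetical version values):
 *
 *	$ devlink dev info pci/0000:af:00.0
 *	pci/0000:af:00.0:
 *	  versions:
 *	      running:
 *	        fw.mgmt 7.3.5
 *	        fw.psid.api 3.40
 *	      stored:
 *	        fw.psid.api 3.41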
 */
static int ice_devlink_info_get(struct devlink *devlink,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_info_ctx *ctx;
	size_t i;
	int err;

	err = ice_wait_for_reset(pf, 10 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
		return err;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* discover capabilities first */
	err = ice_discover_dev_caps(hw, &ctx->dev_caps);
	if (err) {
		dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
			err, libie_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
		goto out_free_ctx;
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
		err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending Option ROM */
			ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
		err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
		if (err) {
			dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending NVM */
			ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
		err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending netlist */
			ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
		}
	}

	ice_info_get_dsn(pf, ctx);

	err = devlink_info_serial_number_put(req, ctx->buf);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
		goto out_free_ctx;
	}

	for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
		enum ice_version_type type = ice_devlink_versions[i].type;
		const char *key = ice_devlink_versions[i].key;

		memset(ctx->buf, 0, sizeof(ctx->buf));

		ice_devlink_versions[i].getter(pf, ctx);

		/* If the default getter doesn't report a version, use the
		 * fallback function. This is primarily useful in the case of
		 * "stored" versions that want to report the same value as the
		 * running version in the normal case of no pending update.
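		 *
		 * For example, with no pending NVM update the stored
		 * "fw.psid.api" entry falls back to ice_info_nvm_ver() and
		 * therefore matches the running version.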
		 */
		if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
			ice_devlink_versions[i].fallback(pf, ctx);

		/* Do not report missing versions */
		if (ctx->buf[0] == '\0')
			continue;

		switch (type) {
		case ICE_VERSION_FIXED:
			err = devlink_info_version_fixed_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_RUNNING:
			err = devlink_info_version_running_put_ext(req, key,
								   ctx->buf,
								   DEVLINK_INFO_VERSION_TYPE_COMPONENT);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_STORED:
			err = devlink_info_version_stored_put_ext(req, key,
								  ctx->buf,
								  DEVLINK_INFO_VERSION_TYPE_COMPONENT);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
				goto out_free_ctx;
			}
			break;
		}
	}

out_free_ctx:
	kfree(ctx);
	return err;
}

/**
 * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Allow user to activate new Embedded Management Processor firmware by
 * issuing a device specific EMP reset. Called in response to
 * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
 *
 * Note that teardown and rebuild of the driver state happens automatically as
 * part of an interrupt and watchdog task. This is because all physical
 * functions on the device must be able to reset when an EMP reset occurs from
 * any source.
 */
static int
ice_devlink_reload_empr_start(struct ice_pf *pf,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u8 pending;
	int err;

	err = ice_get_pending_updates(pf, &pending, extack);
	if (err)
		return err;

	/* pending is a bitmask of which flash banks have a pending update,
	 * including the main NVM bank, the Option ROM bank, and the netlist
	 * bank. If any of these bits are set, then there is a pending update
	 * waiting to be activated.
	 */
	if (!pending) {
		NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
		return -ECANCELED;
	}

	if (pf->fw_emp_reset_disabled) {
		NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
		return -ECANCELED;
	}

	dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");

	err = ice_aq_nvm_update_empr(hw);
	if (err) {
		dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
			err, libie_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
		return err;
	}

	return 0;
}

/**
 * ice_devlink_reinit_down - unload given PF
 * @pf: pointer to the PF struct
 */
static void ice_devlink_reinit_down(struct ice_pf *pf)
{
	/* No need to take devl_lock, it's already taken by devlink API */
	ice_unload(pf);
	rtnl_lock();
	ice_vsi_decfg(ice_get_main_vsi(pf));
	rtnl_unlock();
	ice_deinit_pf(pf);
	ice_deinit_dev(pf);
}

/**
 * ice_devlink_reload_down - prepare for reload
 * @devlink: pointer to the devlink instance to reload
 * @netns_change: if true, the network namespace is changing
 * @action: the action to perform
 * @limit: limits on what reload should do, such as not resetting
 * @extack: netlink extended ACK structure
 */
static int
ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
			enum devlink_reload_action action,
			enum devlink_reload_limit limit,
			struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		if (ice_is_eswitch_mode_switchdev(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Go to legacy mode before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_is_adq_active(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Turn off ADQ before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_has_vfs(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Remove all VFs before doing reinit");
			return -EOPNOTSUPP;
		}
		ice_devlink_reinit_down(pf);
		return 0;
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		return ice_devlink_reload_empr_start(pf, extack);
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}

/**
 * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Wait for the driver to finish rebuilding after the EMP reset has completed.
 * This includes time to wait for both the actual device reset as well as the
 * time for the driver's rebuild to complete.
 */
static int
ice_devlink_reload_empr_finish(struct ice_pf *pf,
			       struct netlink_ext_ack *extack)
{
	int err;

	err = ice_wait_for_reset(pf, 60 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
		return err;
	}

	return 0;
}

/**
 * ice_get_tx_topo_user_sel - Read user's choice from flash
 * @pf: pointer to pf structure
 * @layers: value read from flash will be saved here
 *
 * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV.
 *
 * Return: zero when read was successful, negative values otherwise.
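 *
 * A devlink user would read this selection as (illustrative invocation):
 *
 *	$ devlink dev param show pci/0000:af:00.0 name tx_scheduling_layers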
 */
static int ice_get_tx_topo_user_sel(struct ice_pf *pf, u8 *layers)
{
	struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
	struct ice_hw *hw = &pf->hw;
	int err;

	err = ice_acquire_nvm(hw, ICE_RES_READ);
	if (err)
		return err;

	err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
			      sizeof(usr_sel), &usr_sel, true, true, NULL);
	if (err)
		goto exit_release_res;

	if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL)
		*layers = ICE_SCHED_5_LAYERS;
	else
		*layers = ICE_SCHED_9_LAYERS;

exit_release_res:
	ice_release_nvm(hw);

	return err;
}

/**
 * ice_update_tx_topo_user_sel - Save user's preference in flash
 * @pf: pointer to pf structure
 * @layers: value to be saved in flash
 *
 * Variable "layers" defines the user's preference about the number of layers
 * in the Tx Scheduler Topology Tree. This choice is stored in the PFA TLV
 * field and is picked up by the driver at the next init.
 *
 * Return: zero when save was successful, negative values otherwise.
 */
static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers)
{
	struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
	struct ice_hw *hw = &pf->hw;
	int err;

	err = ice_acquire_nvm(hw, ICE_RES_WRITE);
	if (err)
		return err;

	err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
			      sizeof(usr_sel), &usr_sel, true, true, NULL);
	if (err)
		goto exit_release_res;

	if (layers == ICE_SCHED_5_LAYERS)
		usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL;
	else
		usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL;

	err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2,
				      sizeof(usr_sel.data), &usr_sel.data,
				      true, NULL, NULL);
exit_release_res:
	ice_release_nvm(hw);

	return err;
}

/**
 * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID to get
 * @ctx: context to store the parameter value
 * @extack: netlink extended ACK structure
 *
 * Return: zero on success and negative value on failure.
 */
static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx,
					   struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	int err;

	err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8);
	if (err)
		return err;

	return 0;
}

/**
 * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID to set
 * @ctx: context to get the parameter value
 * @extack: netlink extended ACK structure
 *
 * Return: zero on success and negative value on failure.
 */
static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx,
					   struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	int err;

	err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8);
	if (err)
		return err;

	NL_SET_ERR_MSG_MOD(extack,
			   "Tx scheduling layers have been changed on this device. You must power cycle the PCI slot for the change to take effect.");
	return 0;
}

/**
 * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers
 *                                        parameter value
 * @devlink: unused pointer to devlink instance
 * @id: the parameter ID to validate
 * @val: value to validate
 * @extack: netlink extended ACK structure
 *
 * Supported values are:
 * - 5 - five layers Tx Scheduler Topology Tree
 * - 9 - nine layers Tx Scheduler Topology Tree
 *
 * Return: zero when the passed parameter value is supported. Negative value
 * on error.
 */
static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id,
						union devlink_param_value val,
						struct netlink_ext_ack *extack)
{
	if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Wrong number of tx scheduler layers provided.");
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree
 * @pf: pf struct
 *
 * This function tears down the devlink-rate tree exported during VF creation.
 */
void ice_tear_down_devlink_rate_tree(struct ice_pf *pf)
{
	struct devlink *devlink;
	struct ice_vf *vf;
	unsigned int bkt;

	devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		if (vf->devlink_port.devlink_rate)
			devl_rate_leaf_destroy(&vf->devlink_port);
	}
	mutex_unlock(&pf->vfs.table_lock);

	devl_rate_nodes_destroy(devlink);
	devl_unlock(devlink);
}

/**
 * ice_enable_custom_tx - try to enable custom Tx feature
 * @pf: pf struct
 *
 * This function tries to enable the custom Tx feature; enabling it is not
 * possible if DCB or ADQ is active.
 */
static bool ice_enable_custom_tx(struct ice_pf *pf)
{
	struct ice_port_info *pi = ice_get_main_vsi(pf)->port_info;
	struct device *dev = ice_pf_to_dev(pf);

	if (pi->is_custom_tx_enabled)
		/* already enabled, return true */
		return true;

	if (ice_is_adq_active(pf)) {
		dev_err(dev, "ADQ active, can't modify Tx scheduler tree\n");
		return false;
	}

	if (ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB active, can't modify Tx scheduler tree\n");
		return false;
	}

	pi->is_custom_tx_enabled = true;

	return true;
}

/**
 * ice_traverse_tx_tree - traverse Tx scheduler tree
 * @devlink: devlink struct
 * @node: current node, used for recursion
 * @tc_node: tc_node struct, that is treated as a root
 * @pf: pf struct
 *
 * This function traverses the Tx scheduler tree and exports the
 * entire structure to devlink-rate.
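 *
 * Illustrative result: intermediate scheduler nodes become devlink-rate
 * nodes named after node->name, while VF and subfunction queue nodes are
 * attached as devlink-rate leaves on their respective devlink ports.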
 */
static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
				 struct ice_sched_node *tc_node, struct ice_pf *pf)
{
	struct devlink_rate *rate_node = NULL;
	struct ice_dynamic_port *sf;
	struct ice_vf *vf;
	int i;

	if (node->rate_node)
		/* already added, skip to the next */
		goto traverse_children;

	if (node->parent == tc_node) {
		/* create root node */
		rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
	} else if (node->vsi_handle &&
		   pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
		   pf->vsi[node->vsi_handle]->vf) {
		vf = pf->vsi[node->vsi_handle]->vf;
		if (!vf->devlink_port.devlink_rate)
			/* leaf nodes don't have children,
			 * so we don't set rate_node
			 */
			devl_rate_leaf_create(&vf->devlink_port, node,
					      node->parent->rate_node);
	} else if (node->vsi_handle &&
		   pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
		   pf->vsi[node->vsi_handle]->sf) {
		sf = pf->vsi[node->vsi_handle]->sf;
		if (!sf->devlink_port.devlink_rate)
			/* leaf nodes don't have children,
			 * so we don't set rate_node
			 */
			devl_rate_leaf_create(&sf->devlink_port, node,
					      node->parent->rate_node);
	} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
		   node->parent->rate_node) {
		rate_node = devl_rate_node_create(devlink, node, node->name,
						  node->parent->rate_node);
	}

	if (rate_node && !IS_ERR(rate_node))
		node->rate_node = rate_node;

traverse_children:
	for (i = 0; i < node->num_children; i++)
		ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
}

/**
 * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate
 * @devlink: devlink struct
 * @vsi: main vsi struct
 *
 * This function finds the root node, then calls ice_traverse_tx_tree, which
 * traverses the tree and exports its contents to devlink-rate.
 */
int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
{
	struct ice_port_info *pi = vsi->port_info;
	struct ice_sched_node *tc_node;
	struct ice_pf *pf = vsi->back;
	int i;

	tc_node = pi->root->children[0];
	mutex_lock(&pi->sched_lock);
	for (i = 0; i < tc_node->num_children; i++)
		ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
	mutex_unlock(&pi->sched_lock);

	return 0;
}

static void ice_clear_rate_nodes(struct ice_sched_node *node)
{
	node->rate_node = NULL;

	for (int i = 0; i < node->num_children; i++)
		ice_clear_rate_nodes(node->children[i]);
}

/**
 * ice_devlink_rate_clear_tx_topology - clear node->rate_node
 * @vsi: main vsi struct
 *
 * Clear rate_node to clean up the created Tx topology.
 */
void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi)
{
	struct ice_port_info *pi = vsi->port_info;

	mutex_lock(&pi->sched_lock);
	ice_clear_rate_nodes(pi->root->children[0]);
	mutex_unlock(&pi->sched_lock);
}

/**
 * ice_set_object_tx_share - sets node scheduling parameter
 * @pi: port_info struct instance
 * @node: node struct instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets the ICE_MIN_BW scheduling BW limit.
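 *
 * Illustrative arithmetic: devlink passes tx_share in bytes per second and
 * the scheduler is programmed in Kbps, so a request of 12500000 bytes/s is
 * stored as 12500000 / 125 = 100000 Kbps.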
 */
static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node,
				   u64 bw, struct netlink_ext_ack *extack)
{
	int status;

	mutex_lock(&pi->sched_lock);
	/* converts bytes per second to kilo bits per second */
	node->tx_share = div_u64(bw, 125);
	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share");

	return status;
}

/**
 * ice_set_object_tx_max - sets node scheduling parameter
 * @pi: port_info struct instance
 * @node: node struct instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets the ICE_MAX_BW scheduling BW limit.
 */
static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node,
				 u64 bw, struct netlink_ext_ack *extack)
{
	int status;

	mutex_lock(&pi->sched_lock);
	/* converts bytes per second value to kilo bits per second */
	node->tx_max = div_u64(bw, 125);
	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max");

	return status;
}

/**
 * ice_set_object_tx_priority - sets node scheduling parameter
 * @pi: port_info struct instance
 * @node: node struct instance
 * @priority: value representing priority for strict priority arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets the priority of a node among its siblings.
 */
static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node,
				      u32 priority, struct netlink_ext_ack *extack)
{
	int status;

	if (priority >= 8) {
		NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_priority = priority;
	status = ice_sched_set_node_priority(pi, node, node->tx_priority);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority");

	return status;
}

/**
 * ice_set_object_tx_weight - sets node scheduling parameter
 * @pi: port_info struct instance
 * @node: node struct instance
 * @weight: value representing relative weight for WFQ arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets the node weight for the WFQ algorithm.
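 *
 * For example (illustrative), two backlogged siblings with weights 1 and 4
 * are served in an approximate 1:4 bandwidth ratio by the WFQ arbiter.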
 */
static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node,
				    u32 weight, struct netlink_ext_ack *extack)
{
	int status;

	if (weight > 200 || weight < 1) {
		NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_weight = weight;
	status = ice_sched_set_node_weight(pi, node, node->tx_weight);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight");

	return status;
}

/**
 * ice_get_pi_from_dev_rate - get port info from devlink_rate
 * @rate_node: devlink_rate struct instance
 *
 * This function returns the corresponding port_info struct of a devlink_rate
 * object.
 */
static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node)
{
	struct ice_pf *pf = devlink_priv(rate_node->devlink);

	return ice_get_main_vsi(pf)->port_info;
}

static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
				     struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node;
	struct ice_port_info *pi;

	pi = ice_get_pi_from_dev_rate(rate_node);

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	/* preallocate memory for ice_sched_node */
	node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	*priv = node;

	return 0;
}

static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
				     struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node, *tc_node;
	struct ice_port_info *pi;

	pi = ice_get_pi_from_dev_rate(rate_node);
	tc_node = pi->root->children[0];
	node = priv;

	if (!rate_node->parent || !node || tc_node == node || !extack)
		return 0;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	/* can't allow to delete a node with children */
	if (node->num_children)
		return -EINVAL;

	mutex_lock(&pi->sched_lock);
	ice_free_sched_node(pi, node);
	mutex_unlock(&pi->sched_lock);

	return 0;
}

static int ice_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
					    u64 tx_max, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_leaf),
				     node, tx_max, extack);
}

static int ice_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
					      u64 tx_share, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_leaf), node,
				       tx_share, extack);
}

static int ice_devlink_rate_leaf_tx_priority_set(struct devlink_rate *rate_leaf, void *priv,
						 u32 tx_priority, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_leaf), node,
					  tx_priority, extack);
}
static int ice_devlink_rate_leaf_tx_weight_set(struct devlink_rate *rate_leaf, void *priv,
					       u32 tx_weight, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_leaf), node,
					tx_weight, extack);
}

static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
					    u64 tx_max, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_node),
				     node, tx_max, extack);
}

static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
					      u64 tx_share, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_node),
				       node, tx_share, extack);
}

static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node, void *priv,
						 u32 tx_priority, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_node),
					  node, tx_priority, extack);
}

static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, void *priv,
					       u32 tx_weight, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_node),
					node, tx_weight, extack);
}

static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
				  struct devlink_rate *parent,
				  void *priv, void *parent_priv,
				  struct netlink_ext_ack *extack)
{
	struct ice_port_info *pi = ice_get_pi_from_dev_rate(devlink_rate);
	struct ice_sched_node *tc_node, *node, *parent_node;
	u16 num_nodes_added;
	u32 first_node_teid;
	u32 node_teid;
	int status;

	tc_node = pi->root->children[0];
	node = priv;

	if (!extack)
		return 0;

	if (!ice_enable_custom_tx(devlink_priv(devlink_rate->devlink)))
		return -EBUSY;

	if (!parent) {
		if (!node || tc_node == node || node->num_children)
			return -EINVAL;

		mutex_lock(&pi->sched_lock);
		ice_free_sched_node(pi, node);
		mutex_unlock(&pi->sched_lock);

		return 0;
	}

	parent_node = parent_priv;

	/* if the node doesn't exist, create it */
	if (!node->parent) {
		mutex_lock(&pi->sched_lock);
		status = ice_sched_add_elems(pi, tc_node, parent_node,
					     parent_node->tx_sched_layer + 1,
					     1, &num_nodes_added, &first_node_teid,
					     &node);
		mutex_unlock(&pi->sched_lock);

		if (status) {
			NL_SET_ERR_MSG_MOD(extack, "Can't add a new node");
			return status;
		}

		if (devlink_rate->tx_share)
			ice_set_object_tx_share(pi, node, devlink_rate->tx_share, extack);
		if (devlink_rate->tx_max)
			ice_set_object_tx_max(pi, node, devlink_rate->tx_max, extack);
		if (devlink_rate->tx_priority)
			ice_set_object_tx_priority(pi, node, devlink_rate->tx_priority, extack);
		if (devlink_rate->tx_weight)
			ice_set_object_tx_weight(pi, node, devlink_rate->tx_weight, extack);
	} else {
		node_teid = le32_to_cpu(node->info.node_teid);
		mutex_lock(&pi->sched_lock);
		status = ice_sched_move_nodes(pi, parent_node, 1, &node_teid);
		mutex_unlock(&pi->sched_lock);

		if (status)
			NL_SET_ERR_MSG_MOD(extack, "Can't move existing node to a new parent");
	}

	return status;
}

static void ice_set_min_max_msix(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
					      &val);
	if (!err)
		pf->msix.min = val.vu32;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
					      &val);
	if (!err)
		pf->msix.max = val.vu32;
}

/**
 * ice_devlink_reinit_up - do reinit of the given PF
 * @pf: pointer to the PF struct
 */
static int ice_devlink_reinit_up(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);
	struct device *dev = ice_pf_to_dev(pf);
	bool need_dev_deinit = false;
	int err;

	err = ice_init_hw(&pf->hw);
	if (err) {
		dev_err(dev, "ice_init_hw failed: %d\n", err);
		return err;
	}

	/* load MSI-X values */
	ice_set_min_max_msix(pf);

	err = ice_init_dev(pf);
	if (err)
		goto unroll_hw_init;

	err = ice_init_pf(pf);
	if (err) {
		dev_err(dev, "ice_init_pf failed: %d\n", err);
		goto unroll_dev_init;
	}

	vsi->flags = ICE_VSI_FLAG_INIT;

	rtnl_lock();
	err = ice_vsi_cfg(vsi);
	rtnl_unlock();
	if (err)
		goto unroll_pf_init;

	/* No need to take devl_lock, it's already taken by devlink API */
	err = ice_load(pf);
	if (err)
		goto err_load;

	return 0;

err_load:
	rtnl_lock();
	ice_vsi_decfg(vsi);
	rtnl_unlock();
unroll_pf_init:
	ice_deinit_pf(pf);
unroll_dev_init:
	need_dev_deinit = true;
unroll_hw_init:
	ice_deinit_hw(&pf->hw);
	if (need_dev_deinit)
		ice_deinit_dev(pf);
	return err;
}

/**
 * ice_devlink_reload_up - do reload up after reinit
 * @devlink: pointer to the devlink instance reloading
 * @action: the action requested
 * @limit: limits imposed by userspace, such as not resetting
 * @actions_performed: on return, indicates which actions were actually
 *                     performed
 * @extack: netlink extended ACK structure
 */
static int
ice_devlink_reload_up(struct devlink *devlink,
		      enum devlink_reload_action action,
		      enum devlink_reload_limit limit,
		      u32 *actions_performed,
		      struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
		return ice_devlink_reinit_up(pf);
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
		return ice_devlink_reload_empr_finish(pf, extack);
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}

static const struct devlink_ops ice_devlink_ops = {
	.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down = ice_devlink_reload_down,
	.reload_up = ice_devlink_reload_up,
	.eswitch_mode_get = ice_eswitch_mode_get,
	.eswitch_mode_set = ice_eswitch_mode_set,
	.info_get = ice_devlink_info_get,
	.flash_update = ice_devlink_flash_update,

	.rate_node_new = ice_devlink_rate_node_new,
	.rate_node_del = ice_devlink_rate_node_del,

	.rate_leaf_tx_max_set = ice_devlink_rate_leaf_tx_max_set,
	.rate_leaf_tx_share_set = ice_devlink_rate_leaf_tx_share_set,
	.rate_leaf_tx_priority_set = ice_devlink_rate_leaf_tx_priority_set,
	.rate_leaf_tx_weight_set = ice_devlink_rate_leaf_tx_weight_set,

	.rate_node_tx_max_set = ice_devlink_rate_node_tx_max_set,
	.rate_node_tx_share_set = ice_devlink_rate_node_tx_share_set,
	.rate_node_tx_priority_set = ice_devlink_rate_node_tx_priority_set,
	.rate_node_tx_weight_set = ice_devlink_rate_node_tx_weight_set,

	.rate_leaf_parent_set = ice_devlink_set_parent,
	.rate_node_parent_set = ice_devlink_set_parent,

	.port_new = ice_devlink_port_new,
};

static const struct devlink_ops ice_sf_devlink_ops;

static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx,
			    struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct iidc_rdma_core_dev_info *cdev;

	cdev = pf->cdev_info;
	if (!cdev)
		return -ENODEV;

	ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2);

	return 0;
}

static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
				       struct devlink_param_gset_ctx *ctx,
				       struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct iidc_rdma_core_dev_info *cdev;
	bool roce_ena = ctx->val.vbool;
	int ret;

	cdev = pf->cdev_info;
	if (!cdev)
		return -ENODEV;

	if (!roce_ena) {
		ice_unplug_aux_dev(pf);
		cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
		return 0;
	}

	cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;

	return ret;
}

static int
ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct iidc_rdma_core_dev_info *cdev;

	cdev = pf->cdev_info;
	if (!cdev)
		return -ENODEV;

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) {
		NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int
ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
			  struct devlink_param_gset_ctx *ctx,
			  struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct iidc_rdma_core_dev_info *cdev;

	cdev = pf->cdev_info;
	if (!cdev)
		return -ENODEV;

	ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP);

	return 0;
}

static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
				     struct devlink_param_gset_ctx *ctx,
				     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct iidc_rdma_core_dev_info *cdev;
	bool iw_ena = ctx->val.vbool;
	int ret;

	cdev = pf->cdev_info;
	if (!cdev)
		return -ENODEV;

	if (!iw_ena) {
		ice_unplug_aux_dev(pf);
		cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
		return 0;
	}

	cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_IWARP;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;

	return ret;
}

static int
ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
			       union devlink_param_value val,
			       struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) {
		NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
		return -EOPNOTSUPP;
	}

	return 0;
}

#define DEVLINK_LOCAL_FWD_DISABLED_STR "disabled"
#define DEVLINK_LOCAL_FWD_ENABLED_STR "enabled"
#define DEVLINK_LOCAL_FWD_PRIORITIZED_STR "prioritized"

/**
 * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode.
 * @mode: local forwarding mode used in the port_info struct.
 *
 * Return: Mode respective string or "Invalid".
 */
static const char *
ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode)
{
	switch (mode) {
	case ICE_LOCAL_FWD_MODE_ENABLED:
		return DEVLINK_LOCAL_FWD_ENABLED_STR;
	case ICE_LOCAL_FWD_MODE_PRIORITIZED:
		return DEVLINK_LOCAL_FWD_PRIORITIZED_STR;
	case ICE_LOCAL_FWD_MODE_DISABLED:
		return DEVLINK_LOCAL_FWD_DISABLED_STR;
	}

	return "Invalid";
}

/**
 * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name.
 * @mode_str: local forwarding mode string.
 *
 * Return: Mode value or negative number if invalid.
 */
static int ice_devlink_local_fwd_str_to_mode(const char *mode_str)
{
	if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR))
		return ICE_LOCAL_FWD_MODE_ENABLED;
	else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR))
		return ICE_LOCAL_FWD_MODE_PRIORITIZED;
	else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR))
		return ICE_LOCAL_FWD_MODE_DISABLED;

	return -EINVAL;
}

/**
 * ice_devlink_local_fwd_get - Get local_fwd parameter.
 * @devlink: Pointer to the devlink instance.
 * @id: The parameter ID to get.
 * @ctx: Context to store the parameter value.
 * @extack: Netlink extended ACK structure.
 *
 * Return: Zero.
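 *
 * Illustrative invocation:
 *
 *	$ devlink dev param show pci/0000:af:00.0 name local_forwarding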
 */
static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
				     struct devlink_param_gset_ctx *ctx,
				     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct ice_port_info *pi;
	const char *mode_str;

	pi = pf->hw.port_info;
	mode_str = ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode);
	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", mode_str);

	return 0;
}

/**
 * ice_devlink_local_fwd_set - Set local_fwd parameter.
 * @devlink: Pointer to the devlink instance.
 * @id: The parameter ID to set.
 * @ctx: Context to get the parameter value.
 * @extack: Netlink extended ACK structure.
 *
 * Return: Zero.
 */
static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id,
				     struct devlink_param_gset_ctx *ctx,
				     struct netlink_ext_ack *extack)
{
	int new_local_fwd_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr);
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_port_info *pi;

	pi = pf->hw.port_info;
	if (pi->local_fwd_mode != new_local_fwd_mode) {
		pi->local_fwd_mode = new_local_fwd_mode;
		dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr);
		ice_schedule_reset(pf, ICE_RESET_CORER);
	}

	return 0;
}

/**
 * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value.
 * @devlink: Unused pointer to devlink instance.
 * @id: The parameter ID to validate.
 * @val: Value to validate.
 * @extack: Netlink extended ACK structure.
 *
 * Supported values are:
 * "enabled" - local_fwd is enabled, "disabled" - local_fwd is disabled,
 * "prioritized" - local_fwd traffic is prioritized in scheduling.
 *
 * Return: Zero when the passed parameter value is supported. Negative value
 * on error.
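 *
 * Illustrative invocation that passes validation:
 *
 *	$ devlink dev param set pci/0000:af:00.0 name local_forwarding \
 *		value prioritized cmode runtime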
 */
static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
					  union devlink_param_value val,
					  struct netlink_ext_ack *extack)
{
	if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported.");
		return -EINVAL;
	}

	return 0;
}

static int
ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors)
		return -EINVAL;

	return 0;
}

static int
ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	if (val.vu32 < ICE_MIN_MSIX)
		return -EINVAL;

	return 0;
}

static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
					    union devlink_param_value val,
					    struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	bool new_state = val.vbool;

	if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	return 0;
}

enum ice_param_id {
	ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
	ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
};

static const struct devlink_param ice_dvl_rdma_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      ice_devlink_enable_roce_get,
			      ice_devlink_enable_roce_set,
			      ice_devlink_enable_roce_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      ice_devlink_enable_iw_get,
			      ice_devlink_enable_iw_set,
			      ice_devlink_enable_iw_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, ice_devlink_enable_rdma_validate),
};

static const struct devlink_param ice_dvl_msix_params[] = {
	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, ice_devlink_msix_max_pf_validate),
	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, ice_devlink_msix_min_pf_validate),
};

static const struct devlink_param ice_dvl_sched_params[] = {
	DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
			     "tx_scheduling_layers",
			     DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			     ice_devlink_tx_sched_layers_get,
			     ice_devlink_tx_sched_layers_set,
			     ice_devlink_tx_sched_layers_validate),
	DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
			     "local_forwarding", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     ice_devlink_local_fwd_get,
			     ice_devlink_local_fwd_set,
			     ice_devlink_local_fwd_validate),
};

static void ice_devlink_free(void *devlink_ptr)
{
	devlink_free((struct devlink *)devlink_ptr);
}

/**
 * ice_allocate_pf - Allocate devlink and return PF structure pointer
 * @dev: the device to allocate for
 *
 * Allocate a devlink instance for this device and return the private area as
 * the PF structure. The devlink memory is kept track of through devres by
 * adding an action to remove it when unwinding.
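 *
 * Illustrative use from a hypothetical probe path:
 *
 *	struct ice_pf *pf = ice_allocate_pf(&pdev->dev);
 *
 *	if (!pf)
 *		return -ENOMEM;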
 */
struct ice_pf *ice_allocate_pf(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev);
	if (!devlink)
		return NULL;

	/* Add an action to tear down the devlink when unwinding the driver */
	if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
		return NULL;

	return devlink_priv(devlink);
}

/**
 * ice_allocate_sf - Allocate devlink and return SF structure pointer
 * @dev: the device to allocate for
 * @pf: pointer to the PF structure
 *
 * Allocate a devlink instance for SF.
 *
 * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error
 */
struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf)
{
	struct devlink *devlink;
	int err;

	devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv),
				dev);
	if (!devlink)
		return ERR_PTR(-ENOMEM);

	err = devl_nested_devlink_set(priv_to_devlink(pf), devlink);
	if (err) {
		devlink_free(devlink);
		return ERR_PTR(err);
	}

	return devlink_priv(devlink);
}

/**
 * ice_devlink_register - Register devlink interface for this PF
 * @pf: the PF to register the devlink for.
 *
 * Register the devlink instance associated with this physical function.
 */
void ice_devlink_register(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	devl_register(devlink);
}

/**
 * ice_devlink_unregister - Unregister devlink resources for this PF.
 * @pf: the PF structure to cleanup
 *
 * Releases resources used by devlink and cleans up associated memory.
 */
void ice_devlink_unregister(struct ice_pf *pf)
{
	devl_unregister(priv_to_devlink(pf));
}

int ice_devlink_register_params(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	union devlink_param_value value;
	struct ice_hw *hw = &pf->hw;
	int status;

	status = devl_params_register(devlink, ice_dvl_rdma_params,
				      ARRAY_SIZE(ice_dvl_rdma_params));
	if (status)
		return status;

	status = devl_params_register(devlink, ice_dvl_msix_params,
				      ARRAY_SIZE(ice_dvl_msix_params));
	if (status)
		goto unregister_rdma_params;

	if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
		status = devl_params_register(devlink, ice_dvl_sched_params,
					      ARRAY_SIZE(ice_dvl_sched_params));
	if (status)
		goto unregister_msix_params;

	value.vu32 = pf->msix.max;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
					value);
	value.vu32 = pf->msix.min;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
					value);

	value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
					value);

	return 0;

unregister_msix_params:
	devl_params_unregister(devlink, ice_dvl_msix_params,
			       ARRAY_SIZE(ice_dvl_msix_params));
unregister_rdma_params:
	devl_params_unregister(devlink, ice_dvl_rdma_params,
			       ARRAY_SIZE(ice_dvl_rdma_params));
	return status;
}

void ice_devlink_unregister_params(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct ice_hw *hw = &pf->hw;

	devl_params_unregister(devlink, ice_dvl_rdma_params,
			       ARRAY_SIZE(ice_dvl_rdma_params));
	devl_params_unregister(devlink, ice_dvl_msix_params,
			       ARRAY_SIZE(ice_dvl_msix_params));

	if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
		devl_params_unregister(devlink, ice_dvl_sched_params,
				       ARRAY_SIZE(ice_dvl_sched_params));
}

#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)

static const struct devlink_region_ops ice_nvm_region_ops;
static const struct devlink_region_ops ice_sram_region_ops;

/**
 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to snapshot
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to a DEVLINK_CMD_REGION_NEW for either
 * the nvm-flash or shadow-ram region.
 *
 * It captures a snapshot of the NVM or Shadow RAM flash contents. This
 * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink
 * interface.
 *
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
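 *
 * Illustrative invocations:
 *
 *	$ devlink region new pci/0000:af:00.0/nvm-flash snapshot 1
 *	$ devlink region dump pci/0000:af:00.0/nvm-flash snapshot 1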
/**
 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to snapshot
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to a DEVLINK_CMD_REGION_NEW for either
 * the nvm-flash or shadow-ram region.
 *
 * It captures a snapshot of the NVM or Shadow RAM flash contents. This
 * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink
 * interface.
 *
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
				    const struct devlink_region_ops *ops,
				    struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool read_shadow_ram;
	u8 *nvm_data, *tmp, i;
	u32 nvm_size, left;
	s8 num_blks;
	int status;

	if (ops == &ice_nvm_region_ops) {
		read_shadow_ram = false;
		nvm_size = hw->flash.flash_size;
	} else if (ops == &ice_sram_region_ops) {
		read_shadow_ram = true;
		nvm_size = hw->flash.sr_words * 2u;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function");
		return -EOPNOTSUPP;
	}

	nvm_data = vzalloc(nvm_size);
	if (!nvm_data)
		return -ENOMEM;

	num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
	tmp = nvm_data;
	left = nvm_size;

	/* Some systems take longer to read the NVM than others, which can
	 * cause the FW to reclaim the NVM lock before the entire NVM has
	 * been read. Avoid this by breaking the read into smaller chunks,
	 * each of which completes well within the lock timeout. This adds
	 * some overhead from the additional AQ commands, but it makes the
	 * read reliable on all systems.
	 */
	for (i = 0; i < num_blks; i++) {
		u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left);

		status = ice_acquire_nvm(hw, ICE_RES_READ);
		if (status) {
			dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
				status, hw->adminq.sq_last_status);
			NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
			vfree(nvm_data);
			return -EIO;
		}

		status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
					   &read_sz, tmp, read_shadow_ram);
		if (status) {
			dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
				read_sz, status, hw->adminq.sq_last_status);
			NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
			ice_release_nvm(hw);
			vfree(nvm_data);
			return -EIO;
		}
		ice_release_nvm(hw);

		tmp += read_sz;
		left -= read_sz;
	}

	*data = nvm_data;

	return 0;
}
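/* Example userspace usage of the snapshot interface above (illustrative;
 * exact syntax depends on the iproute2 devlink utility version):
 *
 *   # devlink region new pci/0000:01:00.0/nvm-flash snapshot 1
 *   # devlink region dump pci/0000:01:00.0/nvm-flash snapshot 1
 *   # devlink region del pci/0000:01:00.0/nvm-flash snapshot 1
 */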
/**
 * ice_devlink_nvm_read - Read a portion of NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to read from
 * @extack: extended ACK response structure
 * @offset: the offset to start at
 * @size: the amount to read
 * @data: the data buffer to read into
 *
 * This function is called in response to DEVLINK_CMD_REGION_READ to directly
 * read a section of the NVM contents.
 *
 * It reads from either the nvm-flash or shadow-ram region contents.
 *
 * Return: zero on success, and fills the data buffer. Returns a non-zero
 * error code on failure.
 */
static int ice_devlink_nvm_read(struct devlink *devlink,
				const struct devlink_region_ops *ops,
				struct netlink_ext_ack *extack,
				u64 offset, u32 size, u8 *data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool read_shadow_ram;
	u64 nvm_size;
	int status;

	if (ops == &ice_nvm_region_ops) {
		read_shadow_ram = false;
		nvm_size = hw->flash.flash_size;
	} else if (ops == &ice_sram_region_ops) {
		read_shadow_ram = true;
		nvm_size = hw->flash.sr_words * 2u;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unexpected region in read function");
		return -EOPNOTSUPP;
	}

	if (offset + size > nvm_size) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
		return -ERANGE;
	}

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		return -EIO;
	}

	status = ice_read_flat_nvm(hw, (u32)offset, &size, data,
				   read_shadow_ram);
	if (status) {
		dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
			size, status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
		ice_release_nvm(hw);
		return -EIO;
	}
	ice_release_nvm(hw);

	return 0;
}
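/* Example userspace usage of the direct read interface above (illustrative;
 * requires a kernel and devlink utility that support region reads without
 * a snapshot):
 *
 *   # devlink region read pci/0000:01:00.0/shadow-ram address 0 length 256
 *
 * Unlike a snapshot, a direct read does not require buffering the entire
 * flash contents in kernel memory.
 */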
/**
 * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to a DEVLINK_CMD_REGION_NEW for the
 * device-caps devlink region. It captures a snapshot of the device
 * capabilities reported by firmware.
 *
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
			     const struct devlink_region_ops *ops,
			     struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	void *devcaps;
	int status;

	devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
	if (!devcaps)
		return -ENOMEM;

	status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (status) {
		dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
		vfree(devcaps);
		return status;
	}

	*data = (u8 *)devcaps;

	return 0;
}

static const struct devlink_region_ops ice_nvm_region_ops = {
	.name = "nvm-flash",
	.destructor = vfree,
	.snapshot = ice_devlink_nvm_snapshot,
	.read = ice_devlink_nvm_read,
};

static const struct devlink_region_ops ice_sram_region_ops = {
	.name = "shadow-ram",
	.destructor = vfree,
	.snapshot = ice_devlink_nvm_snapshot,
	.read = ice_devlink_nvm_read,
};

static const struct devlink_region_ops ice_devcaps_region_ops = {
	.name = "device-caps",
	.destructor = vfree,
	.snapshot = ice_devlink_devcaps_snapshot,
};

/**
 * ice_devlink_init_regions - Initialize devlink regions
 * @pf: the PF device structure
 *
 * Create the devlink regions used to access the contents of the device
 * flash, the Shadow RAM, and the device capabilities.
 */
void ice_devlink_init_regions(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct device *dev = ice_pf_to_dev(pf);
	u64 nvm_size, sram_size;

	nvm_size = pf->hw.flash.flash_size;
	pf->nvm_region = devl_region_create(devlink, &ice_nvm_region_ops, 1,
					    nvm_size);
	if (IS_ERR(pf->nvm_region)) {
		dev_err(dev, "failed to create NVM devlink region, err %ld\n",
			PTR_ERR(pf->nvm_region));
		pf->nvm_region = NULL;
	}

	sram_size = pf->hw.flash.sr_words * 2u;
	pf->sram_region = devl_region_create(devlink, &ice_sram_region_ops,
					     1, sram_size);
	if (IS_ERR(pf->sram_region)) {
		dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
			PTR_ERR(pf->sram_region));
		pf->sram_region = NULL;
	}

	pf->devcaps_region = devl_region_create(devlink,
						&ice_devcaps_region_ops, 10,
						ICE_AQ_MAX_BUF_LEN);
	if (IS_ERR(pf->devcaps_region)) {
		dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
			PTR_ERR(pf->devcaps_region));
		pf->devcaps_region = NULL;
	}
}

/**
 * ice_devlink_destroy_regions - Destroy devlink regions
 * @pf: the PF device structure
 *
 * Remove previously created regions for this PF.
 */
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
	if (pf->nvm_region)
		devl_region_destroy(pf->nvm_region);

	if (pf->sram_region)
		devl_region_destroy(pf->sram_region);

	if (pf->devcaps_region)
		devl_region_destroy(pf->devcaps_region);
}
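/* Example (illustrative): once created, the regions above can be listed
 * from userspace, e.g. with "devlink region show", which reports each
 * region's name and size along with any captured snapshot ids.
 */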