// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/vmalloc.h>

#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
#include "port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
#include "ice_sf_eth.h"

/* context for devlink info version reporting */
struct ice_info_ctx {
	char buf[128];				/* scratch space for one formatted version string */
	struct ice_orom_info pending_orom;	/* inactive (pending) Option ROM version, if any */
	struct ice_nvm_info pending_nvm;	/* inactive (pending) NVM version, if any */
	struct ice_netlist_info pending_netlist; /* inactive (pending) netlist version, if any */
	struct ice_hw_dev_caps dev_caps;	/* capabilities discovered when the request was made */
};

/* The following functions are used to format specific strings for various
 * devlink info versions. The ctx parameter is used to provide the storage
 * buffer, as well as any ancillary information calculated when the info
 * request was made.
 *
 * If a version does not exist, for example when attempting to get the
 * inactive version of flash when there is no pending update, the function
 * should leave the buffer in the ctx structure empty.
 */

/* Format the device serial number (PCIe DSN) into ctx->buf */
static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	u8 dsn[8];

	/* Copy the DSN into an array in Big Endian format */
	put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);

	snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}

/* Format the Product Board Assembly identifier read from the NVM */
static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;
	int status;

	status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
	if (status)
		/* We failed to locate the PBA, so just skip this entry */
		dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
			status);
}

/* Format the management firmware version, e.g. "7.3.1" */
static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
}

/* Format the AdminQ API version the firmware implements */
static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
		 hw->api_min_ver, hw->api_patch);
}

/* Format the firmware build id as a hexadecimal value */
static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
}

/* Format the active Option ROM version */
static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &pf->hw.flash.orom;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 orom->major, orom->build, orom->patch);
}

/* Format the pending (inactive) Option ROM version; leaves the buffer
 * empty when no Option ROM update is pending.
 */
static void
ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
			  struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &ctx->pending_orom;

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
		snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
			 orom->major, orom->build, orom->patch);
}

/* Format the active NVM version (major/minor are hexadecimal) */
static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
}

/* Format the pending (inactive) NVM version; leaves the buffer empty
 * when no NVM update is pending.
 */
static void
ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
			 struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
			 nvm->major, nvm->minor);
}

/* Format the active EETRACK (NVM image unique id) */
static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

/* Format the pending EETRACK; empty when no NVM update is pending */
static void
ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

/* Format the name of the active DDP package */
static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
}

/* Format the version of the active DDP package */
static void
ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
		 pkg->major, pkg->minor, pkg->update, pkg->draft);
}

/* Format the track id of the active DDP package */
static void
ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
}

/* Format the active netlist version */
static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	/* The netlist version fields are BCD formatted */
	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
		 netlist->major, netlist->minor,
		 netlist->type >> 16, netlist->type & 0xFFFF,
		 netlist->rev, netlist->cust_ver);
}

/* Format the active netlist build hash */
static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

/* Format the pending netlist version; empty when no netlist update is
 * pending.
 */
static void
ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
			     struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	/* The netlist version fields are BCD formatted */
	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
			 netlist->major, netlist->minor,
			 netlist->type >> 16, netlist->type & 0xFFFF,
			 netlist->rev, netlist->cust_ver);
}

/* Format the pending netlist build hash; empty when no netlist update
 * is pending.
 */
static void
ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
			       struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

/* Format the CGU firmware version; empty when the device has no CGU or
 * the AdminQ query fails.
 */
static void ice_info_cgu_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	u32 id, cfg_ver, fw_ver;

	if (!ice_is_feature_supported(pf, ICE_F_CGU))
		return;
	if (ice_aq_get_cgu_info(&pf->hw, &id, &cfg_ver, &fw_ver))
		return;
	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", id, cfg_ver, fw_ver);
}

/* Format the CGU part number; empty when the device has no CGU */
static void ice_info_cgu_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	if (!ice_is_feature_supported(pf, ICE_F_CGU))
		return;
	snprintf(ctx->buf, sizeof(ctx->buf), "%u", pf->hw.cgu_part_number);
}

/* Helpers to build ice_devlink_version table entries */
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }

/* The combined() macro inserts both the running entry as well as a stored
 * entry. The running entry will always report the version from the active
 * handler. The stored entry will first try the pending handler, and fallback
 * to the active handler if the pending function does not report a version.
 * The pending handler should check the status of a pending update for the
 * relevant flash component. It should only fill in the buffer in the case
 * where a valid pending version is available. This ensures that the related
 * stored and running versions remain in sync, and that stored versions are
 * correctly reported as expected.
 */
#define combined(key, active, pending) \
	running(key, active), \
	stored(key, pending, active)

enum ice_version_type {
	ICE_VERSION_FIXED,
	ICE_VERSION_RUNNING,
	ICE_VERSION_STORED,
};

static const struct ice_devlink_version {
	enum ice_version_type type;	/* fixed, running, or stored */
	const char *key;		/* devlink info version key */
	void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
	void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
	fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
	running("fw.mgmt.api", ice_info_fw_api),
	running("fw.mgmt.build", ice_info_fw_build),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver, ice_info_pending_orom_ver),
	combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack, ice_info_pending_eetrack),
	running("fw.app.name", ice_info_ddp_pkg_name),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
	running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
	combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
	combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
	fixed("cgu.id", ice_info_cgu_id),
	running("fw.cgu",
		ice_info_cgu_fw_build),
};

/**
 * ice_devlink_info_get - .info_get devlink handler
 * @devlink: devlink instance structure
 * @req: the devlink info request
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .info_get operation. Reports information about the
 * device.
 *
 * Return: zero on success or an error code on failure.
 */
static int ice_devlink_info_get(struct devlink *devlink,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_info_ctx *ctx;
	size_t i;
	int err;

	/* versions are not reliable while the device is still resetting */
	err = ice_wait_for_reset(pf, 10 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
		return err;
	}

	ctx = kzalloc_obj(*ctx);
	if (!ctx)
		return -ENOMEM;

	/* discover capabilities first */
	err = ice_discover_dev_caps(hw, &ctx->dev_caps);
	if (err) {
		dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
			err, libie_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
		goto out_free_ctx;
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
		err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending Option ROM */
			ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
		err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
		if (err) {
			dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending NVM */
			ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
		err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending netlist */
			ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
		}
	}

	ice_info_get_dsn(pf, ctx);

	err = devlink_info_serial_number_put(req, ctx->buf);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
		goto out_free_ctx;
	}

	for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
		enum ice_version_type type = ice_devlink_versions[i].type;
		const char *key = ice_devlink_versions[i].key;

		memset(ctx->buf, 0, sizeof(ctx->buf));

		ice_devlink_versions[i].getter(pf, ctx);

		/* If the default getter doesn't report a version, use the
		 * fallback function. This is primarily useful in the case of
		 * "stored" versions that want to report the same value as the
		 * running version in the normal case of no pending update.
		 */
		if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
			ice_devlink_versions[i].fallback(pf, ctx);

		/* Do not report missing versions */
		if (ctx->buf[0] == '\0')
			continue;

		switch (type) {
		case ICE_VERSION_FIXED:
			err = devlink_info_version_fixed_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_RUNNING:
			err = devlink_info_version_running_put_ext(req, key,
								   ctx->buf,
								   DEVLINK_INFO_VERSION_TYPE_COMPONENT);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_STORED:
			err = devlink_info_version_stored_put_ext(req, key,
								  ctx->buf,
								  DEVLINK_INFO_VERSION_TYPE_COMPONENT);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
				goto out_free_ctx;
			}
			break;
		}
	}

out_free_ctx:
	kfree(ctx);
	return err;
}

/**
 * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Allow user to activate new Embedded Management Processor firmware by
 * issuing device specific EMP reset. Called in response to
 * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
 *
 * Note that teardown and rebuild of the driver state happens automatically as
 * part of an interrupt and watchdog task. This is because all physical
 * functions on the device must be able to reset when an EMP reset occurs from
 * any source.
 */
static int
ice_devlink_reload_empr_start(struct ice_pf *pf,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u8 pending;
	int err;

	err = ice_get_pending_updates(pf, &pending, extack);
	if (err)
		return err;

	/* pending is a bitmask of which flash banks have a pending update,
	 * including the main NVM bank, the Option ROM bank, and the netlist
	 * bank. If any of these bits are set, then there is a pending update
	 * waiting to be activated.
	 */
	if (!pending) {
		NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
		return -ECANCELED;
	}

	if (pf->fw_emp_reset_disabled) {
		NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
		return -ECANCELED;
	}

	dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");

	err = ice_aq_nvm_update_empr(hw);
	if (err) {
		dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
			err, libie_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
		return err;
	}

	return 0;
}

/**
 * ice_devlink_reinit_down - unload given PF
 * @pf: pointer to the PF struct
 */
static void ice_devlink_reinit_down(struct ice_pf *pf)
{
	/* No need to take devl_lock, it's already taken by devlink API */
	ice_unload(pf);
	rtnl_lock();
	ice_vsi_decfg(ice_get_main_vsi(pf));
	rtnl_unlock();
	ice_deinit_pf(pf);
	ice_deinit_hw(&pf->hw);
	ice_deinit_dev(pf);
}

/**
 * ice_devlink_reload_down - prepare for reload
 * @devlink: pointer to the devlink instance to reload
 * @netns_change: if true, the network namespace is changing
 * @action: the action to perform
 * @limit: limits on what reload should do, such as not resetting
 * @extack: netlink extended ACK structure
 */
static int
ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
			enum devlink_reload_action action,
			enum devlink_reload_limit limit,
			struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		/* reinit is refused while switchdev, ADQ, or SR-IOV VFs are
		 * active, since those hold state that unload would destroy
		 */
		if (ice_is_eswitch_mode_switchdev(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Go to legacy mode before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_is_adq_active(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Turn off ADQ before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_has_vfs(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Remove all VFs before doing reinit");
			return -EOPNOTSUPP;
		}
		ice_devlink_reinit_down(pf);
		return 0;
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		return ice_devlink_reload_empr_start(pf, extack);
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}

/**
 * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Wait for driver to finish rebuilding after EMP reset is completed. This
 * includes time to wait for both the actual device reset as well as the time
 * for the driver's rebuild to complete.
 */
static int
ice_devlink_reload_empr_finish(struct ice_pf *pf,
			       struct netlink_ext_ack *extack)
{
	int err;

	err = ice_wait_for_reset(pf, 60 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
		return err;
	}

	return 0;
}

/**
 * ice_get_tx_topo_user_sel - Read user's choice from flash
 * @pf: pointer to pf structure
 * @layers: value read from flash will be saved here
 *
 * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV.
540 * 541 * Return: zero when read was successful, negative values otherwise. 542 */ 543 static int ice_get_tx_topo_user_sel(struct ice_pf *pf, uint8_t *layers) 544 { 545 struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; 546 struct ice_hw *hw = &pf->hw; 547 int err; 548 549 err = ice_acquire_nvm(hw, ICE_RES_READ); 550 if (err) 551 return err; 552 553 err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0, 554 sizeof(usr_sel), &usr_sel, true, true, NULL); 555 if (err) 556 goto exit_release_res; 557 558 if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL) 559 *layers = ICE_SCHED_5_LAYERS; 560 else 561 *layers = ICE_SCHED_9_LAYERS; 562 563 exit_release_res: 564 ice_release_nvm(hw); 565 566 return err; 567 } 568 569 /** 570 * ice_update_tx_topo_user_sel - Save user's preference in flash 571 * @pf: pointer to pf structure 572 * @layers: value to be saved in flash 573 * 574 * Variable "layers" defines user's preference about number of layers in Tx 575 * Scheduler Topology Tree. This choice should be stored in PFA TLV field 576 * and be picked up by driver, next time during init. 577 * 578 * Return: zero when save was successful, negative values otherwise. 
579 */ 580 static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers) 581 { 582 struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; 583 struct ice_hw *hw = &pf->hw; 584 int err; 585 586 err = ice_acquire_nvm(hw, ICE_RES_WRITE); 587 if (err) 588 return err; 589 590 err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0, 591 sizeof(usr_sel), &usr_sel, true, true, NULL); 592 if (err) 593 goto exit_release_res; 594 595 if (layers == ICE_SCHED_5_LAYERS) 596 usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL; 597 else 598 usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL; 599 600 err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2, 601 sizeof(usr_sel.data), &usr_sel.data, 602 true, NULL, NULL); 603 exit_release_res: 604 ice_release_nvm(hw); 605 606 return err; 607 } 608 609 /** 610 * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter 611 * @devlink: pointer to the devlink instance 612 * @id: the parameter ID to set 613 * @ctx: context to store the parameter value 614 * @extack: netlink extended ACK structure 615 * 616 * Return: zero on success and negative value on failure. 617 */ 618 static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id, 619 struct devlink_param_gset_ctx *ctx, 620 struct netlink_ext_ack *extack) 621 { 622 struct ice_pf *pf = devlink_priv(devlink); 623 int err; 624 625 err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8); 626 if (err) 627 return err; 628 629 return 0; 630 } 631 632 /** 633 * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter 634 * @devlink: pointer to the devlink instance 635 * @id: the parameter ID to set 636 * @ctx: context to get the parameter value 637 * @extack: netlink extended ACK structure 638 * 639 * Return: zero on success and negative value on failure. 
 */
static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx,
					   struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	int err;

	err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8);
	if (err)
		return err;

	/* the new layer count is only stored in flash; it takes effect on
	 * the next device initialization
	 */
	NL_SET_ERR_MSG_MOD(extack,
			   "Tx scheduling layers have been changed on this device. You must do the PCI slot powercycle for the change to take effect.");

	return 0;
}

/**
 * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers
 *                                        parameter value
 * @devlink: unused pointer to devlink instance
 * @id: the parameter ID to validate
 * @val: value to validate
 * @extack: netlink extended ACK structure
 *
 * Supported values are:
 * - 5 - five layers Tx Scheduler Topology Tree
 * - 9 - nine layers Tx Scheduler Topology Tree
 *
 * Return: zero when passed parameter value is supported. Negative value on
 * error.
 */
static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id,
						union devlink_param_value val,
						struct netlink_ext_ack *extack)
{
	if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Wrong number of tx scheduler layers provided.");
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree
 * @pf: pf struct
 *
 * This function tears down tree exported during VF's creation.
 */
void ice_tear_down_devlink_rate_tree(struct ice_pf *pf)
{
	struct devlink *devlink;
	struct ice_vf *vf;
	unsigned int bkt;

	devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		if (vf->devlink_port.devlink_rate)
			devl_rate_leaf_destroy(&vf->devlink_port);
	}
	mutex_unlock(&pf->vfs.table_lock);

	devl_rate_nodes_destroy(devlink);
	devl_unlock(devlink);
}

/**
 * ice_enable_custom_tx - try to enable custom Tx feature
 * @pf: pf struct
 *
 * This function tries to enable custom Tx feature,
 * it's not possible to enable it, if DCB or ADQ is active.
 */
static bool ice_enable_custom_tx(struct ice_pf *pf)
{
	struct ice_port_info *pi = ice_get_main_vsi(pf)->port_info;
	struct device *dev = ice_pf_to_dev(pf);

	if (pi->is_custom_tx_enabled)
		/* already enabled, return true */
		return true;

	if (ice_is_adq_active(pf)) {
		dev_err(dev, "ADQ active, can't modify Tx scheduler tree\n");
		return false;
	}

	if (ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB active, can't modify Tx scheduler tree\n");
		return false;
	}

	pi->is_custom_tx_enabled = true;

	return true;
}

/**
 * ice_traverse_tx_tree - traverse Tx scheduler tree
 * @devlink: devlink struct
 * @node: current node, used for recursion
 * @tc_node: tc_node struct, that is treated as a root
 * @pf: pf struct
 *
 * This function traverses Tx scheduler tree and exports
 * entire structure to the devlink-rate.
 */
static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
				 struct ice_sched_node *tc_node, struct ice_pf *pf)
{
	struct devlink_rate *rate_node = NULL;
	struct ice_dynamic_port *sf;
	struct ice_vf *vf;
	int i;

	if (node->rate_node)
		/* already added, skip to the next */
		goto traverse_children;

	if (node->parent == tc_node) {
		/* create root node */
		rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
	} else if (node->vsi_handle &&
		   pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
		   pf->vsi[node->vsi_handle]->vf) {
		vf = pf->vsi[node->vsi_handle]->vf;
		if (!vf->devlink_port.devlink_rate)
			/* leaf nodes don't have children
			 * so we don't set rate_node
			 */
			devl_rate_leaf_create(&vf->devlink_port, node,
					      node->parent->rate_node);
	} else if (node->vsi_handle &&
		   pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
		   pf->vsi[node->vsi_handle]->sf) {
		sf = pf->vsi[node->vsi_handle]->sf;
		if (!sf->devlink_port.devlink_rate)
			/* leaf nodes don't have children
			 * so we don't set rate_node
			 */
			devl_rate_leaf_create(&sf->devlink_port, node,
					      node->parent->rate_node);
	} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
		   node->parent->rate_node) {
		rate_node = devl_rate_node_create(devlink, node, node->name,
						  node->parent->rate_node);
	}

	/* devl_rate_node_create() may return an ERR_PTR; only remember
	 * successfully created nodes
	 */
	if (rate_node && !IS_ERR(rate_node))
		node->rate_node = rate_node;

traverse_children:
	for (i = 0; i < node->num_children; i++)
		ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
}

/**
 * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate
 * @devlink: devlink struct
 * @vsi: main vsi struct
 *
 * This function finds a root node, then calls ice_traverse_tx tree, which
 * traverses the tree and exports its contents to devlink rate.
 */
int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
{
	struct ice_port_info *pi = vsi->port_info;
	struct ice_sched_node *tc_node;
	struct ice_pf *pf = vsi->back;
	int i;

	tc_node = pi->root->children[0];
	mutex_lock(&pi->sched_lock);
	for (i = 0; i < tc_node->num_children; i++)
		ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
	mutex_unlock(&pi->sched_lock);

	return 0;
}

/* Recursively clear the cached devlink_rate pointer on every node */
static void ice_clear_rate_nodes(struct ice_sched_node *node)
{
	node->rate_node = NULL;

	for (int i = 0; i < node->num_children; i++)
		ice_clear_rate_nodes(node->children[i]);
}

/**
 * ice_devlink_rate_clear_tx_topology - clear node->rate_node
 * @vsi: main vsi struct
 *
 * Clear rate_node to cleanup creation of Tx topology.
 *
 */
void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi)
{
	struct ice_port_info *pi = vsi->port_info;

	mutex_lock(&pi->sched_lock);
	ice_clear_rate_nodes(pi->root->children[0]);
	mutex_unlock(&pi->sched_lock);
}

/**
 * ice_set_object_tx_share - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets ICE_MIN_BW scheduling BW limit.
 */
static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node,
				   u64 bw, struct netlink_ext_ack *extack)
{
	int status;

	mutex_lock(&pi->sched_lock);
	/* converts bytes per second to kilo bits per second */
	node->tx_share = div_u64(bw, 125);
	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share");

	return status;
}

/**
 * ice_set_object_tx_max - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets ICE_MAX_BW scheduling BW limit.
 */
static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node,
				 u64 bw, struct netlink_ext_ack *extack)
{
	int status;

	mutex_lock(&pi->sched_lock);
	/* converts bytes per second value to kilo bits per second */
	node->tx_max = div_u64(bw, 125);
	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max");

	return status;
}

/**
 * ice_set_object_tx_priority - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @priority: value representing priority for strict priority arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets priority of node among siblings.
 */
static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node,
				      u32 priority, struct netlink_ext_ack *extack)
{
	int status;

	if (priority >= 8) {
		NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_priority = priority;
	status = ice_sched_set_node_priority(pi, node, node->tx_priority);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority");

	return status;
}

/**
 * ice_set_object_tx_weight - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @weight: value representing relative weight for WFQ arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets node weight for WFQ algorithm.
 */
static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node,
				    u32 weight, struct netlink_ext_ack *extack)
{
	int status;

	if (weight > 200 || weight < 1) {
		NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_weight = weight;
	status = ice_sched_set_node_weight(pi, node, node->tx_weight);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight");

	return status;
}

/**
 * ice_get_pi_from_dev_rate - get port info from devlink_rate
 * @rate_node: devlink struct instance
 *
 * This function returns corresponding port_info struct of devlink_rate
 */
static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node)
{
	struct ice_pf *pf = devlink_priv(rate_node->devlink);

	return ice_get_main_vsi(pf)->port_info;
}

static int ice_devlink_rate_node_new(struct devlink_rate
 *rate_node, void **priv,
				     struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node;
	struct ice_port_info *pi;

	pi = ice_get_pi_from_dev_rate(rate_node);

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	/* preallocate memory for ice_sched_node */
	node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	*priv = node;

	return 0;
}

/* devlink-rate .node_del callback: free the scheduler node backing a
 * user-created rate node
 */
static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
				     struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node, *tc_node;
	struct ice_port_info *pi;

	pi = ice_get_pi_from_dev_rate(rate_node);
	tc_node = pi->root->children[0];
	node = priv;

	if (!rate_node->parent || !node || tc_node == node || !extack)
		return 0;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	/* can't allow to delete a node with children */
	if (node->num_children)
		return -EINVAL;

	mutex_lock(&pi->sched_lock);
	ice_free_sched_node(pi, node);
	mutex_unlock(&pi->sched_lock);

	return 0;
}

/* devlink-rate leaf .tx_max setter */
static int ice_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
					    u64 tx_max, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_leaf),
				     node, tx_max, extack);
}

/* devlink-rate leaf .tx_share setter */
static int ice_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
					      u64 tx_share, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_leaf), node,
				       tx_share, extack);
}

/* devlink-rate leaf .tx_priority setter */
static int ice_devlink_rate_leaf_tx_priority_set(struct devlink_rate *rate_leaf, void *priv,
						 u32 tx_priority, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_leaf), node,
					  tx_priority, extack);
}

/* devlink-rate leaf .tx_weight setter */
static int ice_devlink_rate_leaf_tx_weight_set(struct devlink_rate *rate_leaf, void *priv,
					       u32 tx_weight, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_leaf), node,
					tx_weight, extack);
}

/* devlink-rate node .tx_max setter */
static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
					    u64 tx_max, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_node),
				     node, tx_max, extack);
}

/* devlink-rate node .tx_share setter */
static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
					      u64 tx_share, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_node),
				       node, tx_share, extack);
}

/* devlink-rate node .tx_priority setter */
static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node, void *priv,
						 u32 tx_priority, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_node),
					  node, tx_priority, extack);
}

/* devlink-rate node .tx_weight setter */
static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, void *priv,
					       u32 tx_weight, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_node),
					node, tx_weight, extack);
}

/* devlink-rate .set_parent callback: create, delete, or re-parent a
 * scheduler node to match the requested devlink-rate hierarchy
 */
static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
				  struct devlink_rate *parent,
				  void *priv, void *parent_priv,
				  struct netlink_ext_ack *extack)
{
	struct ice_port_info *pi = ice_get_pi_from_dev_rate(devlink_rate);
	struct ice_sched_node *tc_node, *node, *parent_node;
	u16 num_nodes_added;
	u32 first_node_teid;
	u32 node_teid;
	int status;

	tc_node = pi->root->children[0];
	node = priv;

	if (!extack)
		return 0;

	if (!ice_enable_custom_tx(devlink_priv(devlink_rate->devlink)))
		return -EBUSY;

	if (!parent) {
		/* detaching from the tree: only an existing, childless,
		 * non-root node may be freed
		 */
		if (!node || tc_node == node || node->num_children)
			return -EINVAL;

		mutex_lock(&pi->sched_lock);
		ice_free_sched_node(pi, node);
		mutex_unlock(&pi->sched_lock);

		return 0;
	}

	parent_node = parent_priv;

	/* if the node doesn't exist, create it */
	if (!node->parent) {
		mutex_lock(&pi->sched_lock);
		status = ice_sched_add_elems(pi, tc_node, parent_node,
					     parent_node->tx_sched_layer + 1,
					     1, &num_nodes_added, &first_node_teid,
					     &node);
		mutex_unlock(&pi->sched_lock);

		if (status) {
NL_SET_ERR_MSG_MOD(extack, "Can't add a new node"); 1188 return status; 1189 } 1190 1191 if (devlink_rate->tx_share) 1192 ice_set_object_tx_share(pi, node, devlink_rate->tx_share, extack); 1193 if (devlink_rate->tx_max) 1194 ice_set_object_tx_max(pi, node, devlink_rate->tx_max, extack); 1195 if (devlink_rate->tx_priority) 1196 ice_set_object_tx_priority(pi, node, devlink_rate->tx_priority, extack); 1197 if (devlink_rate->tx_weight) 1198 ice_set_object_tx_weight(pi, node, devlink_rate->tx_weight, extack); 1199 } else { 1200 node_teid = le32_to_cpu(node->info.node_teid); 1201 mutex_lock(&pi->sched_lock); 1202 status = ice_sched_move_nodes(pi, parent_node, 1, &node_teid); 1203 mutex_unlock(&pi->sched_lock); 1204 1205 if (status) 1206 NL_SET_ERR_MSG_MOD(extack, "Can't move existing node to a new parent"); 1207 } 1208 1209 return status; 1210 } 1211 1212 static void ice_set_min_max_msix(struct ice_pf *pf) 1213 { 1214 struct devlink *devlink = priv_to_devlink(pf); 1215 union devlink_param_value val; 1216 int err; 1217 1218 err = devl_param_driverinit_value_get(devlink, 1219 DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, 1220 &val); 1221 if (!err) 1222 pf->msix.min = val.vu32; 1223 1224 err = devl_param_driverinit_value_get(devlink, 1225 DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, 1226 &val); 1227 if (!err) 1228 pf->msix.max = val.vu32; 1229 } 1230 1231 /** 1232 * ice_devlink_reinit_up - do reinit of the given PF 1233 * @pf: pointer to the PF struct 1234 */ 1235 static int ice_devlink_reinit_up(struct ice_pf *pf) 1236 { 1237 struct ice_vsi *vsi = ice_get_main_vsi(pf); 1238 struct device *dev = ice_pf_to_dev(pf); 1239 bool need_dev_deinit = false; 1240 int err; 1241 1242 err = ice_init_hw(&pf->hw); 1243 if (err) { 1244 dev_err(dev, "ice_init_hw failed: %d\n", err); 1245 return err; 1246 } 1247 1248 ice_init_dev_hw(pf); 1249 1250 /* load MSI-X values */ 1251 ice_set_min_max_msix(pf); 1252 1253 err = ice_init_dev(pf); 1254 if (err) 1255 goto unroll_hw_init; 1256 1257 err = 
ice_init_pf(pf); 1258 if (err) { 1259 dev_err(dev, "ice_init_pf failed: %d\n", err); 1260 goto unroll_dev_init; 1261 } 1262 1263 vsi->flags = ICE_VSI_FLAG_INIT; 1264 1265 rtnl_lock(); 1266 err = ice_vsi_cfg(vsi); 1267 rtnl_unlock(); 1268 if (err) 1269 goto unroll_pf_init; 1270 1271 /* No need to take devl_lock, it's already taken by devlink API */ 1272 err = ice_load(pf); 1273 if (err) 1274 goto err_load; 1275 1276 return 0; 1277 1278 err_load: 1279 rtnl_lock(); 1280 ice_vsi_decfg(vsi); 1281 rtnl_unlock(); 1282 unroll_pf_init: 1283 ice_deinit_pf(pf); 1284 unroll_dev_init: 1285 need_dev_deinit = true; 1286 unroll_hw_init: 1287 ice_deinit_hw(&pf->hw); 1288 if (need_dev_deinit) 1289 ice_deinit_dev(pf); 1290 return err; 1291 } 1292 1293 /** 1294 * ice_devlink_reload_up - do reload up after reinit 1295 * @devlink: pointer to the devlink instance reloading 1296 * @action: the action requested 1297 * @limit: limits imposed by userspace, such as not resetting 1298 * @actions_performed: on return, indicate what actions actually performed 1299 * @extack: netlink extended ACK structure 1300 */ 1301 static int 1302 ice_devlink_reload_up(struct devlink *devlink, 1303 enum devlink_reload_action action, 1304 enum devlink_reload_limit limit, 1305 u32 *actions_performed, 1306 struct netlink_ext_ack *extack) 1307 { 1308 struct ice_pf *pf = devlink_priv(devlink); 1309 1310 switch (action) { 1311 case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: 1312 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); 1313 return ice_devlink_reinit_up(pf); 1314 case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: 1315 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); 1316 return ice_devlink_reload_empr_finish(pf, extack); 1317 default: 1318 WARN_ON(1); 1319 return -EOPNOTSUPP; 1320 } 1321 } 1322 1323 static const struct devlink_ops ice_devlink_ops = { 1324 .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, 1325 .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | 
1326 BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), 1327 .reload_down = ice_devlink_reload_down, 1328 .reload_up = ice_devlink_reload_up, 1329 .eswitch_mode_get = ice_eswitch_mode_get, 1330 .eswitch_mode_set = ice_eswitch_mode_set, 1331 .info_get = ice_devlink_info_get, 1332 .flash_update = ice_devlink_flash_update, 1333 1334 .rate_node_new = ice_devlink_rate_node_new, 1335 .rate_node_del = ice_devlink_rate_node_del, 1336 1337 .rate_leaf_tx_max_set = ice_devlink_rate_leaf_tx_max_set, 1338 .rate_leaf_tx_share_set = ice_devlink_rate_leaf_tx_share_set, 1339 .rate_leaf_tx_priority_set = ice_devlink_rate_leaf_tx_priority_set, 1340 .rate_leaf_tx_weight_set = ice_devlink_rate_leaf_tx_weight_set, 1341 1342 .rate_node_tx_max_set = ice_devlink_rate_node_tx_max_set, 1343 .rate_node_tx_share_set = ice_devlink_rate_node_tx_share_set, 1344 .rate_node_tx_priority_set = ice_devlink_rate_node_tx_priority_set, 1345 .rate_node_tx_weight_set = ice_devlink_rate_node_tx_weight_set, 1346 1347 .rate_leaf_parent_set = ice_devlink_set_parent, 1348 .rate_node_parent_set = ice_devlink_set_parent, 1349 1350 .port_new = ice_devlink_port_new, 1351 }; 1352 1353 static const struct devlink_ops ice_sf_devlink_ops; 1354 1355 static int 1356 ice_devlink_enable_roce_get(struct devlink *devlink, u32 id, 1357 struct devlink_param_gset_ctx *ctx, 1358 struct netlink_ext_ack *extack) 1359 { 1360 struct ice_pf *pf = devlink_priv(devlink); 1361 struct iidc_rdma_core_dev_info *cdev; 1362 1363 cdev = pf->cdev_info; 1364 if (!cdev) 1365 return -EOPNOTSUPP; 1366 1367 ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2); 1368 1369 return 0; 1370 } 1371 1372 static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id, 1373 struct devlink_param_gset_ctx *ctx, 1374 struct netlink_ext_ack *extack) 1375 { 1376 struct ice_pf *pf = devlink_priv(devlink); 1377 struct iidc_rdma_core_dev_info *cdev; 1378 bool roce_ena = ctx->val.vbool; 1379 int ret; 1380 1381 cdev = pf->cdev_info; 1382 if (!cdev) 1383 
return -ENODEV; 1384 1385 if (!roce_ena) { 1386 ice_unplug_aux_dev(pf); 1387 cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2; 1388 return 0; 1389 } 1390 1391 cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2; 1392 ret = ice_plug_aux_dev(pf); 1393 if (ret) 1394 cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2; 1395 1396 return ret; 1397 } 1398 1399 static int 1400 ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id, 1401 union devlink_param_value val, 1402 struct netlink_ext_ack *extack) 1403 { 1404 struct ice_pf *pf = devlink_priv(devlink); 1405 struct iidc_rdma_core_dev_info *cdev; 1406 1407 cdev = pf->cdev_info; 1408 if (!cdev) 1409 return -ENODEV; 1410 1411 if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) 1412 return -EOPNOTSUPP; 1413 1414 if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) { 1415 NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously"); 1416 return -EOPNOTSUPP; 1417 } 1418 1419 return 0; 1420 } 1421 1422 static int 1423 ice_devlink_enable_iw_get(struct devlink *devlink, u32 id, 1424 struct devlink_param_gset_ctx *ctx, 1425 struct netlink_ext_ack *extack) 1426 { 1427 struct ice_pf *pf = devlink_priv(devlink); 1428 struct iidc_rdma_core_dev_info *cdev; 1429 1430 cdev = pf->cdev_info; 1431 if (!cdev) 1432 return -EOPNOTSUPP; 1433 1434 ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP); 1435 1436 return 0; 1437 } 1438 1439 static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id, 1440 struct devlink_param_gset_ctx *ctx, 1441 struct netlink_ext_ack *extack) 1442 { 1443 struct ice_pf *pf = devlink_priv(devlink); 1444 struct iidc_rdma_core_dev_info *cdev; 1445 bool iw_ena = ctx->val.vbool; 1446 int ret; 1447 1448 cdev = pf->cdev_info; 1449 if (!cdev) 1450 return -ENODEV; 1451 1452 if (!iw_ena) { 1453 ice_unplug_aux_dev(pf); 1454 cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP; 1455 return 0; 1456 } 1457 1458 cdev->rdma_protocol |= 
IIDC_RDMA_PROTOCOL_IWARP; 1459 ret = ice_plug_aux_dev(pf); 1460 if (ret) 1461 cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP; 1462 1463 return ret; 1464 } 1465 1466 static int 1467 ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id, 1468 union devlink_param_value val, 1469 struct netlink_ext_ack *extack) 1470 { 1471 struct ice_pf *pf = devlink_priv(devlink); 1472 1473 if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) 1474 return -EOPNOTSUPP; 1475 1476 if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) { 1477 NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously"); 1478 return -EOPNOTSUPP; 1479 } 1480 1481 return 0; 1482 } 1483 1484 #define DEVLINK_LOCAL_FWD_DISABLED_STR "disabled" 1485 #define DEVLINK_LOCAL_FWD_ENABLED_STR "enabled" 1486 #define DEVLINK_LOCAL_FWD_PRIORITIZED_STR "prioritized" 1487 1488 /** 1489 * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode. 1490 * @mode: local forwarding for mode used in port_info struct. 1491 * 1492 * Return: Mode respective string or "Invalid". 1493 */ 1494 static const char * 1495 ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode) 1496 { 1497 switch (mode) { 1498 case ICE_LOCAL_FWD_MODE_ENABLED: 1499 return DEVLINK_LOCAL_FWD_ENABLED_STR; 1500 case ICE_LOCAL_FWD_MODE_PRIORITIZED: 1501 return DEVLINK_LOCAL_FWD_PRIORITIZED_STR; 1502 case ICE_LOCAL_FWD_MODE_DISABLED: 1503 return DEVLINK_LOCAL_FWD_DISABLED_STR; 1504 } 1505 1506 return "Invalid"; 1507 } 1508 1509 /** 1510 * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name. 1511 * @mode_str: local forwarding mode string. 1512 * 1513 * Return: Mode value or negative number if invalid. 
1514 */ 1515 static int ice_devlink_local_fwd_str_to_mode(const char *mode_str) 1516 { 1517 if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR)) 1518 return ICE_LOCAL_FWD_MODE_ENABLED; 1519 else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR)) 1520 return ICE_LOCAL_FWD_MODE_PRIORITIZED; 1521 else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR)) 1522 return ICE_LOCAL_FWD_MODE_DISABLED; 1523 1524 return -EINVAL; 1525 } 1526 1527 /** 1528 * ice_devlink_local_fwd_get - Get local_fwd parameter. 1529 * @devlink: Pointer to the devlink instance. 1530 * @id: The parameter ID to set. 1531 * @ctx: Context to store the parameter value. 1532 * @extack: netlink extended ACK structure 1533 * 1534 * Return: Zero. 1535 */ 1536 static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id, 1537 struct devlink_param_gset_ctx *ctx, 1538 struct netlink_ext_ack *extack) 1539 { 1540 struct ice_pf *pf = devlink_priv(devlink); 1541 struct ice_port_info *pi; 1542 const char *mode_str; 1543 1544 pi = pf->hw.port_info; 1545 mode_str = ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode); 1546 snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", mode_str); 1547 1548 return 0; 1549 } 1550 1551 /** 1552 * ice_devlink_local_fwd_set - Set local_fwd parameter. 1553 * @devlink: Pointer to the devlink instance. 1554 * @id: The parameter ID to set. 1555 * @ctx: Context to get the parameter value. 1556 * @extack: Netlink extended ACK structure. 1557 * 1558 * Return: Zero. 
1559 */ 1560 static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id, 1561 struct devlink_param_gset_ctx *ctx, 1562 struct netlink_ext_ack *extack) 1563 { 1564 int new_local_fwd_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr); 1565 struct ice_pf *pf = devlink_priv(devlink); 1566 struct device *dev = ice_pf_to_dev(pf); 1567 struct ice_port_info *pi; 1568 1569 pi = pf->hw.port_info; 1570 if (pi->local_fwd_mode != new_local_fwd_mode) { 1571 pi->local_fwd_mode = new_local_fwd_mode; 1572 dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr); 1573 ice_schedule_reset(pf, ICE_RESET_CORER); 1574 } 1575 1576 return 0; 1577 } 1578 1579 /** 1580 * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value. 1581 * @devlink: Unused pointer to devlink instance. 1582 * @id: The parameter ID to validate. 1583 * @val: Value to validate. 1584 * @extack: Netlink extended ACK structure. 1585 * 1586 * Supported values are: 1587 * "enabled" - local_fwd is enabled, "disabled" - local_fwd is disabled 1588 * "prioritized" - local_fwd traffic is prioritized in scheduling. 1589 * 1590 * Return: Zero when passed parameter value is supported. Negative value on 1591 * error. 
1592 */ 1593 static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id, 1594 union devlink_param_value val, 1595 struct netlink_ext_ack *extack) 1596 { 1597 if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) { 1598 NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported."); 1599 return -EINVAL; 1600 } 1601 1602 return 0; 1603 } 1604 1605 static int 1606 ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id, 1607 union devlink_param_value val, 1608 struct netlink_ext_ack *extack) 1609 { 1610 struct ice_pf *pf = devlink_priv(devlink); 1611 1612 if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors) 1613 return -EINVAL; 1614 1615 return 0; 1616 } 1617 1618 static int 1619 ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id, 1620 union devlink_param_value val, 1621 struct netlink_ext_ack *extack) 1622 { 1623 if (val.vu32 < ICE_MIN_MSIX) 1624 return -EINVAL; 1625 1626 return 0; 1627 } 1628 1629 static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id, 1630 union devlink_param_value val, 1631 struct netlink_ext_ack *extack) 1632 { 1633 struct ice_pf *pf = devlink_priv(devlink); 1634 bool new_state = val.vbool; 1635 1636 if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) 1637 return -EOPNOTSUPP; 1638 1639 return 0; 1640 } 1641 1642 enum ice_param_id { 1643 ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, 1644 ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS, 1645 ICE_DEVLINK_PARAM_ID_LOCAL_FWD, 1646 }; 1647 1648 static const struct devlink_param ice_dvl_rdma_params[] = { 1649 DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1650 ice_devlink_enable_roce_get, 1651 ice_devlink_enable_roce_set, 1652 ice_devlink_enable_roce_validate), 1653 DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1654 ice_devlink_enable_iw_get, 1655 ice_devlink_enable_iw_set, 1656 ice_devlink_enable_iw_validate), 1657 DEVLINK_PARAM_GENERIC(ENABLE_RDMA, 
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 1658 NULL, NULL, ice_devlink_enable_rdma_validate), 1659 }; 1660 1661 static const struct devlink_param ice_dvl_msix_params[] = { 1662 DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX, 1663 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 1664 NULL, NULL, ice_devlink_msix_max_pf_validate), 1665 DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN, 1666 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 1667 NULL, NULL, ice_devlink_msix_min_pf_validate), 1668 }; 1669 1670 static const struct devlink_param ice_dvl_sched_params[] = { 1671 DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS, 1672 "tx_scheduling_layers", 1673 DEVLINK_PARAM_TYPE_U8, 1674 BIT(DEVLINK_PARAM_CMODE_PERMANENT), 1675 ice_devlink_tx_sched_layers_get, 1676 ice_devlink_tx_sched_layers_set, 1677 ice_devlink_tx_sched_layers_validate), 1678 DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_LOCAL_FWD, 1679 "local_forwarding", DEVLINK_PARAM_TYPE_STRING, 1680 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1681 ice_devlink_local_fwd_get, 1682 ice_devlink_local_fwd_set, 1683 ice_devlink_local_fwd_validate), 1684 }; 1685 1686 static void ice_devlink_free(void *devlink_ptr) 1687 { 1688 devlink_free((struct devlink *)devlink_ptr); 1689 } 1690 1691 /** 1692 * ice_allocate_pf - Allocate devlink and return PF structure pointer 1693 * @dev: the device to allocate for 1694 * 1695 * Allocate a devlink instance for this device and return the private area as 1696 * the PF structure. The devlink memory is kept track of through devres by 1697 * adding an action to remove it when unwinding. 
1698 */ 1699 struct ice_pf *ice_allocate_pf(struct device *dev) 1700 { 1701 struct devlink *devlink; 1702 1703 devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev); 1704 if (!devlink) 1705 return NULL; 1706 1707 /* Add an action to teardown the devlink when unwinding the driver */ 1708 if (devm_add_action_or_reset(dev, ice_devlink_free, devlink)) 1709 return NULL; 1710 1711 return devlink_priv(devlink); 1712 } 1713 1714 /** 1715 * ice_allocate_sf - Allocate devlink and return SF structure pointer 1716 * @dev: the device to allocate for 1717 * @pf: pointer to the PF structure 1718 * 1719 * Allocate a devlink instance for SF. 1720 * 1721 * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error 1722 */ 1723 struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf) 1724 { 1725 struct devlink *devlink; 1726 int err; 1727 1728 devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv), 1729 dev); 1730 if (!devlink) 1731 return ERR_PTR(-ENOMEM); 1732 1733 err = devl_nested_devlink_set(priv_to_devlink(pf), devlink); 1734 if (err) { 1735 devlink_free(devlink); 1736 return ERR_PTR(err); 1737 } 1738 1739 return devlink_priv(devlink); 1740 } 1741 1742 /** 1743 * ice_devlink_register - Register devlink interface for this PF 1744 * @pf: the PF to register the devlink for. 1745 * 1746 * Register the devlink instance associated with this physical function. 1747 * 1748 * Return: zero on success or an error code on failure. 1749 */ 1750 void ice_devlink_register(struct ice_pf *pf) 1751 { 1752 struct devlink *devlink = priv_to_devlink(pf); 1753 1754 devl_register(devlink); 1755 } 1756 1757 /** 1758 * ice_devlink_unregister - Unregister devlink resources for this PF. 1759 * @pf: the PF structure to cleanup 1760 * 1761 * Releases resources used by devlink and cleans up associated memory. 
1762 */ 1763 void ice_devlink_unregister(struct ice_pf *pf) 1764 { 1765 devl_unregister(priv_to_devlink(pf)); 1766 } 1767 1768 int ice_devlink_register_params(struct ice_pf *pf) 1769 { 1770 struct devlink *devlink = priv_to_devlink(pf); 1771 union devlink_param_value value; 1772 struct ice_hw *hw = &pf->hw; 1773 int status; 1774 1775 status = devl_params_register(devlink, ice_dvl_rdma_params, 1776 ARRAY_SIZE(ice_dvl_rdma_params)); 1777 if (status) 1778 return status; 1779 1780 status = devl_params_register(devlink, ice_dvl_msix_params, 1781 ARRAY_SIZE(ice_dvl_msix_params)); 1782 if (status) 1783 goto unregister_rdma_params; 1784 1785 if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) 1786 status = devl_params_register(devlink, ice_dvl_sched_params, 1787 ARRAY_SIZE(ice_dvl_sched_params)); 1788 if (status) 1789 goto unregister_msix_params; 1790 1791 value.vu32 = pf->msix.max; 1792 devl_param_driverinit_value_set(devlink, 1793 DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, 1794 value); 1795 value.vu32 = pf->msix.min; 1796 devl_param_driverinit_value_set(devlink, 1797 DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, 1798 value); 1799 1800 value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags); 1801 devl_param_driverinit_value_set(devlink, 1802 DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, 1803 value); 1804 1805 return 0; 1806 1807 unregister_msix_params: 1808 devl_params_unregister(devlink, ice_dvl_msix_params, 1809 ARRAY_SIZE(ice_dvl_msix_params)); 1810 unregister_rdma_params: 1811 devl_params_unregister(devlink, ice_dvl_rdma_params, 1812 ARRAY_SIZE(ice_dvl_rdma_params)); 1813 return status; 1814 } 1815 1816 void ice_devlink_unregister_params(struct ice_pf *pf) 1817 { 1818 struct devlink *devlink = priv_to_devlink(pf); 1819 struct ice_hw *hw = &pf->hw; 1820 1821 devl_params_unregister(devlink, ice_dvl_rdma_params, 1822 ARRAY_SIZE(ice_dvl_rdma_params)); 1823 devl_params_unregister(devlink, ice_dvl_msix_params, 1824 ARRAY_SIZE(ice_dvl_msix_params)); 1825 1826 if 
(hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) 1827 devl_params_unregister(devlink, ice_dvl_sched_params, 1828 ARRAY_SIZE(ice_dvl_sched_params)); 1829 } 1830 1831 #define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024) 1832 1833 static const struct devlink_region_ops ice_nvm_region_ops; 1834 static const struct devlink_region_ops ice_sram_region_ops; 1835 1836 /** 1837 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents 1838 * @devlink: the devlink instance 1839 * @ops: the devlink region to snapshot 1840 * @extack: extended ACK response structure 1841 * @data: on exit points to snapshot data buffer 1842 * 1843 * This function is called in response to a DEVLINK_CMD_REGION_NEW for either 1844 * the nvm-flash or shadow-ram region. 1845 * 1846 * It captures a snapshot of the NVM or Shadow RAM flash contents. This 1847 * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink 1848 * interface. 1849 * 1850 * @returns zero on success, and updates the data pointer. Returns a non-zero 1851 * error code on failure. 
1852 */ 1853 static int ice_devlink_nvm_snapshot(struct devlink *devlink, 1854 const struct devlink_region_ops *ops, 1855 struct netlink_ext_ack *extack, u8 **data) 1856 { 1857 struct ice_pf *pf = devlink_priv(devlink); 1858 struct device *dev = ice_pf_to_dev(pf); 1859 struct ice_hw *hw = &pf->hw; 1860 bool read_shadow_ram; 1861 u8 *nvm_data, *tmp, i; 1862 u32 nvm_size, left; 1863 s8 num_blks; 1864 int status; 1865 1866 if (ops == &ice_nvm_region_ops) { 1867 read_shadow_ram = false; 1868 nvm_size = hw->flash.flash_size; 1869 } else if (ops == &ice_sram_region_ops) { 1870 read_shadow_ram = true; 1871 nvm_size = hw->flash.sr_words * 2u; 1872 } else { 1873 NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function"); 1874 return -EOPNOTSUPP; 1875 } 1876 1877 nvm_data = vzalloc(nvm_size); 1878 if (!nvm_data) 1879 return -ENOMEM; 1880 1881 num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE); 1882 tmp = nvm_data; 1883 left = nvm_size; 1884 1885 /* Some systems take longer to read the NVM than others which causes the 1886 * FW to reclaim the NVM lock before the entire NVM has been read. Fix 1887 * this by breaking the reads of the NVM into smaller chunks that will 1888 * probably not take as long. 
This has some overhead since we are 1889 * increasing the number of AQ commands, but it should always work 1890 */ 1891 for (i = 0; i < num_blks; i++) { 1892 u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left); 1893 1894 status = ice_acquire_nvm(hw, ICE_RES_READ); 1895 if (status) { 1896 dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", 1897 status, hw->adminq.sq_last_status); 1898 NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); 1899 vfree(nvm_data); 1900 return -EIO; 1901 } 1902 1903 status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE, 1904 &read_sz, tmp, read_shadow_ram); 1905 if (status) { 1906 dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", 1907 read_sz, status, hw->adminq.sq_last_status); 1908 NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); 1909 ice_release_nvm(hw); 1910 vfree(nvm_data); 1911 return -EIO; 1912 } 1913 ice_release_nvm(hw); 1914 1915 tmp += read_sz; 1916 left -= read_sz; 1917 } 1918 1919 *data = nvm_data; 1920 1921 return 0; 1922 } 1923 1924 /** 1925 * ice_devlink_nvm_read - Read a portion of NVM flash contents 1926 * @devlink: the devlink instance 1927 * @ops: the devlink region to snapshot 1928 * @extack: extended ACK response structure 1929 * @offset: the offset to start at 1930 * @size: the amount to read 1931 * @data: the data buffer to read into 1932 * 1933 * This function is called in response to DEVLINK_CMD_REGION_READ to directly 1934 * read a section of the NVM contents. 1935 * 1936 * It reads from either the nvm-flash or shadow-ram region contents. 1937 * 1938 * @returns zero on success, and updates the data pointer. Returns a non-zero 1939 * error code on failure. 
1940 */ 1941 static int ice_devlink_nvm_read(struct devlink *devlink, 1942 const struct devlink_region_ops *ops, 1943 struct netlink_ext_ack *extack, 1944 u64 offset, u32 size, u8 *data) 1945 { 1946 struct ice_pf *pf = devlink_priv(devlink); 1947 struct device *dev = ice_pf_to_dev(pf); 1948 struct ice_hw *hw = &pf->hw; 1949 bool read_shadow_ram; 1950 u64 nvm_size; 1951 int status; 1952 1953 if (ops == &ice_nvm_region_ops) { 1954 read_shadow_ram = false; 1955 nvm_size = hw->flash.flash_size; 1956 } else if (ops == &ice_sram_region_ops) { 1957 read_shadow_ram = true; 1958 nvm_size = hw->flash.sr_words * 2u; 1959 } else { 1960 NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function"); 1961 return -EOPNOTSUPP; 1962 } 1963 1964 if (offset + size >= nvm_size) { 1965 NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size"); 1966 return -ERANGE; 1967 } 1968 1969 status = ice_acquire_nvm(hw, ICE_RES_READ); 1970 if (status) { 1971 dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", 1972 status, hw->adminq.sq_last_status); 1973 NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); 1974 return -EIO; 1975 } 1976 1977 status = ice_read_flat_nvm(hw, (u32)offset, &size, data, 1978 read_shadow_ram); 1979 if (status) { 1980 dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", 1981 size, status, hw->adminq.sq_last_status); 1982 NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); 1983 ice_release_nvm(hw); 1984 return -EIO; 1985 } 1986 ice_release_nvm(hw); 1987 1988 return 0; 1989 } 1990 1991 /** 1992 * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities 1993 * @devlink: the devlink instance 1994 * @ops: the devlink region being snapshotted 1995 * @extack: extended ACK response structure 1996 * @data: on exit points to snapshot data buffer 1997 * 1998 * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for 1999 * the device-caps devlink region. 
It captures a snapshot of the device 2000 * capabilities reported by firmware. 2001 * 2002 * @returns zero on success, and updates the data pointer. Returns a non-zero 2003 * error code on failure. 2004 */ 2005 static int 2006 ice_devlink_devcaps_snapshot(struct devlink *devlink, 2007 const struct devlink_region_ops *ops, 2008 struct netlink_ext_ack *extack, u8 **data) 2009 { 2010 struct ice_pf *pf = devlink_priv(devlink); 2011 struct device *dev = ice_pf_to_dev(pf); 2012 struct ice_hw *hw = &pf->hw; 2013 void *devcaps; 2014 int status; 2015 2016 devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN); 2017 if (!devcaps) 2018 return -ENOMEM; 2019 2020 status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL, 2021 ice_aqc_opc_list_dev_caps, NULL); 2022 if (status) { 2023 dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n", 2024 status, hw->adminq.sq_last_status); 2025 NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities"); 2026 vfree(devcaps); 2027 return status; 2028 } 2029 2030 *data = (u8 *)devcaps; 2031 2032 return 0; 2033 } 2034 2035 static const struct devlink_region_ops ice_nvm_region_ops = { 2036 .name = "nvm-flash", 2037 .destructor = vfree, 2038 .snapshot = ice_devlink_nvm_snapshot, 2039 .read = ice_devlink_nvm_read, 2040 }; 2041 2042 static const struct devlink_region_ops ice_sram_region_ops = { 2043 .name = "shadow-ram", 2044 .destructor = vfree, 2045 .snapshot = ice_devlink_nvm_snapshot, 2046 .read = ice_devlink_nvm_read, 2047 }; 2048 2049 static const struct devlink_region_ops ice_devcaps_region_ops = { 2050 .name = "device-caps", 2051 .destructor = vfree, 2052 .snapshot = ice_devlink_devcaps_snapshot, 2053 }; 2054 2055 /** 2056 * ice_devlink_init_regions - Initialize devlink regions 2057 * @pf: the PF device structure 2058 * 2059 * Create devlink regions used to enable access to dump the contents of the 2060 * flash memory on the device. 
2061 */ 2062 void ice_devlink_init_regions(struct ice_pf *pf) 2063 { 2064 struct devlink *devlink = priv_to_devlink(pf); 2065 struct device *dev = ice_pf_to_dev(pf); 2066 u64 nvm_size, sram_size; 2067 2068 nvm_size = pf->hw.flash.flash_size; 2069 pf->nvm_region = devl_region_create(devlink, &ice_nvm_region_ops, 1, 2070 nvm_size); 2071 if (IS_ERR(pf->nvm_region)) { 2072 dev_err(dev, "failed to create NVM devlink region, err %ld\n", 2073 PTR_ERR(pf->nvm_region)); 2074 pf->nvm_region = NULL; 2075 } 2076 2077 sram_size = pf->hw.flash.sr_words * 2u; 2078 pf->sram_region = devl_region_create(devlink, &ice_sram_region_ops, 2079 1, sram_size); 2080 if (IS_ERR(pf->sram_region)) { 2081 dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n", 2082 PTR_ERR(pf->sram_region)); 2083 pf->sram_region = NULL; 2084 } 2085 2086 pf->devcaps_region = devl_region_create(devlink, 2087 &ice_devcaps_region_ops, 10, 2088 ICE_AQ_MAX_BUF_LEN); 2089 if (IS_ERR(pf->devcaps_region)) { 2090 dev_err(dev, "failed to create device-caps devlink region, err %ld\n", 2091 PTR_ERR(pf->devcaps_region)); 2092 pf->devcaps_region = NULL; 2093 } 2094 } 2095 2096 /** 2097 * ice_devlink_destroy_regions - Destroy devlink regions 2098 * @pf: the PF device structure 2099 * 2100 * Remove previously created regions for this PF. 2101 */ 2102 void ice_devlink_destroy_regions(struct ice_pf *pf) 2103 { 2104 if (pf->nvm_region) 2105 devl_region_destroy(pf->nvm_region); 2106 2107 if (pf->sram_region) 2108 devl_region_destroy(pf->sram_region); 2109 2110 if (pf->devcaps_region) 2111 devl_region_destroy(pf->devcaps_region); 2112 } 2113