// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/acpi.h>
#include <linux/memregion.h>
#include <asm/smp.h>
#include "intel.h"
#include "nfit.h"

static ssize_t firmware_activate_noidle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
}

static ssize_t firmware_activate_noidle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	ssize_t rc;
	bool val;

	rc = kstrtobool(buf, &val);
	if (rc)
		return rc;
	if (val != acpi_desc->fwa_noidle)
		acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
	acpi_desc->fwa_noidle = val;
	return size;
}
DEVICE_ATTR_RW(firmware_activate_noidle);

bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	unsigned long *mask;

	if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
		return false;

	mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
	return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
}
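
/*
 * Every DSM invocation below wraps an Intel-family payload in a
 * 'struct nd_cmd_pkg' envelope. TRAILING_OVERLAP() declares a union
 * that overlays the payload member ('cmd') onto the flexible
 * nd_payload[] array at the tail of the envelope, conceptually
 * (member names here are illustrative, not the macro's actual
 * expansion):
 *
 *	union {
 *		struct nd_cmd_pkg pkg;
 *		struct {
 *			u8 __skip[offsetof(struct nd_cmd_pkg, nd_payload)];
 *			struct nd_intel_get_security_state cmd;
 *		};
 *	} nd_cmd;
 *
 * ...so one stack object carries both the command header (nd_cmd.pkg)
 * and the family-specific input/output payload (nd_cmd.cmd).
 */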
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned long security_flags = 0;
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_get_security_state cmd;
	) nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_get_security_state),
			.nd_fw_size =
				sizeof(struct nd_intel_get_security_state),
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
		return 0;

	/*
	 * Short circuit the state retrieval while we are doing overwrite.
	 * The DSM spec states that the security state is indeterminate
	 * until the overwrite DSM completes.
	 */
	if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
		return BIT(NVDIMM_SECURITY_OVERWRITE);

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0 || nd_cmd.cmd.status) {
		pr_err("%s: security state retrieval failed (%d:%#x)\n",
				nvdimm_name(nvdimm), rc, nd_cmd.cmd.status);
		return 0;
	}

	/* check and see if security is enabled and locked */
	if (ptype == NVDIMM_MASTER) {
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
		return 0;

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN ||
		    nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);

	return security_flags;
}

static int intel_security_freeze(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_freeze_lock cmd;
	) nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	if (nd_cmd.cmd.status)
		return -EIO;
	return 0;
}
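
/*
 * Passphrase update: the DSM takes the current and the new passphrase
 * concatenated in the input payload, hence an nd_size_in of two
 * passphrase blocks. NVDIMM_MASTER selects the master-passphrase
 * variant of the command.
 */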
static int intel_security_change_key(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *old_data,
		const struct nvdimm_key_data *new_data,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
		NVDIMM_INTEL_SET_PASSPHRASE;
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_set_passphrase cmd;
	) nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};
	int rc;

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.old_pass, old_data->data,
			sizeof(nd_cmd.cmd.old_pass));
	memcpy(nd_cmd.cmd.new_pass, new_data->data,
			sizeof(nd_cmd.cmd.new_pass));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -EIO;
	}
}

static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_unlock_unit cmd;
	) nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	default:
		return -EIO;
	}

	return 0;
}

static int intel_security_disable(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_disable_passphrase cmd;
	) nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}
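
/*
 * Secure erase likewise dispatches to the master or user variant of
 * the command based on the passphrase type, and maps the firmware
 * status to an errno: invalid passphrase to -EINVAL, unsupported to
 * -EOPNOTSUPP, and invalid state (or anything unrecognized) to
 * -ENXIO.
 */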
static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key,
		enum nvdimm_passphrase_type ptype)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_secure_erase cmd;
	) nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}

static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_query_overwrite cmd;
	) nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_OQUERY_INPROGRESS:
		return -EBUSY;
	default:
		return -ENXIO;
	}

	return 0;
}

static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *nkey)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_overwrite cmd;
	) nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, nkey->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
		return -ENOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}
}

static const struct nvdimm_security_ops __intel_security_ops = {
	.get_flags = intel_security_flags,
	.freeze = intel_security_freeze,
	.change_key = intel_security_change_key,
	.disable = intel_security_disable,
#ifdef CONFIG_X86
	.unlock = intel_security_unlock,
	.erase = intel_security_erase,
	.overwrite = intel_security_overwrite,
	.query_overwrite = intel_security_query_overwrite,
#endif
};

const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
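
/*
 * Firmware activation support. The bus-level ops below query the
 * platform-wide activation state and trigger activation; the
 * DIMM-level ops at the end of the file arm individual DIMMs and
 * report per-DIMM results.
 */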
static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_intel_bus_fw_activate_businfo *info)
{
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_bus_fw_activate_businfo cmd;
	) nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_bus_fw_activate_businfo),
			.nd_fw_size =
				sizeof(struct nd_intel_bus_fw_activate_businfo),
		},
	};
	int rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
			NULL);
	*info = nd_cmd.cmd;
	return rc;
}

/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
static enum nvdimm_fwa_state intel_bus_fwa_state(
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_intel_bus_fw_activate_businfo info;
	struct device *dev = acpi_desc->dev;
	enum nvdimm_fwa_state state;
	int rc;

	/*
	 * It should not be possible for platform firmware to return
	 * busy because activate is a synchronous operation. Treat it
	 * similar to invalid, i.e. always refresh / poll the status.
	 */
	switch (acpi_desc->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* check if capability needs to be refreshed */
		if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
			break;
		return acpi_desc->fwa_state;
	}

	/* Refresh with platform firmware */
	rc = intel_bus_fwa_businfo(nd_desc, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		if (info.activate_tmo > info.max_quiesce_tmo)
			state = NVDIMM_FWA_ARM_OVERFLOW;
		else
			state = NVDIMM_FWA_ARMED;
		break;
	default:
		dev_err_once(dev, "invalid firmware activate state %d\n",
				info.state);
		return NVDIMM_FWA_INVALID;
	}

	/*
	 * Capability data is available in the same payload as state. It
	 * is expected to be static.
	 */
	if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
		if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
		else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
			/*
			 * Skip hibernate cycle by default if platform
			 * indicates that it does not need devices to be
			 * quiesced.
			 */
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
		} else
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
	}

	acpi_desc->fwa_state = state;

	return state;
}
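
/*
 * The capability arrives in the same businfo payload as the state, so
 * a valid cached value can be returned directly; otherwise a state
 * refresh repopulates it. The cache is invalidated when
 * firmware_activate_noidle changes.
 */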
static enum nvdimm_fwa_capability intel_bus_fwa_capability(
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
		return acpi_desc->fwa_cap;

	if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
		return acpi_desc->fwa_cap;

	return NVDIMM_FWA_CAP_INVALID;
}

static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_bus_fw_activate cmd;
	) nd_cmd;
	int rc;

	nd_cmd.pkg = (struct nd_cmd_pkg) {
		.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
		.nd_family = NVDIMM_BUS_FAMILY_INTEL,
		.nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
		.nd_size_out =
			sizeof(struct nd_intel_bus_fw_activate),
		.nd_fw_size =
			sizeof(struct nd_intel_bus_fw_activate),
	};
	nd_cmd.cmd = (struct nd_intel_bus_fw_activate) {
		/*
		 * Even though activate is run from a suspended context,
		 * for safety, still ask platform firmware to force
		 * quiesce devices by default. Let the
		 * firmware_activate_noidle sysfs attribute override
		 * that policy.
		 */
		.iodev_state = acpi_desc->fwa_noidle
			? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
			: ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
	};
	switch (intel_bus_fwa_state(nd_desc)) {
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		break;
	default:
		return -ENXIO;
	}

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
			NULL);

	/*
	 * Whether the command succeeded or failed, the agent checking
	 * for the result needs to query the DIMMs individually.
	 * Increment the activation count to invalidate all the DIMM
	 * states at once (it's otherwise not possible to take
	 * acpi_desc->init_mutex in this context).
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	acpi_desc->fwa_count++;

	dev_dbg(acpi_desc->dev, "result: %d\n", rc);

	return rc;
}

static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
	.activate_state = intel_bus_fwa_state,
	.capability = intel_bus_fwa_capability,
	.activate = intel_bus_fwa_activate,
};

const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
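
/*
 * Per-DIMM firmware activation. The fwa_count snapshot taken in
 * intel_fwa_state() is compared against acpi_desc->fwa_count so that
 * a single bus-level activate invalidates every DIMM's cached state.
 */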
static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
		struct nd_intel_fw_activate_dimminfo *info)
{
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_fw_activate_dimminfo cmd;
	) nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_fw_activate_dimminfo),
			.nd_fw_size =
				sizeof(struct nd_intel_fw_activate_dimminfo),
		},
	};
	int rc;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	*info = nd_cmd.cmd;
	return rc;
}

static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct nd_intel_fw_activate_dimminfo info;
	int rc;

	/*
	 * Similar to the bus state, since activate is synchronous the
	 * busy state should resolve within the context of 'activate'.
	 */
	switch (nfit_mem->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* If no activations occurred the old state is still valid */
		if (nfit_mem->fwa_count == acpi_desc->fwa_count)
			return nfit_mem->fwa_state;
	}

	rc = intel_fwa_dimminfo(nvdimm, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
		break;
	default:
		nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
		break;
	}

	switch (info.result) {
	case ND_INTEL_DIMM_FWA_NONE:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
		break;
	case ND_INTEL_DIMM_FWA_SUCCESS:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
		break;
	case ND_INTEL_DIMM_FWA_NOTSTAGED:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
		break;
	case ND_INTEL_DIMM_FWA_NEEDRESET:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
		break;
	case ND_INTEL_DIMM_FWA_MEDIAFAILED:
	case ND_INTEL_DIMM_FWA_ABORT:
	case ND_INTEL_DIMM_FWA_NOTSUPP:
	case ND_INTEL_DIMM_FWA_ERROR:
	default:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
		break;
	}

	nfit_mem->fwa_count = acpi_desc->fwa_count;

	return nfit_mem->fwa_state;
}

static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;

	if (nfit_mem->fwa_count == acpi_desc->fwa_count
			&& nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
		return nfit_mem->fwa_result;

	if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
		return nfit_mem->fwa_result;

	return NVDIMM_FWA_RESULT_INVALID;
}
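
/*
 * Arming is idempotent: a request that matches the current state
 * returns success without issuing the DSM. Otherwise both the
 * bus-level and DIMM-level cached states are invalidated before the
 * transition.
 */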
static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
		struct nd_intel_fw_activate_arm cmd;
	) nd_cmd;
	int rc;

	nd_cmd.pkg = (struct nd_cmd_pkg) {
		.nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
		.nd_family = NVDIMM_FAMILY_INTEL,
		.nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
		.nd_size_out = sizeof(struct nd_intel_fw_activate_arm),
		.nd_fw_size = sizeof(struct nd_intel_fw_activate_arm),
	};
	nd_cmd.cmd = (struct nd_intel_fw_activate_arm) {
		.activate_arm = arm == NVDIMM_FWA_ARM ?
			ND_INTEL_DIMM_FWA_ARM :
			ND_INTEL_DIMM_FWA_DISARM,
	};

	switch (intel_fwa_state(nvdimm)) {
	case NVDIMM_FWA_INVALID:
		return -ENXIO;
	case NVDIMM_FWA_BUSY:
		return -EBUSY;
	case NVDIMM_FWA_IDLE:
		if (arm == NVDIMM_FWA_DISARM)
			return 0;
		break;
	case NVDIMM_FWA_ARMED:
		if (arm == NVDIMM_FWA_ARM)
			return 0;
		break;
	default:
		return -ENXIO;
	}

	/*
	 * Invalidate the bus-level state, now that we're committed to
	 * changing the 'arm' state.
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	nfit_mem->fwa_state = NVDIMM_FWA_INVALID;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);

	dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
			? "arm" : "disarm", rc);
	return rc;
}

static const struct nvdimm_fw_ops __intel_fw_ops = {
	.activate_state = intel_fwa_state,
	.activate_result = intel_fwa_result,
	.arm = intel_fwa_arm,
};

const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;