1 /* 2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of version 2 of the GNU General Public License as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #include <linux/list_sort.h> 14 #include <linux/libnvdimm.h> 15 #include <linux/module.h> 16 #include <linux/mutex.h> 17 #include <linux/ndctl.h> 18 #include <linux/sysfs.h> 19 #include <linux/delay.h> 20 #include <linux/list.h> 21 #include <linux/acpi.h> 22 #include <linux/sort.h> 23 #include <linux/io.h> 24 #include <linux/nd.h> 25 #include <asm/cacheflush.h> 26 #include <acpi/nfit.h> 27 #include "nfit.h" 28 29 /* 30 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is 31 * irrelevant. 32 */ 33 #include <linux/io-64-nonatomic-hi-lo.h> 34 35 static bool force_enable_dimms; 36 module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); 37 MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); 38 39 static bool disable_vendor_specific; 40 module_param(disable_vendor_specific, bool, S_IRUGO); 41 MODULE_PARM_DESC(disable_vendor_specific, 42 "Limit commands to the publicly specified set"); 43 44 static unsigned long override_dsm_mask; 45 module_param(override_dsm_mask, ulong, S_IRUGO); 46 MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions"); 47 48 static int default_dsm_family = -1; 49 module_param(default_dsm_family, int, S_IRUGO); 50 MODULE_PARM_DESC(default_dsm_family, 51 "Try this DSM type first when identifying NVDIMM family"); 52 53 static bool no_init_ars; 54 module_param(no_init_ars, bool, 0644); 55 MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time"); 56 57 LIST_HEAD(acpi_descs); 58 DEFINE_MUTEX(acpi_desc_lock); 59 60 static struct workqueue_struct *nfit_wq; 61 62 struct nfit_table_prev { 63 struct list_head spas; 64 struct list_head memdevs; 65 struct list_head dcrs; 66 struct list_head bdws; 67 struct list_head idts; 68 struct list_head flushes; 69 }; 70 71 static guid_t nfit_uuid[NFIT_UUID_MAX]; 72 73 const guid_t *to_nfit_uuid(enum nfit_uuids id) 74 { 75 return &nfit_uuid[id]; 76 } 77 EXPORT_SYMBOL(to_nfit_uuid); 78 79 static struct acpi_nfit_desc *to_acpi_nfit_desc( 80 struct nvdimm_bus_descriptor *nd_desc) 81 { 82 return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); 83 } 84 85 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) 86 { 87 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 88 89 /* 90 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct 91 * acpi_device. 92 */ 93 if (!nd_desc->provider_name 94 || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0) 95 return NULL; 96 97 return to_acpi_device(acpi_desc->dev); 98 } 99 100 static int xlat_bus_status(void *buf, unsigned int cmd, u32 status) 101 { 102 struct nd_cmd_clear_error *clear_err; 103 struct nd_cmd_ars_status *ars_status; 104 u16 flags; 105 106 switch (cmd) { 107 case ND_CMD_ARS_CAP: 108 if ((status & 0xffff) == NFIT_ARS_CAP_NONE) 109 return -ENOTTY; 110 111 /* Command failed */ 112 if (status & 0xffff) 113 return -EIO; 114 115 /* No supported scan types for this range */ 116 flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE; 117 if ((status >> 16 & flags) == 0) 118 return -ENOTTY; 119 return 0; 120 case ND_CMD_ARS_START: 121 /* ARS is in progress */ 122 if ((status & 0xffff) == NFIT_ARS_START_BUSY) 123 return -EBUSY; 124 125 /* Command failed */ 126 if (status & 0xffff) 127 return -EIO; 128 return 0; 129 case ND_CMD_ARS_STATUS: 130 ars_status = buf; 131 /* Command failed */ 132 if (status & 0xffff) 133 return -EIO; 134 /* Check extended status (Upper two bytes) */ 135 if (status == NFIT_ARS_STATUS_DONE) 136 return 0; 137 138 /* ARS is in progress */ 139 if (status == NFIT_ARS_STATUS_BUSY) 140 return -EBUSY; 141 142 /* No ARS performed for the current boot */ 143 if (status == NFIT_ARS_STATUS_NONE) 144 return -EAGAIN; 145 146 /* 147 * ARS interrupted, either we overflowed or some other 148 * agent wants the scan to stop. If we didn't overflow 149 * then just continue with the returned results. 150 */ 151 if (status == NFIT_ARS_STATUS_INTR) { 152 if (ars_status->out_length >= 40 && (ars_status->flags 153 & NFIT_ARS_F_OVERFLOW)) 154 return -ENOSPC; 155 return 0; 156 } 157 158 /* Unknown status */ 159 if (status >> 16) 160 return -EIO; 161 return 0; 162 case ND_CMD_CLEAR_ERROR: 163 clear_err = buf; 164 if (status & 0xffff) 165 return -EIO; 166 if (!clear_err->cleared) 167 return -EIO; 168 if (clear_err->length > clear_err->cleared) 169 return clear_err->cleared; 170 return 0; 171 default: 172 break; 173 } 174 175 /* all other non-zero status results in an error */ 176 if (status) 177 return -EIO; 178 return 0; 179 } 180 181 #define ACPI_LABELS_LOCKED 3 182 183 static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd, 184 u32 status) 185 { 186 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 187 188 switch (cmd) { 189 case ND_CMD_GET_CONFIG_SIZE: 190 /* 191 * In the _LSI, _LSR, _LSW case the locked status is 192 * communicated via the read/write commands 193 */ 194 if (nfit_mem->has_lsr) 195 break; 196 197 if (status >> 16 & ND_CONFIG_LOCKED) 198 return -EACCES; 199 break; 200 case ND_CMD_GET_CONFIG_DATA: 201 if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED) 202 return -EACCES; 203 break; 204 case ND_CMD_SET_CONFIG_DATA: 205 if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED) 206 return -EACCES; 207 break; 208 default: 209 break; 210 } 211 212 /* all other non-zero status results in an error */ 213 if (status) 214 return -EIO; 215 return 0; 216 } 217 218 static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd, 219 u32 status) 220 { 221 if (!nvdimm) 222 return xlat_bus_status(buf, cmd, status); 223 return xlat_nvdimm_status(nvdimm, buf, cmd, status); 224 } 225 226 /* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */ 227 static union acpi_object *pkg_to_buf(union acpi_object *pkg) 228 { 229 int i; 230 void *dst; 231 size_t size = 0; 232 union acpi_object *buf = NULL; 233 234 if (pkg->type != ACPI_TYPE_PACKAGE) { 235 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", 236 pkg->type); 237 goto err; 238 } 239 240 for (i = 0; i < pkg->package.count; i++) { 241 union acpi_object *obj = &pkg->package.elements[i]; 242 243 if (obj->type == ACPI_TYPE_INTEGER) 244 size += 4; 245 else if (obj->type == ACPI_TYPE_BUFFER) 246 size += obj->buffer.length; 247 else { 248 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", 249 obj->type); 250 goto err; 251 } 252 } 253 254 buf = ACPI_ALLOCATE(sizeof(*buf) + size); 255 if (!buf) 256 goto err; 257 258 dst = buf + 1; 259 buf->type = ACPI_TYPE_BUFFER; 260 buf->buffer.length = size; 261 buf->buffer.pointer = dst; 262 for (i = 0; i < pkg->package.count; i++) { 263 union acpi_object *obj = &pkg->package.elements[i]; 264 265 if (obj->type == ACPI_TYPE_INTEGER) { 266 memcpy(dst, &obj->integer.value, 4); 267 dst += 4; 268 } else if (obj->type == ACPI_TYPE_BUFFER) { 269 memcpy(dst, obj->buffer.pointer, obj->buffer.length); 270 dst += obj->buffer.length; 271 } 272 } 273 err: 274 ACPI_FREE(pkg); 275 return buf; 276 } 277 278 static union acpi_object *int_to_buf(union acpi_object *integer) 279 { 280 union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4); 281 void *dst = NULL; 282 283 if (!buf) 284 goto err; 285 286 if (integer->type != ACPI_TYPE_INTEGER) { 287 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", 288 integer->type); 289 goto err; 290 } 291 292 dst = buf + 1; 293 buf->type = ACPI_TYPE_BUFFER; 294 buf->buffer.length = 4; 295 buf->buffer.pointer = dst; 296 memcpy(dst, &integer->integer.value, 4); 297 err: 298 ACPI_FREE(integer); 299 return buf; 300 } 301 302 static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset, 303 u32 len, void *data) 304 { 305 acpi_status rc; 306 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 307 struct acpi_object_list input = { 308 .count = 3, 309 .pointer = (union acpi_object []) { 310 [0] = { 311 .integer.type = ACPI_TYPE_INTEGER, 312 .integer.value = offset, 313 }, 314 [1] = { 315 .integer.type = ACPI_TYPE_INTEGER, 316 .integer.value = len, 317 }, 318 [2] = { 319 .buffer.type = ACPI_TYPE_BUFFER, 320 .buffer.pointer = data, 321 .buffer.length = len, 322 }, 323 }, 324 }; 325 326 rc = acpi_evaluate_object(handle, "_LSW", &input, &buf); 327 if (ACPI_FAILURE(rc)) 328 return NULL; 329 return int_to_buf(buf.pointer); 330 } 331 332 static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset, 333 u32 len) 334 { 335 acpi_status rc; 336 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 337 struct acpi_object_list input = { 338 .count = 2, 339 .pointer = (union acpi_object []) { 340 [0] = { 341 .integer.type = ACPI_TYPE_INTEGER, 342 .integer.value = offset, 343 }, 344 [1] = { 345 .integer.type = ACPI_TYPE_INTEGER, 346 .integer.value = len, 347 }, 348 }, 349 }; 350 351 rc = acpi_evaluate_object(handle, "_LSR", &input, &buf); 352 if (ACPI_FAILURE(rc)) 353 return NULL; 354 return pkg_to_buf(buf.pointer); 355 } 356 357 static union acpi_object *acpi_label_info(acpi_handle handle) 358 { 359 acpi_status rc; 360 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 361 362 rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf); 363 if (ACPI_FAILURE(rc)) 364 return NULL; 365 return pkg_to_buf(buf.pointer); 366 } 367 368 static u8 nfit_dsm_revid(unsigned family, unsigned func) 369 { 370 static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = { 371 [NVDIMM_FAMILY_INTEL] = { 372 [NVDIMM_INTEL_GET_MODES] = 2, 373 [NVDIMM_INTEL_GET_FWINFO] = 2, 374 [NVDIMM_INTEL_START_FWUPDATE] = 2, 375 [NVDIMM_INTEL_SEND_FWUPDATE] = 2, 376 [NVDIMM_INTEL_FINISH_FWUPDATE] = 2, 377 [NVDIMM_INTEL_QUERY_FWUPDATE] = 2, 378 [NVDIMM_INTEL_SET_THRESHOLD] = 2, 379 [NVDIMM_INTEL_INJECT_ERROR] = 2, 380 }, 381 }; 382 u8 id; 383 384 if (family > NVDIMM_FAMILY_MAX) 385 return 0; 386 if (func > 31) 387 return 0; 388 id = revid_table[family][func]; 389 if (id == 0) 390 return 1; /* default */ 391 return id; 392 } 393 394 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, 395 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) 396 { 397 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 398 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 399 union acpi_object in_obj, in_buf, *out_obj; 400 const struct nd_cmd_desc *desc = NULL; 401 struct device *dev = acpi_desc->dev; 402 struct nd_cmd_pkg *call_pkg = NULL; 403 const char *cmd_name, *dimm_name; 404 unsigned long cmd_mask, dsm_mask; 405 u32 offset, fw_status = 0; 406 acpi_handle handle; 407 unsigned int func; 408 const guid_t *guid; 409 int rc, i; 410 411 if (cmd_rc) 412 *cmd_rc = -EINVAL; 413 func = cmd; 414 if (cmd == ND_CMD_CALL) { 415 call_pkg = buf; 416 func = call_pkg->nd_command; 417 418 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) 419 if (call_pkg->nd_reserved2[i]) 420 return -EINVAL; 421 } 422 423 if (nvdimm) { 424 struct acpi_device *adev = nfit_mem->adev; 425 426 if (!adev) 427 return -ENOTTY; 428 if (call_pkg && nfit_mem->family != call_pkg->nd_family) 429 return -ENOTTY; 430 431 dimm_name = nvdimm_name(nvdimm); 432 cmd_name = nvdimm_cmd_name(cmd); 433 cmd_mask = nvdimm_cmd_mask(nvdimm); 434 dsm_mask = nfit_mem->dsm_mask; 435 desc = nd_cmd_dimm_desc(cmd); 436 guid = to_nfit_uuid(nfit_mem->family); 437 handle = adev->handle; 438 } else { 439 struct acpi_device *adev = to_acpi_dev(acpi_desc); 440 441 cmd_name = nvdimm_bus_cmd_name(cmd); 442 cmd_mask = nd_desc->cmd_mask; 443 dsm_mask = cmd_mask; 444 if (cmd == ND_CMD_CALL) 445 dsm_mask = nd_desc->bus_dsm_mask; 446 desc = nd_cmd_bus_desc(cmd); 447 guid = to_nfit_uuid(NFIT_DEV_BUS); 448 handle = adev->handle; 449 dimm_name = "bus"; 450 } 451 452 if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) 453 return -ENOTTY; 454 455 if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) 456 return -ENOTTY; 457 458 in_obj.type = ACPI_TYPE_PACKAGE; 459 in_obj.package.count = 1; 460 in_obj.package.elements = &in_buf; 461 in_buf.type = ACPI_TYPE_BUFFER; 462 in_buf.buffer.pointer = buf; 463 in_buf.buffer.length = 0; 464 465 /* libnvdimm has already validated the input envelope */ 466 for (i = 0; i < desc->in_num; i++) 467 in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, 468 i, buf); 469 470 if (call_pkg) { 471 /* skip over package wrapper */ 472 in_buf.buffer.pointer = (void *) &call_pkg->nd_payload; 473 in_buf.buffer.length = call_pkg->nd_size_in; 474 } 475 476 dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n", 477 dimm_name, cmd, func, in_buf.buffer.length); 478 print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, 479 in_buf.buffer.pointer, 480 min_t(u32, 256, in_buf.buffer.length), true); 481 482 /* call the BIOS, prefer the named methods over _DSM if available */ 483 if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr) 484 out_obj = acpi_label_info(handle); 485 else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) { 486 struct nd_cmd_get_config_data_hdr *p = buf; 487 488 out_obj = acpi_label_read(handle, p->in_offset, p->in_length); 489 } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA 490 && nfit_mem->has_lsw) { 491 struct nd_cmd_set_config_hdr *p = buf; 492 493 out_obj = acpi_label_write(handle, p->in_offset, p->in_length, 494 p->in_buf); 495 } else { 496 u8 revid; 497 498 if (nvdimm) 499 revid = nfit_dsm_revid(nfit_mem->family, func); 500 else 501 revid = 1; 502 out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj); 503 } 504 505 if (!out_obj) { 506 dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name); 507 return -EINVAL; 508 } 509 510 if (call_pkg) { 511 call_pkg->nd_fw_size = out_obj->buffer.length; 512 memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, 513 out_obj->buffer.pointer, 514 min(call_pkg->nd_fw_size, call_pkg->nd_size_out)); 515 516 ACPI_FREE(out_obj); 517 /* 518 * Need to support FW function w/o known size in advance. 519 * Caller can determine required size based upon nd_fw_size. 520 * If we return an error (like elsewhere) then caller wouldn't 521 * be able to rely upon data returned to make calculation. 522 */ 523 if (cmd_rc) 524 *cmd_rc = 0; 525 return 0; 526 } 527 528 if (out_obj->package.type != ACPI_TYPE_BUFFER) { 529 dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n", 530 dimm_name, cmd_name, out_obj->type); 531 rc = -EINVAL; 532 goto out; 533 } 534 535 dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, 536 cmd_name, out_obj->buffer.length); 537 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, 538 out_obj->buffer.pointer, 539 min_t(u32, 128, out_obj->buffer.length), true); 540 541 for (i = 0, offset = 0; i < desc->out_num; i++) { 542 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, 543 (u32 *) out_obj->buffer.pointer, 544 out_obj->buffer.length - offset); 545 546 if (offset + out_size > out_obj->buffer.length) { 547 dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n", 548 dimm_name, cmd_name, i); 549 break; 550 } 551 552 if (in_buf.buffer.length + offset + out_size > buf_len) { 553 dev_dbg(dev, "%s output overrun cmd: %s field: %d\n", 554 dimm_name, cmd_name, i); 555 rc = -ENXIO; 556 goto out; 557 } 558 memcpy(buf + in_buf.buffer.length + offset, 559 out_obj->buffer.pointer + offset, out_size); 560 offset += out_size; 561 } 562 563 /* 564 * Set fw_status for all the commands with a known format to be 565 * later interpreted by xlat_status(). 566 */ 567 if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP 568 && cmd <= ND_CMD_CLEAR_ERROR) 569 || (nvdimm && cmd >= ND_CMD_SMART 570 && cmd <= ND_CMD_VENDOR))) 571 fw_status = *(u32 *) out_obj->buffer.pointer; 572 573 if (offset + in_buf.buffer.length < buf_len) { 574 if (i >= 1) { 575 /* 576 * status valid, return the number of bytes left 577 * unfilled in the output buffer 578 */ 579 rc = buf_len - offset - in_buf.buffer.length; 580 if (cmd_rc) 581 *cmd_rc = xlat_status(nvdimm, buf, cmd, 582 fw_status); 583 } else { 584 dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", 585 __func__, dimm_name, cmd_name, buf_len, 586 offset); 587 rc = -ENXIO; 588 } 589 } else { 590 rc = 0; 591 if (cmd_rc) 592 *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status); 593 } 594 595 out: 596 ACPI_FREE(out_obj); 597 598 return rc; 599 } 600 EXPORT_SYMBOL_GPL(acpi_nfit_ctl); 601 602 static const char *spa_type_name(u16 type) 603 { 604 static const char *to_name[] = { 605 [NFIT_SPA_VOLATILE] = "volatile", 606 [NFIT_SPA_PM] = "pmem", 607 [NFIT_SPA_DCR] = "dimm-control-region", 608 [NFIT_SPA_BDW] = "block-data-window", 609 [NFIT_SPA_VDISK] = "volatile-disk", 610 [NFIT_SPA_VCD] = "volatile-cd", 611 [NFIT_SPA_PDISK] = "persistent-disk", 612 [NFIT_SPA_PCD] = "persistent-cd", 613 614 }; 615 616 if (type > NFIT_SPA_PCD) 617 return "unknown"; 618 619 return to_name[type]; 620 } 621 622 int nfit_spa_type(struct acpi_nfit_system_address *spa) 623 { 624 int i; 625 626 for (i = 0; i < NFIT_UUID_MAX; i++) 627 if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid)) 628 return i; 629 return -1; 630 } 631 632 static bool add_spa(struct acpi_nfit_desc *acpi_desc, 633 struct nfit_table_prev *prev, 634 struct acpi_nfit_system_address *spa) 635 { 636 struct device *dev = acpi_desc->dev; 637 struct nfit_spa *nfit_spa; 638 639 if (spa->header.length != sizeof(*spa)) 640 return false; 641 642 list_for_each_entry(nfit_spa, &prev->spas, list) { 643 if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { 644 list_move_tail(&nfit_spa->list, &acpi_desc->spas); 645 return true; 646 } 647 } 648 649 nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa), 650 GFP_KERNEL); 651 if (!nfit_spa) 652 return false; 653 INIT_LIST_HEAD(&nfit_spa->list); 654 memcpy(nfit_spa->spa, spa, sizeof(*spa)); 655 list_add_tail(&nfit_spa->list, &acpi_desc->spas); 656 dev_dbg(dev, "spa index: %d type: %s\n", 657 spa->range_index, 658 spa_type_name(nfit_spa_type(spa))); 659 return true; 660 } 661 662 static bool add_memdev(struct acpi_nfit_desc *acpi_desc, 663 struct nfit_table_prev *prev, 664 struct acpi_nfit_memory_map *memdev) 665 { 666 struct device *dev = acpi_desc->dev; 667 struct nfit_memdev *nfit_memdev; 668 669 if (memdev->header.length != sizeof(*memdev)) 670 return false; 671 672 list_for_each_entry(nfit_memdev, &prev->memdevs, list) 673 if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { 674 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); 675 return true; 676 } 677 678 nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev), 679 GFP_KERNEL); 680 if (!nfit_memdev) 681 return false; 682 INIT_LIST_HEAD(&nfit_memdev->list); 683 memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); 684 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); 685 dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n", 686 memdev->device_handle, memdev->range_index, 687 memdev->region_index, memdev->flags); 688 return true; 689 } 690 691 int nfit_get_smbios_id(u32 device_handle, u16 *flags) 692 { 693 struct acpi_nfit_memory_map *memdev; 694 struct acpi_nfit_desc *acpi_desc; 695 struct nfit_mem *nfit_mem; 696 697 mutex_lock(&acpi_desc_lock); 698 list_for_each_entry(acpi_desc, &acpi_descs, list) { 699 mutex_lock(&acpi_desc->init_mutex); 700 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 701 memdev = __to_nfit_memdev(nfit_mem); 702 if (memdev->device_handle == device_handle) { 703 mutex_unlock(&acpi_desc->init_mutex); 704 mutex_unlock(&acpi_desc_lock); 705 *flags = memdev->flags; 706 return memdev->physical_id; 707 } 708 } 709 mutex_unlock(&acpi_desc->init_mutex); 710 } 711 mutex_unlock(&acpi_desc_lock); 712 713 return -ENODEV; 714 } 715 EXPORT_SYMBOL_GPL(nfit_get_smbios_id); 716 717 /* 718 * An implementation may provide a truncated control region if no block windows 719 * are defined. 720 */ 721 static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) 722 { 723 if (dcr->header.length < offsetof(struct acpi_nfit_control_region, 724 window_size)) 725 return 0; 726 if (dcr->windows) 727 return sizeof(*dcr); 728 return offsetof(struct acpi_nfit_control_region, window_size); 729 } 730 731 static bool add_dcr(struct acpi_nfit_desc *acpi_desc, 732 struct nfit_table_prev *prev, 733 struct acpi_nfit_control_region *dcr) 734 { 735 struct device *dev = acpi_desc->dev; 736 struct nfit_dcr *nfit_dcr; 737 738 if (!sizeof_dcr(dcr)) 739 return false; 740 741 list_for_each_entry(nfit_dcr, &prev->dcrs, list) 742 if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) { 743 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); 744 return true; 745 } 746 747 nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr), 748 GFP_KERNEL); 749 if (!nfit_dcr) 750 return false; 751 INIT_LIST_HEAD(&nfit_dcr->list); 752 memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); 753 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); 754 dev_dbg(dev, "dcr index: %d windows: %d\n", 755 dcr->region_index, dcr->windows); 756 return true; 757 } 758 759 static bool add_bdw(struct acpi_nfit_desc *acpi_desc, 760 struct nfit_table_prev *prev, 761 struct acpi_nfit_data_region *bdw) 762 { 763 struct device *dev = acpi_desc->dev; 764 struct nfit_bdw *nfit_bdw; 765 766 if (bdw->header.length != sizeof(*bdw)) 767 return false; 768 list_for_each_entry(nfit_bdw, &prev->bdws, list) 769 if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { 770 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); 771 return true; 772 } 773 774 nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw), 775 GFP_KERNEL); 776 if (!nfit_bdw) 777 return false; 778 INIT_LIST_HEAD(&nfit_bdw->list); 779 memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); 780 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); 781 dev_dbg(dev, "bdw dcr: %d windows: %d\n", 782 bdw->region_index, bdw->windows); 783 return true; 784 } 785 786 static size_t sizeof_idt(struct acpi_nfit_interleave *idt) 787 { 788 if (idt->header.length < sizeof(*idt)) 789 return 0; 790 return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); 791 } 792 793 static bool add_idt(struct acpi_nfit_desc *acpi_desc, 794 struct nfit_table_prev *prev, 795 struct acpi_nfit_interleave *idt) 796 { 797 struct device *dev = acpi_desc->dev; 798 struct nfit_idt *nfit_idt; 799 800 if (!sizeof_idt(idt)) 801 return false; 802 803 list_for_each_entry(nfit_idt, &prev->idts, list) { 804 if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt)) 805 continue; 806 807 if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) { 808 list_move_tail(&nfit_idt->list, &acpi_desc->idts); 809 return true; 810 } 811 } 812 813 nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt), 814 GFP_KERNEL); 815 if (!nfit_idt) 816 return false; 817 INIT_LIST_HEAD(&nfit_idt->list); 818 memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); 819 list_add_tail(&nfit_idt->list, &acpi_desc->idts); 820 dev_dbg(dev, "idt index: %d num_lines: %d\n", 821 idt->interleave_index, idt->line_count); 822 return true; 823 } 824 825 static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) 826 { 827 if (flush->header.length < sizeof(*flush)) 828 return 0; 829 return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1); 830 } 831 832 static bool add_flush(struct acpi_nfit_desc *acpi_desc, 833 struct nfit_table_prev *prev, 834 struct acpi_nfit_flush_address *flush) 835 { 836 struct device *dev = acpi_desc->dev; 837 struct nfit_flush *nfit_flush; 838 839 if (!sizeof_flush(flush)) 840 return false; 841 842 list_for_each_entry(nfit_flush, &prev->flushes, list) { 843 if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush)) 844 continue; 845 846 if (memcmp(nfit_flush->flush, flush, 847 sizeof_flush(flush)) == 0) { 848 list_move_tail(&nfit_flush->list, &acpi_desc->flushes); 849 return true; 850 } 851 } 852 853 nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush) 854 + sizeof_flush(flush), GFP_KERNEL); 855 if (!nfit_flush) 856 return false; 857 INIT_LIST_HEAD(&nfit_flush->list); 858 memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); 859 list_add_tail(&nfit_flush->list, &acpi_desc->flushes); 860 dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n", 861 flush->device_handle, flush->hint_count); 862 return true; 863 } 864 865 static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc, 866 struct acpi_nfit_capabilities *pcap) 867 { 868 struct device *dev = acpi_desc->dev; 869 u32 mask; 870 871 mask = (1 << (pcap->highest_capability + 1)) - 1; 872 acpi_desc->platform_cap = pcap->capabilities & mask; 873 dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap); 874 return true; 875 } 876 877 static void *add_table(struct acpi_nfit_desc *acpi_desc, 878 struct nfit_table_prev *prev, void *table, const void *end) 879 { 880 struct device *dev = acpi_desc->dev; 881 struct acpi_nfit_header *hdr; 882 void *err = ERR_PTR(-ENOMEM); 883 884 if (table >= end) 885 return NULL; 886 887 hdr = table; 888 if (!hdr->length) { 889 dev_warn(dev, "found a zero length table '%d' parsing nfit\n", 890 hdr->type); 891 return NULL; 892 } 893 894 switch (hdr->type) { 895 case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: 896 if (!add_spa(acpi_desc, prev, table)) 897 return err; 898 break; 899 case ACPI_NFIT_TYPE_MEMORY_MAP: 900 if (!add_memdev(acpi_desc, prev, table)) 901 return err; 902 break; 903 case ACPI_NFIT_TYPE_CONTROL_REGION: 904 if (!add_dcr(acpi_desc, prev, table)) 905 return err; 906 break; 907 case ACPI_NFIT_TYPE_DATA_REGION: 908 if (!add_bdw(acpi_desc, prev, table)) 909 return err; 910 break; 911 case ACPI_NFIT_TYPE_INTERLEAVE: 912 if (!add_idt(acpi_desc, prev, table)) 913 return err; 914 break; 915 case ACPI_NFIT_TYPE_FLUSH_ADDRESS: 916 if (!add_flush(acpi_desc, prev, table)) 917 return err; 918 break; 919 case ACPI_NFIT_TYPE_SMBIOS: 920 dev_dbg(dev, "smbios\n"); 921 break; 922 case ACPI_NFIT_TYPE_CAPABILITIES: 923 if (!add_platform_cap(acpi_desc, table)) 924 return err; 925 break; 926 default: 927 dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type); 928 break; 929 } 930 931 return table + hdr->length; 932 } 933 934 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, 935 struct nfit_mem *nfit_mem) 936 { 937 u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 938 u16 dcr = nfit_mem->dcr->region_index; 939 struct nfit_spa *nfit_spa; 940 941 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 942 u16 range_index = nfit_spa->spa->range_index; 943 int type = nfit_spa_type(nfit_spa->spa); 944 struct nfit_memdev *nfit_memdev; 945 946 if (type != NFIT_SPA_BDW) 947 continue; 948 949 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 950 if (nfit_memdev->memdev->range_index != range_index) 951 continue; 952 if (nfit_memdev->memdev->device_handle != device_handle) 953 continue; 954 if (nfit_memdev->memdev->region_index != dcr) 955 continue; 956 957 nfit_mem->spa_bdw = nfit_spa->spa; 958 return; 959 } 960 } 961 962 dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", 963 nfit_mem->spa_dcr->range_index); 964 nfit_mem->bdw = NULL; 965 } 966 967 static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, 968 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) 969 { 970 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; 971 struct nfit_memdev *nfit_memdev; 972 struct nfit_bdw *nfit_bdw; 973 struct nfit_idt *nfit_idt; 974 u16 idt_idx, range_index; 975 976 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { 977 if (nfit_bdw->bdw->region_index != dcr) 978 continue; 979 nfit_mem->bdw = nfit_bdw->bdw; 980 break; 981 } 982 983 if (!nfit_mem->bdw) 984 return; 985 986 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); 987 988 if (!nfit_mem->spa_bdw) 989 return; 990 991 range_index = nfit_mem->spa_bdw->range_index; 992 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 993 if (nfit_memdev->memdev->range_index != range_index || 994 nfit_memdev->memdev->region_index != dcr) 995 continue; 996 nfit_mem->memdev_bdw = nfit_memdev->memdev; 997 idt_idx = nfit_memdev->memdev->interleave_index; 998 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 999 if (nfit_idt->idt->interleave_index != idt_idx) 1000 continue; 1001 nfit_mem->idt_bdw = nfit_idt->idt; 1002 break; 1003 } 1004 break; 1005 } 1006 } 1007 1008 static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, 1009 struct acpi_nfit_system_address *spa) 1010 { 1011 struct nfit_mem *nfit_mem, *found; 1012 struct nfit_memdev *nfit_memdev; 1013 int type = spa ? nfit_spa_type(spa) : 0; 1014 1015 switch (type) { 1016 case NFIT_SPA_DCR: 1017 case NFIT_SPA_PM: 1018 break; 1019 default: 1020 if (spa) 1021 return 0; 1022 } 1023 1024 /* 1025 * This loop runs in two modes, when a dimm is mapped the loop 1026 * adds memdev associations to an existing dimm, or creates a 1027 * dimm. In the unmapped dimm case this loop sweeps for memdev 1028 * instances with an invalid / zero range_index and adds those 1029 * dimms without spa associations. 1030 */ 1031 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1032 struct nfit_flush *nfit_flush; 1033 struct nfit_dcr *nfit_dcr; 1034 u32 device_handle; 1035 u16 dcr; 1036 1037 if (spa && nfit_memdev->memdev->range_index != spa->range_index) 1038 continue; 1039 if (!spa && nfit_memdev->memdev->range_index) 1040 continue; 1041 found = NULL; 1042 dcr = nfit_memdev->memdev->region_index; 1043 device_handle = nfit_memdev->memdev->device_handle; 1044 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 1045 if (__to_nfit_memdev(nfit_mem)->device_handle 1046 == device_handle) { 1047 found = nfit_mem; 1048 break; 1049 } 1050 1051 if (found) 1052 nfit_mem = found; 1053 else { 1054 nfit_mem = devm_kzalloc(acpi_desc->dev, 1055 sizeof(*nfit_mem), GFP_KERNEL); 1056 if (!nfit_mem) 1057 return -ENOMEM; 1058 INIT_LIST_HEAD(&nfit_mem->list); 1059 nfit_mem->acpi_desc = acpi_desc; 1060 list_add(&nfit_mem->list, &acpi_desc->dimms); 1061 } 1062 1063 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 1064 if (nfit_dcr->dcr->region_index != dcr) 1065 continue; 1066 /* 1067 * Record the control region for the dimm. For 1068 * the ACPI 6.1 case, where there are separate 1069 * control regions for the pmem vs blk 1070 * interfaces, be sure to record the extended 1071 * blk details. 1072 */ 1073 if (!nfit_mem->dcr) 1074 nfit_mem->dcr = nfit_dcr->dcr; 1075 else if (nfit_mem->dcr->windows == 0 1076 && nfit_dcr->dcr->windows) 1077 nfit_mem->dcr = nfit_dcr->dcr; 1078 break; 1079 } 1080 1081 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { 1082 struct acpi_nfit_flush_address *flush; 1083 u16 i; 1084 1085 if (nfit_flush->flush->device_handle != device_handle) 1086 continue; 1087 nfit_mem->nfit_flush = nfit_flush; 1088 flush = nfit_flush->flush; 1089 nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev, 1090 flush->hint_count, 1091 sizeof(struct resource), 1092 GFP_KERNEL); 1093 if (!nfit_mem->flush_wpq) 1094 return -ENOMEM; 1095 for (i = 0; i < flush->hint_count; i++) { 1096 struct resource *res = &nfit_mem->flush_wpq[i]; 1097 1098 res->start = flush->hint_address[i]; 1099 res->end = res->start + 8 - 1; 1100 } 1101 break; 1102 } 1103 1104 if (dcr && !nfit_mem->dcr) { 1105 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", 1106 spa->range_index, dcr); 1107 return -ENODEV; 1108 } 1109 1110 if (type == NFIT_SPA_DCR) { 1111 struct nfit_idt *nfit_idt; 1112 u16 idt_idx; 1113 1114 /* multiple dimms may share a SPA when interleaved */ 1115 nfit_mem->spa_dcr = spa; 1116 nfit_mem->memdev_dcr = nfit_memdev->memdev; 1117 idt_idx = nfit_memdev->memdev->interleave_index; 1118 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 1119 if (nfit_idt->idt->interleave_index != idt_idx) 1120 continue; 1121 nfit_mem->idt_dcr = nfit_idt->idt; 1122 break; 1123 } 1124 nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); 1125 } else if (type == NFIT_SPA_PM) { 1126 /* 1127 * A single dimm may belong to multiple SPA-PM 1128 * ranges, record at least one in addition to 1129 * any SPA-DCR range. 1130 */ 1131 nfit_mem->memdev_pmem = nfit_memdev->memdev; 1132 } else 1133 nfit_mem->memdev_dcr = nfit_memdev->memdev; 1134 } 1135 1136 return 0; 1137 } 1138 1139 static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) 1140 { 1141 struct nfit_mem *a = container_of(_a, typeof(*a), list); 1142 struct nfit_mem *b = container_of(_b, typeof(*b), list); 1143 u32 handleA, handleB; 1144 1145 handleA = __to_nfit_memdev(a)->device_handle; 1146 handleB = __to_nfit_memdev(b)->device_handle; 1147 if (handleA < handleB) 1148 return -1; 1149 else if (handleA > handleB) 1150 return 1; 1151 return 0; 1152 } 1153 1154 static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) 1155 { 1156 struct nfit_spa *nfit_spa; 1157 int rc; 1158 1159 1160 /* 1161 * For each SPA-DCR or SPA-PMEM address range find its 1162 * corresponding MEMDEV(s). From each MEMDEV find the 1163 * corresponding DCR. Then, if we're operating on a SPA-DCR, 1164 * try to find a SPA-BDW and a corresponding BDW that references 1165 * the DCR. Throw it all into an nfit_mem object. Note, that 1166 * BDWs are optional. 1167 */ 1168 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 1169 rc = __nfit_mem_init(acpi_desc, nfit_spa->spa); 1170 if (rc) 1171 return rc; 1172 } 1173 1174 /* 1175 * If a DIMM has failed to be mapped into SPA there will be no 1176 * SPA entries above. Find and register all the unmapped DIMMs 1177 * for reporting and recovery purposes. 1178 */ 1179 rc = __nfit_mem_init(acpi_desc, NULL); 1180 if (rc) 1181 return rc; 1182 1183 list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); 1184 1185 return 0; 1186 } 1187 1188 static ssize_t bus_dsm_mask_show(struct device *dev, 1189 struct device_attribute *attr, char *buf) 1190 { 1191 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1192 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1193 1194 return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask); 1195 } 1196 static struct device_attribute dev_attr_bus_dsm_mask = 1197 __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); 1198 1199 static ssize_t revision_show(struct device *dev, 1200 struct device_attribute *attr, char *buf) 1201 { 1202 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1203 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1204 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1205 1206 return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); 1207 } 1208 static DEVICE_ATTR_RO(revision); 1209 1210 static ssize_t hw_error_scrub_show(struct device *dev, 1211 struct device_attribute *attr, char *buf) 1212 { 1213 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1214 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1215 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1216 1217 return sprintf(buf, "%d\n", acpi_desc->scrub_mode); 1218 } 1219 1220 /* 1221 * The 'hw_error_scrub' attribute can have the following values written to it: 1222 * '0': Switch to the default mode where an exception will only insert 1223 * the address of the memory error into the poison and badblocks lists. 1224 * '1': Enable a full scrub to happen if an exception for a memory error is 1225 * received. 1226 */ 1227 static ssize_t hw_error_scrub_store(struct device *dev, 1228 struct device_attribute *attr, const char *buf, size_t size) 1229 { 1230 struct nvdimm_bus_descriptor *nd_desc; 1231 ssize_t rc; 1232 long val; 1233 1234 rc = kstrtol(buf, 0, &val); 1235 if (rc) 1236 return rc; 1237 1238 device_lock(dev); 1239 nd_desc = dev_get_drvdata(dev); 1240 if (nd_desc) { 1241 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1242 1243 switch (val) { 1244 case HW_ERROR_SCRUB_ON: 1245 acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON; 1246 break; 1247 case HW_ERROR_SCRUB_OFF: 1248 acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF; 1249 break; 1250 default: 1251 rc = -EINVAL; 1252 break; 1253 } 1254 } 1255 device_unlock(dev); 1256 if (rc) 1257 return rc; 1258 return size; 1259 } 1260 static DEVICE_ATTR_RW(hw_error_scrub); 1261 1262 /* 1263 * This shows the number of full Address Range Scrubs that have been 1264 * completed since driver load time. Userspace can wait on this using 1265 * select/poll etc. A '+' at the end indicates an ARS is in progress 1266 */ 1267 static ssize_t scrub_show(struct device *dev, 1268 struct device_attribute *attr, char *buf) 1269 { 1270 struct nvdimm_bus_descriptor *nd_desc; 1271 ssize_t rc = -ENXIO; 1272 1273 device_lock(dev); 1274 nd_desc = dev_get_drvdata(dev); 1275 if (nd_desc) { 1276 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1277 1278 mutex_lock(&acpi_desc->init_mutex); 1279 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, 1280 acpi_desc->scrub_busy 1281 && !acpi_desc->cancel ? "+\n" : "\n"); 1282 mutex_unlock(&acpi_desc->init_mutex); 1283 } 1284 device_unlock(dev); 1285 return rc; 1286 } 1287 1288 static ssize_t scrub_store(struct device *dev, 1289 struct device_attribute *attr, const char *buf, size_t size) 1290 { 1291 struct nvdimm_bus_descriptor *nd_desc; 1292 ssize_t rc; 1293 long val; 1294 1295 rc = kstrtol(buf, 0, &val); 1296 if (rc) 1297 return rc; 1298 if (val != 1) 1299 return -EINVAL; 1300 1301 device_lock(dev); 1302 nd_desc = dev_get_drvdata(dev); 1303 if (nd_desc) { 1304 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1305 1306 rc = acpi_nfit_ars_rescan(acpi_desc, 0); 1307 } 1308 device_unlock(dev); 1309 if (rc) 1310 return rc; 1311 return size; 1312 } 1313 static DEVICE_ATTR_RW(scrub); 1314 1315 static bool ars_supported(struct nvdimm_bus *nvdimm_bus) 1316 { 1317 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1318 const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START 1319 | 1 << ND_CMD_ARS_STATUS; 1320 1321 return (nd_desc->cmd_mask & mask) == mask; 1322 } 1323 1324 static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) 1325 { 1326 struct device *dev = container_of(kobj, struct device, kobj); 1327 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 1328 1329 if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) 1330 return 0; 1331 return a->mode; 1332 } 1333 1334 static struct attribute *acpi_nfit_attributes[] = { 1335 &dev_attr_revision.attr, 1336 &dev_attr_scrub.attr, 1337 &dev_attr_hw_error_scrub.attr, 1338 &dev_attr_bus_dsm_mask.attr, 1339 NULL, 1340 }; 1341 1342 static const struct attribute_group acpi_nfit_attribute_group = { 1343 .name = "nfit", 1344 .attrs = acpi_nfit_attributes, 1345 .is_visible = nfit_visible, 1346 }; 1347 1348 static const struct attribute_group *acpi_nfit_attribute_groups[] = { 1349 &nvdimm_bus_attribute_group, 1350 &acpi_nfit_attribute_group, 1351 NULL, 1352 }; 1353 1354 static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) 1355 { 1356 struct nvdimm *nvdimm = to_nvdimm(dev); 1357 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1358 1359 return __to_nfit_memdev(nfit_mem); 1360 } 1361 1362 static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) 1363 { 1364 struct nvdimm *nvdimm = to_nvdimm(dev); 1365 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1366 1367 return nfit_mem->dcr; 1368 } 1369 1370 static ssize_t handle_show(struct device *dev, 1371 struct device_attribute *attr, char *buf) 1372 { 1373 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 1374 1375 return sprintf(buf, "%#x\n", memdev->device_handle); 1376 } 1377 static DEVICE_ATTR_RO(handle); 1378 1379 static ssize_t phys_id_show(struct device *dev, 1380 struct device_attribute *attr, char *buf) 1381 { 1382 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 1383 1384 return sprintf(buf, "%#x\n", memdev->physical_id); 1385 } 1386 static DEVICE_ATTR_RO(phys_id); 1387 1388 static ssize_t vendor_show(struct device *dev, 1389 struct device_attribute *attr, char *buf) 1390 { 1391 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1392 1393 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); 1394 } 1395 static DEVICE_ATTR_RO(vendor); 1396 1397 static ssize_t rev_id_show(struct device *dev, 1398 struct device_attribute *attr, char *buf) 1399 { 1400 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1401 1402 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); 1403 } 1404 static DEVICE_ATTR_RO(rev_id); 1405 1406 static ssize_t device_show(struct device *dev, 1407 struct device_attribute *attr, char *buf) 1408 { 1409 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1410 1411 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); 1412 } 1413 static DEVICE_ATTR_RO(device); 1414 1415 static ssize_t subsystem_vendor_show(struct device *dev, 1416 struct device_attribute *attr, char *buf) 1417 { 1418 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1419 1420 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); 1421 } 1422 static DEVICE_ATTR_RO(subsystem_vendor); 1423 1424 static ssize_t subsystem_rev_id_show(struct device *dev, 1425 struct device_attribute *attr, char *buf) 1426 { 1427 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1428 1429 return sprintf(buf, "0x%04x\n", 1430 be16_to_cpu(dcr->subsystem_revision_id)); 1431 } 1432 static DEVICE_ATTR_RO(subsystem_rev_id); 1433 1434 static ssize_t subsystem_device_show(struct device *dev, 1435 struct device_attribute *attr, char *buf) 1436 { 1437 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1438 1439 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); 1440 } 1441 static DEVICE_ATTR_RO(subsystem_device); 1442 1443 static int num_nvdimm_formats(struct nvdimm *nvdimm) 1444 { 1445 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1446 int formats = 0; 1447 1448 if (nfit_mem->memdev_pmem) 1449 formats++; 1450 if (nfit_mem->memdev_bdw) 1451 formats++; 1452 return formats; 1453 } 1454 1455 static ssize_t format_show(struct device *dev, 1456 struct device_attribute *attr, char *buf) 1457 { 1458 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1459 1460 return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); 1461 } 1462 static DEVICE_ATTR_RO(format); 1463 1464 static ssize_t format1_show(struct device *dev, 1465 struct device_attribute *attr, char *buf) 1466 { 1467 u32 handle; 1468 ssize_t rc = -ENXIO; 1469 struct nfit_mem *nfit_mem; 1470 struct nfit_memdev *nfit_memdev; 1471 struct acpi_nfit_desc *acpi_desc; 1472 struct nvdimm *nvdimm = to_nvdimm(dev); 1473 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1474 1475 nfit_mem = nvdimm_provider_data(nvdimm); 1476 acpi_desc = nfit_mem->acpi_desc; 1477 handle = to_nfit_memdev(dev)->device_handle; 1478 1479 /* assumes DIMMs have at most 2 published interface codes */ 1480 mutex_lock(&acpi_desc->init_mutex); 1481 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1482 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 1483 struct nfit_dcr *nfit_dcr; 1484 1485 if (memdev->device_handle != handle) 1486 continue; 1487 1488 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 1489 if (nfit_dcr->dcr->region_index != memdev->region_index) 1490 continue; 1491 if (nfit_dcr->dcr->code == dcr->code) 1492 continue; 1493 rc = sprintf(buf, "0x%04x\n", 1494 le16_to_cpu(nfit_dcr->dcr->code)); 1495 break; 1496 } 1497 if (rc != ENXIO) 1498 break; 1499 } 1500 mutex_unlock(&acpi_desc->init_mutex); 1501 return rc; 1502 } 1503 static DEVICE_ATTR_RO(format1); 1504 1505 static ssize_t formats_show(struct device *dev, 1506 struct device_attribute *attr, char *buf) 1507 { 1508 struct nvdimm *nvdimm = to_nvdimm(dev); 1509 1510 return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); 1511 } 1512 static DEVICE_ATTR_RO(formats); 1513 1514 static ssize_t serial_show(struct device *dev, 1515 struct device_attribute *attr, char *buf) 1516 { 1517 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1518 1519 return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); 1520 } 1521 static DEVICE_ATTR_RO(serial); 1522 1523 static ssize_t family_show(struct device *dev, 1524 struct device_attribute *attr, char *buf) 1525 { 1526 struct nvdimm *nvdimm = to_nvdimm(dev); 1527 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1528 1529 if (nfit_mem->family < 0) 1530 return -ENXIO; 1531 return sprintf(buf, "%d\n", nfit_mem->family); 1532 } 1533 static DEVICE_ATTR_RO(family); 1534 1535 static ssize_t dsm_mask_show(struct device *dev, 1536 struct device_attribute *attr, char *buf) 1537 { 1538 struct nvdimm *nvdimm = to_nvdimm(dev); 1539 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1540 1541 if (nfit_mem->family < 0) 1542 return -ENXIO; 1543 return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); 1544 } 1545 static DEVICE_ATTR_RO(dsm_mask); 1546 1547 static ssize_t flags_show(struct device *dev, 1548 struct device_attribute *attr, char *buf) 1549 { 1550 u16 flags = to_nfit_memdev(dev)->flags; 1551 1552 return sprintf(buf, "%s%s%s%s%s%s%s\n", 1553 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", 1554 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", 1555 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", 1556 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", 1557 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "", 1558 flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "", 1559 flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : ""); 1560 } 1561 static DEVICE_ATTR_RO(flags); 1562 1563 static ssize_t id_show(struct device *dev, 1564 struct device_attribute *attr, char *buf) 1565 { 1566 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1567 1568 if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) 1569 return sprintf(buf, "%04x-%02x-%04x-%08x\n", 1570 be16_to_cpu(dcr->vendor_id), 1571 dcr->manufacturing_location, 1572 be16_to_cpu(dcr->manufacturing_date), 1573 be32_to_cpu(dcr->serial_number)); 1574 else 1575 return sprintf(buf, "%04x-%08x\n", 1576 be16_to_cpu(dcr->vendor_id), 1577 be32_to_cpu(dcr->serial_number)); 1578 } 1579 static DEVICE_ATTR_RO(id); 1580 1581 static struct attribute *acpi_nfit_dimm_attributes[] = { 1582 &dev_attr_handle.attr, 1583 &dev_attr_phys_id.attr, 1584 &dev_attr_vendor.attr, 1585 &dev_attr_device.attr, 1586 &dev_attr_rev_id.attr, 1587 &dev_attr_subsystem_vendor.attr, 1588 &dev_attr_subsystem_device.attr, 1589 &dev_attr_subsystem_rev_id.attr, 1590 &dev_attr_format.attr, 1591 &dev_attr_formats.attr, 1592 &dev_attr_format1.attr, 1593 &dev_attr_serial.attr, 1594 &dev_attr_flags.attr, 1595 &dev_attr_id.attr, 1596 &dev_attr_family.attr, 1597 &dev_attr_dsm_mask.attr, 1598 NULL, 1599 }; 1600 1601 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, 1602 struct attribute *a, int n) 1603 { 1604 struct device *dev = container_of(kobj, struct device, kobj); 1605 struct nvdimm *nvdimm = to_nvdimm(dev); 1606 1607 if (!to_nfit_dcr(dev)) { 1608 /* Without a dcr only the memdev attributes can be surfaced */ 1609 if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr 1610 || a == &dev_attr_flags.attr 1611 || a == &dev_attr_family.attr 1612 || a == &dev_attr_dsm_mask.attr) 1613 return a->mode; 1614 return 0; 1615 } 1616 1617 if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) 1618 return 0; 1619 return a->mode; 1620 } 1621 1622 static const struct attribute_group acpi_nfit_dimm_attribute_group = { 1623 .name = "nfit", 1624 .attrs = acpi_nfit_dimm_attributes, 1625 .is_visible = acpi_nfit_dimm_attr_visible, 1626 }; 1627 1628 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { 1629 &nvdimm_attribute_group, 1630 &nd_device_attribute_group, 1631 &acpi_nfit_dimm_attribute_group, 1632 NULL, 1633 }; 1634 1635 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, 1636 u32 device_handle) 1637 { 1638 struct nfit_mem *nfit_mem; 1639 1640 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 1641 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) 1642 return nfit_mem->nvdimm; 1643 1644 return NULL; 1645 } 1646 1647 void __acpi_nvdimm_notify(struct device *dev, u32 event) 1648 { 1649 struct nfit_mem *nfit_mem; 1650 struct acpi_nfit_desc *acpi_desc; 1651 1652 dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev), 1653 event); 1654 1655 if (event != NFIT_NOTIFY_DIMM_HEALTH) { 1656 dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev), 1657 event); 1658 return; 1659 } 1660 1661 acpi_desc = dev_get_drvdata(dev->parent); 1662 if (!acpi_desc) 1663 return; 1664 1665 /* 1666 * If we successfully retrieved acpi_desc, then we know nfit_mem data 1667 * is still valid. 1668 */ 1669 nfit_mem = dev_get_drvdata(dev); 1670 if (nfit_mem && nfit_mem->flags_attr) 1671 sysfs_notify_dirent(nfit_mem->flags_attr); 1672 } 1673 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify); 1674 1675 static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) 1676 { 1677 struct acpi_device *adev = data; 1678 struct device *dev = &adev->dev; 1679 1680 device_lock(dev->parent); 1681 __acpi_nvdimm_notify(dev, event); 1682 device_unlock(dev->parent); 1683 } 1684 1685 static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) 1686 { 1687 acpi_handle handle; 1688 acpi_status status; 1689 1690 status = acpi_get_handle(adev->handle, method, &handle); 1691 1692 if (ACPI_SUCCESS(status)) 1693 return true; 1694 return false; 1695 } 1696 1697 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, 1698 struct nfit_mem *nfit_mem, u32 device_handle) 1699 { 1700 struct acpi_device *adev, *adev_dimm; 1701 struct device *dev = acpi_desc->dev; 1702 unsigned long dsm_mask; 1703 const guid_t *guid; 1704 int i; 1705 int family = -1; 1706 1707 /* nfit test assumes 1:1 relationship between commands and dsms */ 1708 nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; 1709 nfit_mem->family = NVDIMM_FAMILY_INTEL; 1710 adev = to_acpi_dev(acpi_desc); 1711 if (!adev) 1712 return 0; 1713 1714 adev_dimm = acpi_find_child_device(adev, device_handle, false); 1715 nfit_mem->adev = adev_dimm; 1716 if (!adev_dimm) { 1717 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", 1718 device_handle); 1719 return force_enable_dimms ? 0 : -ENODEV; 1720 } 1721 1722 if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle, 1723 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) { 1724 dev_err(dev, "%s: notification registration failed\n", 1725 dev_name(&adev_dimm->dev)); 1726 return -ENXIO; 1727 } 1728 /* 1729 * Record nfit_mem for the notification path to track back to 1730 * the nfit sysfs attributes for this dimm device object. 1731 */ 1732 dev_set_drvdata(&adev_dimm->dev, nfit_mem); 1733 1734 /* 1735 * Until standardization materializes we need to consider 4 1736 * different command sets. Note, that checking for function0 (bit0) 1737 * tells us if any commands are reachable through this GUID. 1738 */ 1739 for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) 1740 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) 1741 if (family < 0 || i == default_dsm_family) 1742 family = i; 1743 1744 /* limit the supported commands to those that are publicly documented */ 1745 nfit_mem->family = family; 1746 if (override_dsm_mask && !disable_vendor_specific) 1747 dsm_mask = override_dsm_mask; 1748 else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { 1749 dsm_mask = NVDIMM_INTEL_CMDMASK; 1750 if (disable_vendor_specific) 1751 dsm_mask &= ~(1 << ND_CMD_VENDOR); 1752 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { 1753 dsm_mask = 0x1c3c76; 1754 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { 1755 dsm_mask = 0x1fe; 1756 if (disable_vendor_specific) 1757 dsm_mask &= ~(1 << 8); 1758 } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { 1759 dsm_mask = 0xffffffff; 1760 } else { 1761 dev_dbg(dev, "unknown dimm command family\n"); 1762 nfit_mem->family = -1; 1763 /* DSMs are optional, continue loading the driver... */ 1764 return 0; 1765 } 1766 1767 guid = to_nfit_uuid(nfit_mem->family); 1768 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 1769 if (acpi_check_dsm(adev_dimm->handle, guid, 1770 nfit_dsm_revid(nfit_mem->family, i), 1771 1ULL << i)) 1772 set_bit(i, &nfit_mem->dsm_mask); 1773 1774 if (acpi_nvdimm_has_method(adev_dimm, "_LSI") 1775 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { 1776 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); 1777 nfit_mem->has_lsr = true; 1778 } 1779 1780 if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) { 1781 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); 1782 nfit_mem->has_lsw = true; 1783 } 1784 1785 return 0; 1786 } 1787 1788 static void shutdown_dimm_notify(void *data) 1789 { 1790 struct acpi_nfit_desc *acpi_desc = data; 1791 struct nfit_mem *nfit_mem; 1792 1793 mutex_lock(&acpi_desc->init_mutex); 1794 /* 1795 * Clear out the nfit_mem->flags_attr and shut down dimm event 1796 * notifications. 1797 */ 1798 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 1799 struct acpi_device *adev_dimm = nfit_mem->adev; 1800 1801 if (nfit_mem->flags_attr) { 1802 sysfs_put(nfit_mem->flags_attr); 1803 nfit_mem->flags_attr = NULL; 1804 } 1805 if (adev_dimm) { 1806 acpi_remove_notify_handler(adev_dimm->handle, 1807 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify); 1808 dev_set_drvdata(&adev_dimm->dev, NULL); 1809 } 1810 } 1811 mutex_unlock(&acpi_desc->init_mutex); 1812 } 1813 1814 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) 1815 { 1816 struct nfit_mem *nfit_mem; 1817 int dimm_count = 0, rc; 1818 struct nvdimm *nvdimm; 1819 1820 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 1821 struct acpi_nfit_flush_address *flush; 1822 unsigned long flags = 0, cmd_mask; 1823 struct nfit_memdev *nfit_memdev; 1824 u32 device_handle; 1825 u16 mem_flags; 1826 1827 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 1828 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); 1829 if (nvdimm) { 1830 dimm_count++; 1831 continue; 1832 } 1833 1834 if (nfit_mem->bdw && nfit_mem->memdev_pmem) 1835 set_bit(NDD_ALIASING, &flags); 1836 1837 /* collate flags across all memdevs for this dimm */ 1838 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1839 struct acpi_nfit_memory_map *dimm_memdev; 1840 1841 dimm_memdev = __to_nfit_memdev(nfit_mem); 1842 if (dimm_memdev->device_handle 1843 != nfit_memdev->memdev->device_handle) 1844 continue; 1845 dimm_memdev->flags |= nfit_memdev->memdev->flags; 1846 } 1847 1848 mem_flags = __to_nfit_memdev(nfit_mem)->flags; 1849 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) 1850 set_bit(NDD_UNARMED, &flags); 1851 1852 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); 1853 if (rc) 1854 continue; 1855 1856 /* 1857 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL 1858 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the 1859 * userspace interface. 1860 */ 1861 cmd_mask = 1UL << ND_CMD_CALL; 1862 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { 1863 /* 1864 * These commands have a 1:1 correspondence 1865 * between DSM payload and libnvdimm ioctl 1866 * payload format. 1867 */ 1868 cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; 1869 } 1870 1871 if (nfit_mem->has_lsr) { 1872 set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); 1873 set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); 1874 } 1875 if (nfit_mem->has_lsw) 1876 set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); 1877 1878 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush 1879 : NULL; 1880 nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, 1881 acpi_nfit_dimm_attribute_groups, 1882 flags, cmd_mask, flush ? flush->hint_count : 0, 1883 nfit_mem->flush_wpq); 1884 if (!nvdimm) 1885 return -ENOMEM; 1886 1887 nfit_mem->nvdimm = nvdimm; 1888 dimm_count++; 1889 1890 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) 1891 continue; 1892 1893 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n", 1894 nvdimm_name(nvdimm), 1895 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", 1896 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", 1897 mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", 1898 mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "", 1899 mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : ""); 1900 1901 } 1902 1903 rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); 1904 if (rc) 1905 return rc; 1906 1907 /* 1908 * Now that dimms are successfully registered, and async registration 1909 * is flushed, attempt to enable event notification. 1910 */ 1911 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 1912 struct kernfs_node *nfit_kernfs; 1913 1914 nvdimm = nfit_mem->nvdimm; 1915 if (!nvdimm) 1916 continue; 1917 1918 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); 1919 if (nfit_kernfs) 1920 nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, 1921 "flags"); 1922 sysfs_put(nfit_kernfs); 1923 if (!nfit_mem->flags_attr) 1924 dev_warn(acpi_desc->dev, "%s: notifications disabled\n", 1925 nvdimm_name(nvdimm)); 1926 } 1927 1928 return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify, 1929 acpi_desc); 1930 } 1931 1932 /* 1933 * These constants are private because there are no kernel consumers of 1934 * these commands. 1935 */ 1936 enum nfit_aux_cmds { 1937 NFIT_CMD_TRANSLATE_SPA = 5, 1938 NFIT_CMD_ARS_INJECT_SET = 7, 1939 NFIT_CMD_ARS_INJECT_CLEAR = 8, 1940 NFIT_CMD_ARS_INJECT_GET = 9, 1941 }; 1942 1943 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) 1944 { 1945 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 1946 const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); 1947 struct acpi_device *adev; 1948 unsigned long dsm_mask; 1949 int i; 1950 1951 nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; 1952 nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; 1953 adev = to_acpi_dev(acpi_desc); 1954 if (!adev) 1955 return; 1956 1957 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) 1958 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 1959 set_bit(i, &nd_desc->cmd_mask); 1960 set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); 1961 1962 dsm_mask = 1963 (1 << ND_CMD_ARS_CAP) | 1964 (1 << ND_CMD_ARS_START) | 1965 (1 << ND_CMD_ARS_STATUS) | 1966 (1 << ND_CMD_CLEAR_ERROR) | 1967 (1 << NFIT_CMD_TRANSLATE_SPA) | 1968 (1 << NFIT_CMD_ARS_INJECT_SET) | 1969 (1 << NFIT_CMD_ARS_INJECT_CLEAR) | 1970 (1 << NFIT_CMD_ARS_INJECT_GET); 1971 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 1972 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) 1973 set_bit(i, &nd_desc->bus_dsm_mask); 1974 } 1975 1976 static ssize_t range_index_show(struct device *dev, 1977 struct device_attribute *attr, char *buf) 1978 { 1979 struct nd_region *nd_region = to_nd_region(dev); 1980 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); 1981 1982 return sprintf(buf, "%d\n", nfit_spa->spa->range_index); 1983 } 1984 static DEVICE_ATTR_RO(range_index); 1985 1986 static struct attribute *acpi_nfit_region_attributes[] = { 1987 &dev_attr_range_index.attr, 1988 NULL, 1989 }; 1990 1991 static const struct attribute_group acpi_nfit_region_attribute_group = { 1992 .name = "nfit", 1993 .attrs = acpi_nfit_region_attributes, 1994 }; 1995 1996 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { 1997 &nd_region_attribute_group, 1998 &nd_mapping_attribute_group, 1999 &nd_device_attribute_group, 2000 &nd_numa_attribute_group, 2001 &acpi_nfit_region_attribute_group, 2002 NULL, 2003 }; 2004 2005 /* enough info to uniquely specify an interleave set */ 2006 struct nfit_set_info { 2007 struct nfit_set_info_map { 2008 u64 region_offset; 2009 u32 serial_number; 2010 u32 pad; 2011 } mapping[0]; 2012 }; 2013 2014 struct nfit_set_info2 { 2015 struct nfit_set_info_map2 { 2016 u64 region_offset; 2017 u32 serial_number; 2018 u16 vendor_id; 2019 u16 manufacturing_date; 2020 u8 manufacturing_location; 2021 u8 reserved[31]; 2022 } mapping[0]; 2023 }; 2024 2025 static size_t sizeof_nfit_set_info(int num_mappings) 2026 { 2027 return sizeof(struct nfit_set_info) 2028 + num_mappings * sizeof(struct nfit_set_info_map); 2029 } 2030 2031 static size_t sizeof_nfit_set_info2(int num_mappings) 2032 { 2033 return sizeof(struct nfit_set_info2) 2034 + num_mappings * sizeof(struct nfit_set_info_map2); 2035 } 2036 2037 static int cmp_map_compat(const void *m0, const void *m1) 2038 { 2039 const struct nfit_set_info_map *map0 = m0; 2040 const struct nfit_set_info_map *map1 = m1; 2041 2042 return memcmp(&map0->region_offset, &map1->region_offset, 2043 sizeof(u64)); 2044 } 2045 2046 static int cmp_map(const void *m0, const void *m1) 2047 { 2048 const struct nfit_set_info_map *map0 = m0; 2049 const struct nfit_set_info_map *map1 = m1; 2050 2051 if (map0->region_offset < map1->region_offset) 2052 return -1; 2053 else if (map0->region_offset > map1->region_offset) 2054 return 1; 2055 return 0; 2056 } 2057 2058 static int cmp_map2(const void *m0, const void *m1) 2059 { 2060 const struct nfit_set_info_map2 *map0 = m0; 2061 const struct nfit_set_info_map2 *map1 = m1; 2062 2063 if (map0->region_offset < map1->region_offset) 2064 return -1; 2065 else if (map0->region_offset > map1->region_offset) 2066 return 1; 2067 return 0; 2068 } 2069 2070 /* Retrieve the nth entry referencing this spa */ 2071 static struct acpi_nfit_memory_map *memdev_from_spa( 2072 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) 2073 { 2074 struct nfit_memdev *nfit_memdev; 2075 2076 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) 2077 if (nfit_memdev->memdev->range_index == range_index) 2078 if (n-- == 0) 2079 return nfit_memdev->memdev; 2080 return NULL; 2081 } 2082 2083 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, 2084 struct nd_region_desc *ndr_desc, 2085 struct acpi_nfit_system_address *spa) 2086 { 2087 struct device *dev = acpi_desc->dev; 2088 struct nd_interleave_set *nd_set; 2089 u16 nr = ndr_desc->num_mappings; 2090 struct nfit_set_info2 *info2; 2091 struct nfit_set_info *info; 2092 int i; 2093 2094 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 2095 if (!nd_set) 2096 return -ENOMEM; 2097 ndr_desc->nd_set = nd_set; 2098 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); 2099 2100 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); 2101 if (!info) 2102 return -ENOMEM; 2103 2104 info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL); 2105 if (!info2) 2106 return -ENOMEM; 2107 2108 for (i = 0; i < nr; i++) { 2109 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; 2110 struct nfit_set_info_map *map = &info->mapping[i]; 2111 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2112 struct nvdimm *nvdimm = mapping->nvdimm; 2113 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2114 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, 2115 spa->range_index, i); 2116 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2117 2118 if (!memdev || !nfit_mem->dcr) { 2119 dev_err(dev, "%s: failed to find DCR\n", __func__); 2120 return -ENODEV; 2121 } 2122 2123 map->region_offset = memdev->region_offset; 2124 map->serial_number = dcr->serial_number; 2125 2126 map2->region_offset = memdev->region_offset; 2127 map2->serial_number = dcr->serial_number; 2128 map2->vendor_id = dcr->vendor_id; 2129 map2->manufacturing_date = dcr->manufacturing_date; 2130 map2->manufacturing_location = dcr->manufacturing_location; 2131 } 2132 2133 /* v1.1 namespaces */ 2134 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2135 cmp_map, NULL); 2136 nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2137 2138 /* v1.2 namespaces */ 2139 sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2), 2140 cmp_map2, NULL); 2141 nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0); 2142 2143 /* support v1.1 namespaces created with the wrong sort order */ 2144 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 2145 cmp_map_compat, NULL); 2146 nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 2147 2148 /* record the result of the sort for the mapping position */ 2149 for (i = 0; i < nr; i++) { 2150 struct nfit_set_info_map2 *map2 = &info2->mapping[i]; 2151 int j; 2152 2153 for (j = 0; j < nr; j++) { 2154 struct nd_mapping_desc *mapping = &ndr_desc->mapping[j]; 2155 struct nvdimm *nvdimm = mapping->nvdimm; 2156 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 2157 struct acpi_nfit_control_region *dcr = nfit_mem->dcr; 2158 2159 if (map2->serial_number == dcr->serial_number && 2160 map2->vendor_id == dcr->vendor_id && 2161 map2->manufacturing_date == dcr->manufacturing_date && 2162 map2->manufacturing_location 2163 == dcr->manufacturing_location) { 2164 mapping->position = i; 2165 break; 2166 } 2167 } 2168 } 2169 2170 ndr_desc->nd_set = nd_set; 2171 devm_kfree(dev, info); 2172 devm_kfree(dev, info2); 2173 2174 return 0; 2175 } 2176 2177 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) 2178 { 2179 struct acpi_nfit_interleave *idt = mmio->idt; 2180 u32 sub_line_offset, line_index, line_offset; 2181 u64 line_no, table_skip_count, table_offset; 2182 2183 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); 2184 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); 2185 line_offset = idt->line_offset[line_index] 2186 * mmio->line_size; 2187 table_offset = table_skip_count * mmio->table_size; 2188 2189 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 2190 } 2191 2192 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 2193 { 2194 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2195 u64 offset = nfit_blk->stat_offset + mmio->size * bw; 2196 const u32 STATUS_MASK = 0x80000037; 2197 2198 if (mmio->num_lines) 2199 offset = to_interleave_offset(offset, mmio); 2200 2201 return readl(mmio->addr.base + offset) & STATUS_MASK; 2202 } 2203 2204 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, 2205 resource_size_t dpa, unsigned int len, unsigned int write) 2206 { 2207 u64 cmd, offset; 2208 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 2209 2210 enum { 2211 BCW_OFFSET_MASK = (1ULL << 48)-1, 2212 BCW_LEN_SHIFT = 48, 2213 BCW_LEN_MASK = (1ULL << 8) - 1, 2214 BCW_CMD_SHIFT = 56, 2215 }; 2216 2217 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; 2218 len = len >> L1_CACHE_SHIFT; 2219 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; 2220 cmd |= ((u64) write) << BCW_CMD_SHIFT; 2221 2222 offset = nfit_blk->cmd_offset + mmio->size * bw; 2223 if (mmio->num_lines) 2224 offset = to_interleave_offset(offset, mmio); 2225 2226 writeq(cmd, mmio->addr.base + offset); 2227 nvdimm_flush(nfit_blk->nd_region); 2228 2229 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) 2230 readq(mmio->addr.base + offset); 2231 } 2232 2233 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, 2234 resource_size_t dpa, void *iobuf, size_t len, int rw, 2235 unsigned int lane) 2236 { 2237 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2238 unsigned int copied = 0; 2239 u64 base_offset; 2240 int rc; 2241 2242 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES 2243 + lane * mmio->size; 2244 write_blk_ctl(nfit_blk, lane, dpa, len, rw); 2245 while (len) { 2246 unsigned int c; 2247 u64 offset; 2248 2249 if (mmio->num_lines) { 2250 u32 line_offset; 2251 2252 offset = to_interleave_offset(base_offset + copied, 2253 mmio); 2254 div_u64_rem(offset, mmio->line_size, &line_offset); 2255 c = min_t(size_t, len, mmio->line_size - line_offset); 2256 } else { 2257 offset = base_offset + nfit_blk->bdw_offset; 2258 c = len; 2259 } 2260 2261 if (rw) 2262 memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c); 2263 else { 2264 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) 2265 arch_invalidate_pmem((void __force *) 2266 mmio->addr.aperture + offset, c); 2267 2268 memcpy(iobuf + copied, mmio->addr.aperture + offset, c); 2269 } 2270 2271 copied += c; 2272 len -= c; 2273 } 2274 2275 if (rw) 2276 nvdimm_flush(nfit_blk->nd_region); 2277 2278 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; 2279 return rc; 2280 } 2281 2282 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, 2283 resource_size_t dpa, void *iobuf, u64 len, int rw) 2284 { 2285 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 2286 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2287 struct nd_region *nd_region = nfit_blk->nd_region; 2288 unsigned int lane, copied = 0; 2289 int rc = 0; 2290 2291 lane = nd_region_acquire_lane(nd_region); 2292 while (len) { 2293 u64 c = min(len, mmio->size); 2294 2295 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, 2296 iobuf + copied, c, rw, lane); 2297 if (rc) 2298 break; 2299 2300 copied += c; 2301 len -= c; 2302 } 2303 nd_region_release_lane(nd_region, lane); 2304 2305 return rc; 2306 } 2307 2308 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 2309 struct acpi_nfit_interleave *idt, u16 interleave_ways) 2310 { 2311 if (idt) { 2312 mmio->num_lines = idt->line_count; 2313 mmio->line_size = idt->line_size; 2314 if (interleave_ways == 0) 2315 return -ENXIO; 2316 mmio->table_size = mmio->num_lines * interleave_ways 2317 * mmio->line_size; 2318 } 2319 2320 return 0; 2321 } 2322 2323 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, 2324 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) 2325 { 2326 struct nd_cmd_dimm_flags flags; 2327 int rc; 2328 2329 memset(&flags, 0, sizeof(flags)); 2330 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, 2331 sizeof(flags), NULL); 2332 2333 if (rc >= 0 && flags.status == 0) 2334 nfit_blk->dimm_flags = flags.flags; 2335 else if (rc == -ENOTTY) { 2336 /* fall back to a conservative default */ 2337 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; 2338 rc = 0; 2339 } else 2340 rc = -ENXIO; 2341 2342 return rc; 2343 } 2344 2345 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 2346 struct device *dev) 2347 { 2348 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 2349 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 2350 struct nfit_blk_mmio *mmio; 2351 struct nfit_blk *nfit_blk; 2352 struct nfit_mem *nfit_mem; 2353 struct nvdimm *nvdimm; 2354 int rc; 2355 2356 nvdimm = nd_blk_region_to_dimm(ndbr); 2357 nfit_mem = nvdimm_provider_data(nvdimm); 2358 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 2359 dev_dbg(dev, "missing%s%s%s\n", 2360 nfit_mem ? "" : " nfit_mem", 2361 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 2362 (nfit_mem && nfit_mem->bdw) ? "" : " bdw"); 2363 return -ENXIO; 2364 } 2365 2366 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 2367 if (!nfit_blk) 2368 return -ENOMEM; 2369 nd_blk_region_set_provider_data(ndbr, nfit_blk); 2370 nfit_blk->nd_region = to_nd_region(dev); 2371 2372 /* map block aperture memory */ 2373 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 2374 mmio = &nfit_blk->mmio[BDW]; 2375 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, 2376 nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr)); 2377 if (!mmio->addr.base) { 2378 dev_dbg(dev, "%s failed to map bdw\n", 2379 nvdimm_name(nvdimm)); 2380 return -ENOMEM; 2381 } 2382 mmio->size = nfit_mem->bdw->size; 2383 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 2384 mmio->idt = nfit_mem->idt_bdw; 2385 mmio->spa = nfit_mem->spa_bdw; 2386 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 2387 nfit_mem->memdev_bdw->interleave_ways); 2388 if (rc) { 2389 dev_dbg(dev, "%s failed to init bdw interleave\n", 2390 nvdimm_name(nvdimm)); 2391 return rc; 2392 } 2393 2394 /* map block control memory */ 2395 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 2396 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 2397 mmio = &nfit_blk->mmio[DCR]; 2398 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, 2399 nfit_mem->spa_dcr->length); 2400 if (!mmio->addr.base) { 2401 dev_dbg(dev, "%s failed to map dcr\n", 2402 nvdimm_name(nvdimm)); 2403 return -ENOMEM; 2404 } 2405 mmio->size = nfit_mem->dcr->window_size; 2406 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 2407 mmio->idt = nfit_mem->idt_dcr; 2408 mmio->spa = nfit_mem->spa_dcr; 2409 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 2410 nfit_mem->memdev_dcr->interleave_ways); 2411 if (rc) { 2412 dev_dbg(dev, "%s failed to init dcr interleave\n", 2413 nvdimm_name(nvdimm)); 2414 return rc; 2415 } 2416 2417 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 2418 if (rc < 0) { 2419 dev_dbg(dev, "%s failed get DIMM flags\n", 2420 nvdimm_name(nvdimm)); 2421 return rc; 2422 } 2423 2424 if (nvdimm_has_flush(nfit_blk->nd_region) < 0) 2425 dev_warn(dev, "unable to guarantee persistence of writes\n"); 2426 2427 if (mmio->line_size == 0) 2428 return 0; 2429 2430 if ((u32) nfit_blk->cmd_offset % mmio->line_size 2431 + 8 > mmio->line_size) { 2432 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 2433 return -ENXIO; 2434 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 2435 + 8 > mmio->line_size) { 2436 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 2437 return -ENXIO; 2438 } 2439 2440 return 0; 2441 } 2442 2443 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, 2444 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) 2445 { 2446 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2447 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2448 int cmd_rc, rc; 2449 2450 cmd->address = spa->address; 2451 cmd->length = spa->length; 2452 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, 2453 sizeof(*cmd), &cmd_rc); 2454 if (rc < 0) 2455 return rc; 2456 return cmd_rc; 2457 } 2458 2459 static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) 2460 { 2461 int rc; 2462 int cmd_rc; 2463 struct nd_cmd_ars_start ars_start; 2464 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2465 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2466 2467 memset(&ars_start, 0, sizeof(ars_start)); 2468 ars_start.address = spa->address; 2469 ars_start.length = spa->length; 2470 if (test_bit(ARS_SHORT, &nfit_spa->ars_state)) 2471 ars_start.flags = ND_ARS_RETURN_PREV_DATA; 2472 if (nfit_spa_type(spa) == NFIT_SPA_PM) 2473 ars_start.type = ND_ARS_PERSISTENT; 2474 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) 2475 ars_start.type = ND_ARS_VOLATILE; 2476 else 2477 return -ENOTTY; 2478 2479 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2480 sizeof(ars_start), &cmd_rc); 2481 2482 if (rc < 0) 2483 return rc; 2484 return cmd_rc; 2485 } 2486 2487 static int ars_continue(struct acpi_nfit_desc *acpi_desc) 2488 { 2489 int rc, cmd_rc; 2490 struct nd_cmd_ars_start ars_start; 2491 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2492 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2493 2494 memset(&ars_start, 0, sizeof(ars_start)); 2495 ars_start.address = ars_status->restart_address; 2496 ars_start.length = ars_status->restart_length; 2497 ars_start.type = ars_status->type; 2498 ars_start.flags = acpi_desc->ars_start_flags; 2499 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2500 sizeof(ars_start), &cmd_rc); 2501 if (rc < 0) 2502 return rc; 2503 return cmd_rc; 2504 } 2505 2506 static int ars_get_status(struct acpi_nfit_desc *acpi_desc) 2507 { 2508 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2509 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2510 int rc, cmd_rc; 2511 2512 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, 2513 acpi_desc->max_ars, &cmd_rc); 2514 if (rc < 0) 2515 return rc; 2516 return cmd_rc; 2517 } 2518 2519 static void ars_complete(struct acpi_nfit_desc *acpi_desc, 2520 struct nfit_spa *nfit_spa) 2521 { 2522 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2523 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2524 struct nd_region *nd_region = nfit_spa->nd_region; 2525 struct device *dev; 2526 2527 if ((ars_status->address >= spa->address && ars_status->address 2528 < spa->address + spa->length) 2529 || (ars_status->address < spa->address)) { 2530 /* 2531 * Assume that if a scrub starts at an offset from the 2532 * start of nfit_spa that we are in the continuation 2533 * case. 2534 * 2535 * Otherwise, if the scrub covers the spa range, mark 2536 * any pending request complete. 2537 */ 2538 if (ars_status->address + ars_status->length 2539 >= spa->address + spa->length) 2540 /* complete */; 2541 else 2542 return; 2543 } else 2544 return; 2545 2546 if (test_bit(ARS_DONE, &nfit_spa->ars_state)) 2547 return; 2548 2549 if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state)) 2550 return; 2551 2552 if (nd_region) { 2553 dev = nd_region_dev(nd_region); 2554 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON); 2555 } else 2556 dev = acpi_desc->dev; 2557 2558 dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index, 2559 test_bit(ARS_SHORT, &nfit_spa->ars_state) 2560 ? "short" : "long"); 2561 clear_bit(ARS_SHORT, &nfit_spa->ars_state); 2562 set_bit(ARS_DONE, &nfit_spa->ars_state); 2563 } 2564 2565 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) 2566 { 2567 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; 2568 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2569 int rc; 2570 u32 i; 2571 2572 /* 2573 * First record starts at 44 byte offset from the start of the 2574 * payload. 2575 */ 2576 if (ars_status->out_length < 44) 2577 return 0; 2578 for (i = 0; i < ars_status->num_records; i++) { 2579 /* only process full records */ 2580 if (ars_status->out_length 2581 < 44 + sizeof(struct nd_ars_record) * (i + 1)) 2582 break; 2583 rc = nvdimm_bus_add_badrange(nvdimm_bus, 2584 ars_status->records[i].err_address, 2585 ars_status->records[i].length); 2586 if (rc) 2587 return rc; 2588 } 2589 if (i < ars_status->num_records) 2590 dev_warn(acpi_desc->dev, "detected truncated ars results\n"); 2591 2592 return 0; 2593 } 2594 2595 static void acpi_nfit_remove_resource(void *data) 2596 { 2597 struct resource *res = data; 2598 2599 remove_resource(res); 2600 } 2601 2602 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, 2603 struct nd_region_desc *ndr_desc) 2604 { 2605 struct resource *res, *nd_res = ndr_desc->res; 2606 int is_pmem, ret; 2607 2608 /* No operation if the region is already registered as PMEM */ 2609 is_pmem = region_intersects(nd_res->start, resource_size(nd_res), 2610 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); 2611 if (is_pmem == REGION_INTERSECTS) 2612 return 0; 2613 2614 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); 2615 if (!res) 2616 return -ENOMEM; 2617 2618 res->name = "Persistent Memory"; 2619 res->start = nd_res->start; 2620 res->end = nd_res->end; 2621 res->flags = IORESOURCE_MEM; 2622 res->desc = IORES_DESC_PERSISTENT_MEMORY; 2623 2624 ret = insert_resource(&iomem_resource, res); 2625 if (ret) 2626 return ret; 2627 2628 ret = devm_add_action_or_reset(acpi_desc->dev, 2629 acpi_nfit_remove_resource, 2630 res); 2631 if (ret) 2632 return ret; 2633 2634 return 0; 2635 } 2636 2637 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 2638 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc, 2639 struct acpi_nfit_memory_map *memdev, 2640 struct nfit_spa *nfit_spa) 2641 { 2642 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 2643 memdev->device_handle); 2644 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2645 struct nd_blk_region_desc *ndbr_desc; 2646 struct nfit_mem *nfit_mem; 2647 int rc; 2648 2649 if (!nvdimm) { 2650 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 2651 spa->range_index, memdev->device_handle); 2652 return -ENODEV; 2653 } 2654 2655 mapping->nvdimm = nvdimm; 2656 switch (nfit_spa_type(spa)) { 2657 case NFIT_SPA_PM: 2658 case NFIT_SPA_VOLATILE: 2659 mapping->start = memdev->address; 2660 mapping->size = memdev->region_size; 2661 break; 2662 case NFIT_SPA_DCR: 2663 nfit_mem = nvdimm_provider_data(nvdimm); 2664 if (!nfit_mem || !nfit_mem->bdw) { 2665 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 2666 spa->range_index, nvdimm_name(nvdimm)); 2667 break; 2668 } 2669 2670 mapping->size = nfit_mem->bdw->capacity; 2671 mapping->start = nfit_mem->bdw->start_address; 2672 ndr_desc->num_lanes = nfit_mem->bdw->windows; 2673 ndr_desc->mapping = mapping; 2674 ndr_desc->num_mappings = 1; 2675 ndbr_desc = to_blk_region_desc(ndr_desc); 2676 ndbr_desc->enable = acpi_nfit_blk_region_enable; 2677 ndbr_desc->do_io = acpi_desc->blk_do_io; 2678 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2679 if (rc) 2680 return rc; 2681 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, 2682 ndr_desc); 2683 if (!nfit_spa->nd_region) 2684 return -ENOMEM; 2685 break; 2686 } 2687 2688 return 0; 2689 } 2690 2691 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) 2692 { 2693 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2694 nfit_spa_type(spa) == NFIT_SPA_VCD || 2695 nfit_spa_type(spa) == NFIT_SPA_PDISK || 2696 nfit_spa_type(spa) == NFIT_SPA_PCD); 2697 } 2698 2699 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa) 2700 { 2701 return (nfit_spa_type(spa) == NFIT_SPA_VDISK || 2702 nfit_spa_type(spa) == NFIT_SPA_VCD || 2703 nfit_spa_type(spa) == NFIT_SPA_VOLATILE); 2704 } 2705 2706 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 2707 struct nfit_spa *nfit_spa) 2708 { 2709 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; 2710 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2711 struct nd_blk_region_desc ndbr_desc; 2712 struct nd_region_desc *ndr_desc; 2713 struct nfit_memdev *nfit_memdev; 2714 struct nvdimm_bus *nvdimm_bus; 2715 struct resource res; 2716 int count = 0, rc; 2717 2718 if (nfit_spa->nd_region) 2719 return 0; 2720 2721 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { 2722 dev_dbg(acpi_desc->dev, "detected invalid spa index\n"); 2723 return 0; 2724 } 2725 2726 memset(&res, 0, sizeof(res)); 2727 memset(&mappings, 0, sizeof(mappings)); 2728 memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 2729 res.start = spa->address; 2730 res.end = res.start + spa->length - 1; 2731 ndr_desc = &ndbr_desc.ndr_desc; 2732 ndr_desc->res = &res; 2733 ndr_desc->provider_data = nfit_spa; 2734 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 2735 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) 2736 ndr_desc->numa_node = acpi_map_pxm_to_online_node( 2737 spa->proximity_domain); 2738 else 2739 ndr_desc->numa_node = NUMA_NO_NODE; 2740 2741 /* 2742 * Persistence domain bits are hierarchical, if 2743 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then 2744 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. 2745 */ 2746 if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) 2747 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); 2748 else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) 2749 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); 2750 2751 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 2752 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 2753 struct nd_mapping_desc *mapping; 2754 2755 if (memdev->range_index != spa->range_index) 2756 continue; 2757 if (count >= ND_MAX_MAPPINGS) { 2758 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", 2759 spa->range_index, ND_MAX_MAPPINGS); 2760 return -ENXIO; 2761 } 2762 mapping = &mappings[count++]; 2763 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, 2764 memdev, nfit_spa); 2765 if (rc) 2766 goto out; 2767 } 2768 2769 ndr_desc->mapping = mappings; 2770 ndr_desc->num_mappings = count; 2771 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 2772 if (rc) 2773 goto out; 2774 2775 nvdimm_bus = acpi_desc->nvdimm_bus; 2776 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 2777 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); 2778 if (rc) { 2779 dev_warn(acpi_desc->dev, 2780 "failed to insert pmem resource to iomem: %d\n", 2781 rc); 2782 goto out; 2783 } 2784 2785 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2786 ndr_desc); 2787 if (!nfit_spa->nd_region) 2788 rc = -ENOMEM; 2789 } else if (nfit_spa_is_volatile(spa)) { 2790 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, 2791 ndr_desc); 2792 if (!nfit_spa->nd_region) 2793 rc = -ENOMEM; 2794 } else if (nfit_spa_is_virtual(spa)) { 2795 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, 2796 ndr_desc); 2797 if (!nfit_spa->nd_region) 2798 rc = -ENOMEM; 2799 } 2800 2801 out: 2802 if (rc) 2803 dev_err(acpi_desc->dev, "failed to register spa range %d\n", 2804 nfit_spa->spa->range_index); 2805 return rc; 2806 } 2807 2808 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc) 2809 { 2810 struct device *dev = acpi_desc->dev; 2811 struct nd_cmd_ars_status *ars_status; 2812 2813 if (acpi_desc->ars_status) { 2814 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 2815 return 0; 2816 } 2817 2818 ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL); 2819 if (!ars_status) 2820 return -ENOMEM; 2821 acpi_desc->ars_status = ars_status; 2822 return 0; 2823 } 2824 2825 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) 2826 { 2827 int rc; 2828 2829 if (ars_status_alloc(acpi_desc)) 2830 return -ENOMEM; 2831 2832 rc = ars_get_status(acpi_desc); 2833 2834 if (rc < 0 && rc != -ENOSPC) 2835 return rc; 2836 2837 if (ars_status_process_records(acpi_desc)) 2838 return -ENOMEM; 2839 2840 return 0; 2841 } 2842 2843 static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa, 2844 int *query_rc) 2845 { 2846 int rc = *query_rc; 2847 2848 if (no_init_ars) 2849 return acpi_nfit_register_region(acpi_desc, nfit_spa); 2850 2851 set_bit(ARS_REQ, &nfit_spa->ars_state); 2852 set_bit(ARS_SHORT, &nfit_spa->ars_state); 2853 2854 switch (rc) { 2855 case 0: 2856 case -EAGAIN: 2857 rc = ars_start(acpi_desc, nfit_spa); 2858 if (rc == -EBUSY) { 2859 *query_rc = rc; 2860 break; 2861 } else if (rc == 0) { 2862 rc = acpi_nfit_query_poison(acpi_desc); 2863 } else { 2864 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2865 break; 2866 } 2867 if (rc == -EAGAIN) 2868 clear_bit(ARS_SHORT, &nfit_spa->ars_state); 2869 else if (rc == 0) 2870 ars_complete(acpi_desc, nfit_spa); 2871 break; 2872 case -EBUSY: 2873 case -ENOSPC: 2874 break; 2875 default: 2876 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2877 break; 2878 } 2879 2880 if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state)) 2881 set_bit(ARS_REQ, &nfit_spa->ars_state); 2882 2883 return acpi_nfit_register_region(acpi_desc, nfit_spa); 2884 } 2885 2886 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc) 2887 { 2888 struct nfit_spa *nfit_spa; 2889 2890 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2891 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 2892 continue; 2893 ars_complete(acpi_desc, nfit_spa); 2894 } 2895 } 2896 2897 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, 2898 int query_rc) 2899 { 2900 unsigned int tmo = acpi_desc->scrub_tmo; 2901 struct device *dev = acpi_desc->dev; 2902 struct nfit_spa *nfit_spa; 2903 2904 if (acpi_desc->cancel) 2905 return 0; 2906 2907 if (query_rc == -EBUSY) { 2908 dev_dbg(dev, "ARS: ARS busy\n"); 2909 return min(30U * 60U, tmo * 2); 2910 } 2911 if (query_rc == -ENOSPC) { 2912 dev_dbg(dev, "ARS: ARS continue\n"); 2913 ars_continue(acpi_desc); 2914 return 1; 2915 } 2916 if (query_rc && query_rc != -EAGAIN) { 2917 unsigned long long addr, end; 2918 2919 addr = acpi_desc->ars_status->address; 2920 end = addr + acpi_desc->ars_status->length; 2921 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end, 2922 query_rc); 2923 } 2924 2925 ars_complete_all(acpi_desc); 2926 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2927 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 2928 continue; 2929 if (test_bit(ARS_REQ, &nfit_spa->ars_state)) { 2930 int rc = ars_start(acpi_desc, nfit_spa); 2931 2932 clear_bit(ARS_DONE, &nfit_spa->ars_state); 2933 dev = nd_region_dev(nfit_spa->nd_region); 2934 dev_dbg(dev, "ARS: range %d ARS start (%d)\n", 2935 nfit_spa->spa->range_index, rc); 2936 if (rc == 0 || rc == -EBUSY) 2937 return 1; 2938 dev_err(dev, "ARS: range %d ARS failed (%d)\n", 2939 nfit_spa->spa->range_index, rc); 2940 set_bit(ARS_FAILED, &nfit_spa->ars_state); 2941 } 2942 } 2943 return 0; 2944 } 2945 2946 static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) 2947 { 2948 lockdep_assert_held(&acpi_desc->init_mutex); 2949 2950 acpi_desc->scrub_busy = 1; 2951 /* note this should only be set from within the workqueue */ 2952 if (tmo) 2953 acpi_desc->scrub_tmo = tmo; 2954 queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); 2955 } 2956 2957 static void sched_ars(struct acpi_nfit_desc *acpi_desc) 2958 { 2959 __sched_ars(acpi_desc, 0); 2960 } 2961 2962 static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) 2963 { 2964 lockdep_assert_held(&acpi_desc->init_mutex); 2965 2966 acpi_desc->scrub_busy = 0; 2967 acpi_desc->scrub_count++; 2968 if (acpi_desc->scrub_count_state) 2969 sysfs_notify_dirent(acpi_desc->scrub_count_state); 2970 } 2971 2972 static void acpi_nfit_scrub(struct work_struct *work) 2973 { 2974 struct acpi_nfit_desc *acpi_desc; 2975 unsigned int tmo; 2976 int query_rc; 2977 2978 acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work); 2979 mutex_lock(&acpi_desc->init_mutex); 2980 query_rc = acpi_nfit_query_poison(acpi_desc); 2981 tmo = __acpi_nfit_scrub(acpi_desc, query_rc); 2982 if (tmo) 2983 __sched_ars(acpi_desc, tmo); 2984 else 2985 notify_ars_done(acpi_desc); 2986 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 2987 mutex_unlock(&acpi_desc->init_mutex); 2988 } 2989 2990 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, 2991 struct nfit_spa *nfit_spa) 2992 { 2993 int type = nfit_spa_type(nfit_spa->spa); 2994 struct nd_cmd_ars_cap ars_cap; 2995 int rc; 2996 2997 memset(&ars_cap, 0, sizeof(ars_cap)); 2998 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); 2999 if (rc < 0) 3000 return; 3001 /* check that the supported scrub types match the spa type */ 3002 if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16) 3003 & ND_ARS_VOLATILE) == 0) 3004 return; 3005 if (type == NFIT_SPA_PM && ((ars_cap.status >> 16) 3006 & ND_ARS_PERSISTENT) == 0) 3007 return; 3008 3009 nfit_spa->max_ars = ars_cap.max_ars_out; 3010 nfit_spa->clear_err_unit = ars_cap.clear_err_unit; 3011 acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars); 3012 clear_bit(ARS_FAILED, &nfit_spa->ars_state); 3013 set_bit(ARS_REQ, &nfit_spa->ars_state); 3014 } 3015 3016 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 3017 { 3018 struct nfit_spa *nfit_spa; 3019 int rc, query_rc; 3020 3021 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3022 set_bit(ARS_FAILED, &nfit_spa->ars_state); 3023 switch (nfit_spa_type(nfit_spa->spa)) { 3024 case NFIT_SPA_VOLATILE: 3025 case NFIT_SPA_PM: 3026 acpi_nfit_init_ars(acpi_desc, nfit_spa); 3027 break; 3028 } 3029 } 3030 3031 /* 3032 * Reap any results that might be pending before starting new 3033 * short requests. 3034 */ 3035 query_rc = acpi_nfit_query_poison(acpi_desc); 3036 if (query_rc == 0) 3037 ars_complete_all(acpi_desc); 3038 3039 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 3040 switch (nfit_spa_type(nfit_spa->spa)) { 3041 case NFIT_SPA_VOLATILE: 3042 case NFIT_SPA_PM: 3043 /* register regions and kick off initial ARS run */ 3044 rc = ars_register(acpi_desc, nfit_spa, &query_rc); 3045 if (rc) 3046 return rc; 3047 break; 3048 case NFIT_SPA_BDW: 3049 /* nothing to register */ 3050 break; 3051 case NFIT_SPA_DCR: 3052 case NFIT_SPA_VDISK: 3053 case NFIT_SPA_VCD: 3054 case NFIT_SPA_PDISK: 3055 case NFIT_SPA_PCD: 3056 /* register known regions that don't support ARS */ 3057 rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 3058 if (rc) 3059 return rc; 3060 break; 3061 default: 3062 /* don't register unknown regions */ 3063 break; 3064 } 3065 3066 sched_ars(acpi_desc); 3067 return 0; 3068 } 3069 3070 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, 3071 struct nfit_table_prev *prev) 3072 { 3073 struct device *dev = acpi_desc->dev; 3074 3075 if (!list_empty(&prev->spas) || 3076 !list_empty(&prev->memdevs) || 3077 !list_empty(&prev->dcrs) || 3078 !list_empty(&prev->bdws) || 3079 !list_empty(&prev->idts) || 3080 !list_empty(&prev->flushes)) { 3081 dev_err(dev, "new nfit deletes entries (unsupported)\n"); 3082 return -ENXIO; 3083 } 3084 return 0; 3085 } 3086 3087 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) 3088 { 3089 struct device *dev = acpi_desc->dev; 3090 struct kernfs_node *nfit; 3091 struct device *bus_dev; 3092 3093 if (!ars_supported(acpi_desc->nvdimm_bus)) 3094 return 0; 3095 3096 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3097 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); 3098 if (!nfit) { 3099 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); 3100 return -ENODEV; 3101 } 3102 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); 3103 sysfs_put(nfit); 3104 if (!acpi_desc->scrub_count_state) { 3105 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); 3106 return -ENODEV; 3107 } 3108 3109 return 0; 3110 } 3111 3112 static void acpi_nfit_unregister(void *data) 3113 { 3114 struct acpi_nfit_desc *acpi_desc = data; 3115 3116 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 3117 } 3118 3119 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) 3120 { 3121 struct device *dev = acpi_desc->dev; 3122 struct nfit_table_prev prev; 3123 const void *end; 3124 int rc; 3125 3126 if (!acpi_desc->nvdimm_bus) { 3127 acpi_nfit_init_dsms(acpi_desc); 3128 3129 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, 3130 &acpi_desc->nd_desc); 3131 if (!acpi_desc->nvdimm_bus) 3132 return -ENOMEM; 3133 3134 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister, 3135 acpi_desc); 3136 if (rc) 3137 return rc; 3138 3139 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); 3140 if (rc) 3141 return rc; 3142 3143 /* register this acpi_desc for mce notifications */ 3144 mutex_lock(&acpi_desc_lock); 3145 list_add_tail(&acpi_desc->list, &acpi_descs); 3146 mutex_unlock(&acpi_desc_lock); 3147 } 3148 3149 mutex_lock(&acpi_desc->init_mutex); 3150 3151 INIT_LIST_HEAD(&prev.spas); 3152 INIT_LIST_HEAD(&prev.memdevs); 3153 INIT_LIST_HEAD(&prev.dcrs); 3154 INIT_LIST_HEAD(&prev.bdws); 3155 INIT_LIST_HEAD(&prev.idts); 3156 INIT_LIST_HEAD(&prev.flushes); 3157 3158 list_cut_position(&prev.spas, &acpi_desc->spas, 3159 acpi_desc->spas.prev); 3160 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, 3161 acpi_desc->memdevs.prev); 3162 list_cut_position(&prev.dcrs, &acpi_desc->dcrs, 3163 acpi_desc->dcrs.prev); 3164 list_cut_position(&prev.bdws, &acpi_desc->bdws, 3165 acpi_desc->bdws.prev); 3166 list_cut_position(&prev.idts, &acpi_desc->idts, 3167 acpi_desc->idts.prev); 3168 list_cut_position(&prev.flushes, &acpi_desc->flushes, 3169 acpi_desc->flushes.prev); 3170 3171 end = data + sz; 3172 while (!IS_ERR_OR_NULL(data)) 3173 data = add_table(acpi_desc, &prev, data, end); 3174 3175 if (IS_ERR(data)) { 3176 dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data)); 3177 rc = PTR_ERR(data); 3178 goto out_unlock; 3179 } 3180 3181 rc = acpi_nfit_check_deletions(acpi_desc, &prev); 3182 if (rc) 3183 goto out_unlock; 3184 3185 rc = nfit_mem_init(acpi_desc); 3186 if (rc) 3187 goto out_unlock; 3188 3189 rc = acpi_nfit_register_dimms(acpi_desc); 3190 if (rc) 3191 goto out_unlock; 3192 3193 rc = acpi_nfit_register_regions(acpi_desc); 3194 3195 out_unlock: 3196 mutex_unlock(&acpi_desc->init_mutex); 3197 return rc; 3198 } 3199 EXPORT_SYMBOL_GPL(acpi_nfit_init); 3200 3201 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 3202 { 3203 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3204 struct device *dev = acpi_desc->dev; 3205 3206 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 3207 device_lock(dev); 3208 device_unlock(dev); 3209 3210 /* Bounce the init_mutex to complete initial registration */ 3211 mutex_lock(&acpi_desc->init_mutex); 3212 mutex_unlock(&acpi_desc->init_mutex); 3213 3214 return 0; 3215 } 3216 3217 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3218 struct nvdimm *nvdimm, unsigned int cmd) 3219 { 3220 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3221 3222 if (nvdimm) 3223 return 0; 3224 if (cmd != ND_CMD_ARS_START) 3225 return 0; 3226 3227 /* 3228 * The kernel and userspace may race to initiate a scrub, but 3229 * the scrub thread is prepared to lose that initial race. It 3230 * just needs guarantees that any ars it initiates are not 3231 * interrupted by any intervening start reqeusts from userspace. 3232 */ 3233 if (work_busy(&acpi_desc->dwork.work)) 3234 return -EBUSY; 3235 3236 return 0; 3237 } 3238 3239 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) 3240 { 3241 struct device *dev = acpi_desc->dev; 3242 int scheduled = 0, busy = 0; 3243 struct nfit_spa *nfit_spa; 3244 3245 mutex_lock(&acpi_desc->init_mutex); 3246 if (acpi_desc->cancel) { 3247 mutex_unlock(&acpi_desc->init_mutex); 3248 return 0; 3249 } 3250 3251 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3252 int type = nfit_spa_type(nfit_spa->spa); 3253 3254 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE) 3255 continue; 3256 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3257 continue; 3258 3259 if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) 3260 busy++; 3261 else { 3262 if (test_bit(ARS_SHORT, &flags)) 3263 set_bit(ARS_SHORT, &nfit_spa->ars_state); 3264 scheduled++; 3265 } 3266 } 3267 if (scheduled) { 3268 sched_ars(acpi_desc); 3269 dev_dbg(dev, "ars_scan triggered\n"); 3270 } 3271 mutex_unlock(&acpi_desc->init_mutex); 3272 3273 if (scheduled) 3274 return 0; 3275 if (busy) 3276 return -EBUSY; 3277 return -ENOTTY; 3278 } 3279 3280 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) 3281 { 3282 struct nvdimm_bus_descriptor *nd_desc; 3283 3284 dev_set_drvdata(dev, acpi_desc); 3285 acpi_desc->dev = dev; 3286 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 3287 nd_desc = &acpi_desc->nd_desc; 3288 nd_desc->provider_name = "ACPI.NFIT"; 3289 nd_desc->module = THIS_MODULE; 3290 nd_desc->ndctl = acpi_nfit_ctl; 3291 nd_desc->flush_probe = acpi_nfit_flush_probe; 3292 nd_desc->clear_to_send = acpi_nfit_clear_to_send; 3293 nd_desc->attr_groups = acpi_nfit_attribute_groups; 3294 3295 INIT_LIST_HEAD(&acpi_desc->spas); 3296 INIT_LIST_HEAD(&acpi_desc->dcrs); 3297 INIT_LIST_HEAD(&acpi_desc->bdws); 3298 INIT_LIST_HEAD(&acpi_desc->idts); 3299 INIT_LIST_HEAD(&acpi_desc->flushes); 3300 INIT_LIST_HEAD(&acpi_desc->memdevs); 3301 INIT_LIST_HEAD(&acpi_desc->dimms); 3302 INIT_LIST_HEAD(&acpi_desc->list); 3303 mutex_init(&acpi_desc->init_mutex); 3304 acpi_desc->scrub_tmo = 1; 3305 INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub); 3306 } 3307 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); 3308 3309 static void acpi_nfit_put_table(void *table) 3310 { 3311 acpi_put_table(table); 3312 } 3313 3314 void acpi_nfit_shutdown(void *data) 3315 { 3316 struct acpi_nfit_desc *acpi_desc = data; 3317 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); 3318 3319 /* 3320 * Destruct under acpi_desc_lock so that nfit_handle_mce does not 3321 * race teardown 3322 */ 3323 mutex_lock(&acpi_desc_lock); 3324 list_del(&acpi_desc->list); 3325 mutex_unlock(&acpi_desc_lock); 3326 3327 mutex_lock(&acpi_desc->init_mutex); 3328 acpi_desc->cancel = 1; 3329 cancel_delayed_work_sync(&acpi_desc->dwork); 3330 mutex_unlock(&acpi_desc->init_mutex); 3331 3332 /* 3333 * Bounce the nvdimm bus lock to make sure any in-flight 3334 * acpi_nfit_ars_rescan() submissions have had a chance to 3335 * either submit or see ->cancel set. 3336 */ 3337 device_lock(bus_dev); 3338 device_unlock(bus_dev); 3339 3340 flush_workqueue(nfit_wq); 3341 } 3342 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown); 3343 3344 static int acpi_nfit_add(struct acpi_device *adev) 3345 { 3346 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3347 struct acpi_nfit_desc *acpi_desc; 3348 struct device *dev = &adev->dev; 3349 struct acpi_table_header *tbl; 3350 acpi_status status = AE_OK; 3351 acpi_size sz; 3352 int rc = 0; 3353 3354 status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl); 3355 if (ACPI_FAILURE(status)) { 3356 /* This is ok, we could have an nvdimm hotplugged later */ 3357 dev_dbg(dev, "failed to find NFIT at startup\n"); 3358 return 0; 3359 } 3360 3361 rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl); 3362 if (rc) 3363 return rc; 3364 sz = tbl->length; 3365 3366 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3367 if (!acpi_desc) 3368 return -ENOMEM; 3369 acpi_nfit_desc_init(acpi_desc, &adev->dev); 3370 3371 /* Save the acpi header for exporting the revision via sysfs */ 3372 acpi_desc->acpi_header = *tbl; 3373 3374 /* Evaluate _FIT and override with that if present */ 3375 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 3376 if (ACPI_SUCCESS(status) && buf.length > 0) { 3377 union acpi_object *obj = buf.pointer; 3378 3379 if (obj->type == ACPI_TYPE_BUFFER) 3380 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3381 obj->buffer.length); 3382 else 3383 dev_dbg(dev, "invalid type %d, ignoring _FIT\n", 3384 (int) obj->type); 3385 kfree(buf.pointer); 3386 } else 3387 /* skip over the lead-in header table */ 3388 rc = acpi_nfit_init(acpi_desc, (void *) tbl 3389 + sizeof(struct acpi_table_nfit), 3390 sz - sizeof(struct acpi_table_nfit)); 3391 3392 if (rc) 3393 return rc; 3394 return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); 3395 } 3396 3397 static int acpi_nfit_remove(struct acpi_device *adev) 3398 { 3399 /* see acpi_nfit_unregister */ 3400 return 0; 3401 } 3402 3403 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) 3404 { 3405 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3406 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 3407 union acpi_object *obj; 3408 acpi_status status; 3409 int ret; 3410 3411 if (!dev->driver) { 3412 /* dev->driver may be null if we're being removed */ 3413 dev_dbg(dev, "no driver found for dev\n"); 3414 return; 3415 } 3416 3417 if (!acpi_desc) { 3418 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 3419 if (!acpi_desc) 3420 return; 3421 acpi_nfit_desc_init(acpi_desc, dev); 3422 } else { 3423 /* 3424 * Finish previous registration before considering new 3425 * regions. 3426 */ 3427 flush_workqueue(nfit_wq); 3428 } 3429 3430 /* Evaluate _FIT */ 3431 status = acpi_evaluate_object(handle, "_FIT", NULL, &buf); 3432 if (ACPI_FAILURE(status)) { 3433 dev_err(dev, "failed to evaluate _FIT\n"); 3434 return; 3435 } 3436 3437 obj = buf.pointer; 3438 if (obj->type == ACPI_TYPE_BUFFER) { 3439 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3440 obj->buffer.length); 3441 if (ret) 3442 dev_err(dev, "failed to merge updated NFIT\n"); 3443 } else 3444 dev_err(dev, "Invalid _FIT\n"); 3445 kfree(buf.pointer); 3446 } 3447 3448 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) 3449 { 3450 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3451 unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ? 3452 0 : 1 << ARS_SHORT; 3453 3454 acpi_nfit_ars_rescan(acpi_desc, flags); 3455 } 3456 3457 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) 3458 { 3459 dev_dbg(dev, "event: 0x%x\n", event); 3460 3461 switch (event) { 3462 case NFIT_NOTIFY_UPDATE: 3463 return acpi_nfit_update_notify(dev, handle); 3464 case NFIT_NOTIFY_UC_MEMORY_ERROR: 3465 return acpi_nfit_uc_error_notify(dev, handle); 3466 default: 3467 return; 3468 } 3469 } 3470 EXPORT_SYMBOL_GPL(__acpi_nfit_notify); 3471 3472 static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 3473 { 3474 device_lock(&adev->dev); 3475 __acpi_nfit_notify(&adev->dev, adev->handle, event); 3476 device_unlock(&adev->dev); 3477 } 3478 3479 static const struct acpi_device_id acpi_nfit_ids[] = { 3480 { "ACPI0012", 0 }, 3481 { "", 0 }, 3482 }; 3483 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 3484 3485 static struct acpi_driver acpi_nfit_driver = { 3486 .name = KBUILD_MODNAME, 3487 .ids = acpi_nfit_ids, 3488 .ops = { 3489 .add = acpi_nfit_add, 3490 .remove = acpi_nfit_remove, 3491 .notify = acpi_nfit_notify, 3492 }, 3493 }; 3494 3495 static __init int nfit_init(void) 3496 { 3497 int ret; 3498 3499 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 3500 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 3501 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 3502 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 3503 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 3504 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 3505 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 3506 BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16); 3507 3508 guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]); 3509 guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]); 3510 guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]); 3511 guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]); 3512 guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]); 3513 guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]); 3514 guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]); 3515 guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]); 3516 guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]); 3517 guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]); 3518 guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); 3519 guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); 3520 guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); 3521 3522 nfit_wq = create_singlethread_workqueue("nfit"); 3523 if (!nfit_wq) 3524 return -ENOMEM; 3525 3526 nfit_mce_register(); 3527 ret = acpi_bus_register_driver(&acpi_nfit_driver); 3528 if (ret) { 3529 nfit_mce_unregister(); 3530 destroy_workqueue(nfit_wq); 3531 } 3532 3533 return ret; 3534 3535 } 3536 3537 static __exit void nfit_exit(void) 3538 { 3539 nfit_mce_unregister(); 3540 acpi_bus_unregister_driver(&acpi_nfit_driver); 3541 destroy_workqueue(nfit_wq); 3542 WARN_ON(!list_empty(&acpi_descs)); 3543 } 3544 3545 module_init(nfit_init); 3546 module_exit(nfit_exit); 3547 MODULE_LICENSE("GPL v2"); 3548 MODULE_AUTHOR("Intel Corporation"); 3549