1 /* 2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of version 2 of the GNU General Public License as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 #include <linux/platform_device.h> 15 #include <linux/dma-mapping.h> 16 #include <linux/workqueue.h> 17 #include <linux/libnvdimm.h> 18 #include <linux/genalloc.h> 19 #include <linux/vmalloc.h> 20 #include <linux/device.h> 21 #include <linux/module.h> 22 #include <linux/mutex.h> 23 #include <linux/ndctl.h> 24 #include <linux/sizes.h> 25 #include <linux/list.h> 26 #include <linux/slab.h> 27 #include <nd-core.h> 28 #include <intel.h> 29 #include <nfit.h> 30 #include <nd.h> 31 #include "nfit_test.h" 32 #include "../watermark.h" 33 34 #include <asm/mcsafe_test.h> 35 36 /* 37 * Generate an NFIT table to describe the following topology: 38 * 39 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions 40 * 41 * (a) (b) DIMM BLK-REGION 42 * +----------+--------------+----------+---------+ 43 * +------+ | blk2.0 | pm0.0 | blk2.1 | pm1.0 | 0 region2 44 * | imc0 +--+- - - - - region0 - - - -+----------+ + 45 * +--+---+ | blk3.0 | pm0.0 | blk3.1 | pm1.0 | 1 region3 46 * | +----------+--------------v----------v v 47 * +--+---+ | | 48 * | cpu0 | region1 49 * +--+---+ | | 50 * | +-------------------------^----------^ ^ 51 * +--+---+ | blk4.0 | pm1.0 | 2 region4 52 * | imc1 +--+-------------------------+----------+ + 53 * +------+ | blk5.0 | pm1.0 | 3 region5 54 * +-------------------------+----------+-+-------+ 55 * 56 * +--+---+ 57 * | cpu1 | 58 * +--+---+ (Hotplug DIMM) 59 * | 
+----------------------------------------------+
 * +--+---+                   |                 blk6.0/pm7.0                 |    4  region6/7
 * | imc0 +--+----------------------------------------------+
 * +------+
 *
 *
 * *) In this layout we have four dimms and two memory controllers in one
 *    socket.  Each unique interface (BLK or PMEM) to DPA space
 *    is identified by a region device with a dynamically assigned id.
 *
 * *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
 *    A single PMEM namespace "pm0.0" is created using half of the
 *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
 *    allocate from the bottom of a region.  The unallocated
 *    portion of REGION0 aliases with REGION2 and REGION3.  That
 *    unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
 *    "blk3.0") starting at the base of each DIMM to offset (a) in those
 *    DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
 *    names that can be assigned to a namespace.
 *
 * *) In the last portion of dimm0 and dimm1 we have an interleaved
 *    SPA range, REGION1, that spans those two dimms as well as dimm2
 *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
 *    "pm1.0", the rest is reclaimed in 4 BLK namespaces (for each
 *    dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
 *    "blk5.0".
 *
 * *) The portion of dimm2 and dimm3 that do not participate in the
 *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
 *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
 *    Note, that BLK namespaces need not be contiguous in DPA-space, and
 *    can consume aliased capacity from multiple interleave sets.
 *
 * BUS1: Legacy NVDIMM (single contiguous range)
 *
 *  region2
 * +---------------------+
 * |---------------------|
 * ||       pm2.0       ||
 * |---------------------|
 * +---------------------+
 *
 * *) A NFIT-table may describe a simple system-physical-address range
 *    with no BLK aliasing.  This type of region may optionally
 *    reference an NVDIMM.
 */

/* Sizing constants for the fabricated NFIT topology described above. */
enum {
	NUM_PM = 3,		/* pure PMEM SPA ranges */
	NUM_DCR = 5,		/* DIMM control regions */
	NUM_HINTS = 8,
	NUM_BDW = NUM_DCR,	/* one block-data-window per control region */
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */
		+ 4 /* spa1 iset */ + 1 /* spa11 iset */,
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,	/* namespace label area per dimm */
	SPA_VCD_SIZE = SZ_4M,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	BDW_SIZE = 64 << 8,	/* 16K aperture backing store */
	DCR_SIZE = 12,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};

/*
 * Simulated DIMM control region registers plus the backing store for a
 * block-data-window aperture.  ("aperature" spelling is historical; it
 * is referenced elsewhere, so it is left as-is.)
 */
struct nfit_test_dcr {
	__le64 bdw_addr;
	__le32 bdw_status;
	__u8 aperature[BDW_SIZE];
};

/* Pack (node, socket, imc, chan, dimm) into an NFIT device handle. */
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))

/* Device handles for all simulated dimms, including the hotplug dimm. */
static u32 handle[] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
	[5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
	[6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
};

/* per-dimm command-failure injection: bitmask of funcs to fail, and the
 * error code to return (0 selects the -EIO default, see
 * override_return_code()).
 */
static unsigned long dimm_fail_cmd_flags[ARRAY_SIZE(handle)];
static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];

/*
 * Baseline SMART payload; also used by the SMART injection command to
 * restore fields when an injection is disabled.
 */
static const struct nd_intel_smart smart_def = {
	.flags = ND_INTEL_SMART_HEALTH_VALID
		| ND_INTEL_SMART_SPARES_VALID
		| ND_INTEL_SMART_ALARM_VALID
		| ND_INTEL_SMART_USED_VALID
		| ND_INTEL_SMART_SHUTDOWN_VALID
		| ND_INTEL_SMART_SHUTDOWN_COUNT_VALID
		| ND_INTEL_SMART_MTEMP_VALID
		| ND_INTEL_SMART_CTEMP_VALID,
	.health
= ND_INTEL_SMART_NON_CRITICAL_HEALTH,
	/* temperatures appear to be 1/16 degree fixed point — TODO confirm
	 * against the Intel SMART payload definition */
	.media_temperature = 23 * 16,
	.ctrl_temperature = 25 * 16,
	.pmic_temperature = 40 * 16,
	.spares = 75,
	.alarm_flags = ND_INTEL_SMART_SPARE_TRIP
		| ND_INTEL_SMART_TEMP_TRIP,
	.ait_status = 1,
	.life_used = 5,
	.shutdown_state = 0,
	.shutdown_count = 42,
	.vendor_size = 0,
};

/* per-dimm firmware-update state machine context */
struct nfit_test_fw {
	enum intel_fw_update_state state;
	u32 context;		/* token handed out by "start update" */
	u64 version;
	u32 size_received;	/* bytes accepted so far via "send data" */
	u64 end_time;		/* jiffies deadline for the simulated verify */
};

/*
 * One simulated NFIT bus instance: the fabricated table, its backing
 * allocations, and per-dimm command state.
 */
struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;	/* nfit_test_resource entries */
	void *nfit_buf;			/* the generated NFIT table */
	dma_addr_t nfit_dma;
	size_t nfit_size;
	size_t nfit_filled;
	int dcr_idx;			/* first dimm index owned by this bus */
	int num_dcr;
	int num_pm;
	void **dimm;
	dma_addr_t *dimm_dma;
	void **flush;
	dma_addr_t *flush_dma;
	void **label;			/* LABEL_SIZE label area per dimm */
	dma_addr_t *label_dma;
	void **spa_set;
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);
	void (*setup)(struct nfit_test *t);
	int setup_hotplug;
	union acpi_object **_fit;
	dma_addr_t _fit_dma;
	/* simulated Address Range Scrub engine state */
	struct ars_state {
		struct nd_cmd_ars_status *ars_status;
		unsigned long deadline;	/* jiffies at which the scrub "completes" */
		spinlock_t lock;
	} ars_state;
	struct device *dimm_dev[ARRAY_SIZE(handle)];
	struct nd_intel_smart *smart;
	struct nd_intel_smart_threshold *smart_threshold;
	struct badrange badrange;	/* injected media error ranges */
	struct work_struct work;	/* defers the UC-error notification */
	struct nfit_test_fw *fw;
};

static struct workqueue_struct *nfit_wq;

/* backs all >= DIMM_SIZE fake-DMA allocations, see test_alloc() */
static struct gen_pool *nfit_pool;

/* map a platform device back to its enclosing test fixture */
static struct nfit_test *to_nfit_test(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct nfit_test, pdev);
}

/*
 * ND_INTEL_FW_GET_INFO: report fixed firmware-update capabilities and
 * the version most recently "flashed" for dimm @idx.
 */
static int nd_intel_test_get_fw_info(struct nfit_test *t,
		struct nd_intel_fw_info *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev =
&t->pdev.dev; 233 struct nfit_test_fw *fw = &t->fw[idx]; 234 235 dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p, buf_len: %u, idx: %d\n", 236 __func__, t, nd_cmd, buf_len, idx); 237 238 if (buf_len < sizeof(*nd_cmd)) 239 return -EINVAL; 240 241 nd_cmd->status = 0; 242 nd_cmd->storage_size = INTEL_FW_STORAGE_SIZE; 243 nd_cmd->max_send_len = INTEL_FW_MAX_SEND_LEN; 244 nd_cmd->query_interval = INTEL_FW_QUERY_INTERVAL; 245 nd_cmd->max_query_time = INTEL_FW_QUERY_MAX_TIME; 246 nd_cmd->update_cap = 0; 247 nd_cmd->fis_version = INTEL_FW_FIS_VERSION; 248 nd_cmd->run_version = 0; 249 nd_cmd->updated_version = fw->version; 250 251 return 0; 252 } 253 254 static int nd_intel_test_start_update(struct nfit_test *t, 255 struct nd_intel_fw_start *nd_cmd, unsigned int buf_len, 256 int idx) 257 { 258 struct device *dev = &t->pdev.dev; 259 struct nfit_test_fw *fw = &t->fw[idx]; 260 261 dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n", 262 __func__, t, nd_cmd, buf_len, idx); 263 264 if (buf_len < sizeof(*nd_cmd)) 265 return -EINVAL; 266 267 if (fw->state != FW_STATE_NEW) { 268 /* extended status, FW update in progress */ 269 nd_cmd->status = 0x10007; 270 return 0; 271 } 272 273 fw->state = FW_STATE_IN_PROGRESS; 274 fw->context++; 275 fw->size_received = 0; 276 nd_cmd->status = 0; 277 nd_cmd->context = fw->context; 278 279 dev_dbg(dev, "%s: context issued: %#x\n", __func__, nd_cmd->context); 280 281 return 0; 282 } 283 284 static int nd_intel_test_send_data(struct nfit_test *t, 285 struct nd_intel_fw_send_data *nd_cmd, unsigned int buf_len, 286 int idx) 287 { 288 struct device *dev = &t->pdev.dev; 289 struct nfit_test_fw *fw = &t->fw[idx]; 290 u32 *status = (u32 *)&nd_cmd->data[nd_cmd->length]; 291 292 dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n", 293 __func__, t, nd_cmd, buf_len, idx); 294 295 if (buf_len < sizeof(*nd_cmd)) 296 return -EINVAL; 297 298 299 dev_dbg(dev, "%s: cmd->status: %#x\n", __func__, *status); 300 dev_dbg(dev, "%s: cmd->data[0]: 
%#x\n", __func__, nd_cmd->data[0]); 301 dev_dbg(dev, "%s: cmd->data[%u]: %#x\n", __func__, nd_cmd->length-1, 302 nd_cmd->data[nd_cmd->length-1]); 303 304 if (fw->state != FW_STATE_IN_PROGRESS) { 305 dev_dbg(dev, "%s: not in IN_PROGRESS state\n", __func__); 306 *status = 0x5; 307 return 0; 308 } 309 310 if (nd_cmd->context != fw->context) { 311 dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n", 312 __func__, nd_cmd->context, fw->context); 313 *status = 0x10007; 314 return 0; 315 } 316 317 /* 318 * check offset + len > size of fw storage 319 * check length is > max send length 320 */ 321 if (nd_cmd->offset + nd_cmd->length > INTEL_FW_STORAGE_SIZE || 322 nd_cmd->length > INTEL_FW_MAX_SEND_LEN) { 323 *status = 0x3; 324 dev_dbg(dev, "%s: buffer boundary violation\n", __func__); 325 return 0; 326 } 327 328 fw->size_received += nd_cmd->length; 329 dev_dbg(dev, "%s: copying %u bytes, %u bytes so far\n", 330 __func__, nd_cmd->length, fw->size_received); 331 *status = 0; 332 return 0; 333 } 334 335 static int nd_intel_test_finish_fw(struct nfit_test *t, 336 struct nd_intel_fw_finish_update *nd_cmd, 337 unsigned int buf_len, int idx) 338 { 339 struct device *dev = &t->pdev.dev; 340 struct nfit_test_fw *fw = &t->fw[idx]; 341 342 dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n", 343 __func__, t, nd_cmd, buf_len, idx); 344 345 if (fw->state == FW_STATE_UPDATED) { 346 /* update already done, need cold boot */ 347 nd_cmd->status = 0x20007; 348 return 0; 349 } 350 351 dev_dbg(dev, "%s: context: %#x ctrl_flags: %#x\n", 352 __func__, nd_cmd->context, nd_cmd->ctrl_flags); 353 354 switch (nd_cmd->ctrl_flags) { 355 case 0: /* finish */ 356 if (nd_cmd->context != fw->context) { 357 dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n", 358 __func__, nd_cmd->context, 359 fw->context); 360 nd_cmd->status = 0x10007; 361 return 0; 362 } 363 nd_cmd->status = 0; 364 fw->state = FW_STATE_VERIFY; 365 /* set 1 second of time for firmware "update" */ 366 
fw->end_time = jiffies + HZ;
		break;

	case 1: /* abort */
		fw->size_received = 0;
		/* successfully aborted status */
		nd_cmd->status = 0x40007;
		fw->state = FW_STATE_NEW;
		dev_dbg(dev, "%s: abort successful\n", __func__);
		break;

	default: /* bad control flag */
		dev_warn(dev, "%s: unknown control flag: %#x\n",
				__func__, nd_cmd->ctrl_flags);
		return -EINVAL;
	}

	return 0;
}

/*
 * ND_INTEL_FW_FINISH_QUERY: poll for completion of a finish/verify
 * sequence.  On the first poll after the verify window elapses the
 * state advances to UPDATED and the bogus test version is reported.
 */
static int nd_intel_test_finish_query(struct nfit_test *t,
		struct nd_intel_fw_finish_query *nd_cmd,
		unsigned int buf_len, int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	if (nd_cmd->context != fw->context) {
		dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
				__func__, nd_cmd->context, fw->context);
		nd_cmd->status = 0x10007;
		return 0;
	}

	dev_dbg(dev, "%s context: %#x\n", __func__, nd_cmd->context);

	switch (fw->state) {
	case FW_STATE_NEW:
		nd_cmd->updated_fw_rev = 0;
		nd_cmd->status = 0;
		dev_dbg(dev, "%s: new state\n", __func__);
		break;

	case FW_STATE_IN_PROGRESS:
		/* sequencing error */
		nd_cmd->status = 0x40007;
		nd_cmd->updated_fw_rev = 0;
		dev_dbg(dev, "%s: sequence error\n", __func__);
		break;

	case FW_STATE_VERIFY:
		/*
		 * NOTE(review): end_time is set from 32/64-bit 'jiffies'
		 * (see finish_fw) but compared with the jiffies64 helper
		 * here — benign on 64-bit; confirm for 32-bit builds.
		 */
		if (time_is_after_jiffies64(fw->end_time)) {
			nd_cmd->updated_fw_rev = 0;
			nd_cmd->status = 0x20007;
			dev_dbg(dev, "%s: still verifying\n", __func__);
			break;
		}

		dev_dbg(dev, "%s: transition out verify\n", __func__);
		fw->state = FW_STATE_UPDATED;
		/* we are going to fall through if it's "done" */
		/* fall through */
	case FW_STATE_UPDATED:
		nd_cmd->status = 0;
		/* bogus test version */
		fw->version = nd_cmd->updated_fw_rev =
			INTEL_FW_FAKE_VERSION;
dev_dbg(dev, "%s: updated\n", __func__);
		break;

	default: /* we should never get here */
		return -EINVAL;
	}

	return 0;
}

/*
 * ND_CMD_GET_CONFIG_SIZE: report the size of the simulated namespace
 * label area and the maximum transfer per read/write command.
 */
static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->status = 0;
	nd_cmd->config_size = LABEL_SIZE;
	nd_cmd->max_xfer = SZ_4K;

	return 0;
}

/*
 * ND_CMD_GET_CONFIG_DATA: copy label data at @in_offset into the
 * command's output buffer.  Returns the count of residual buffer bytes
 * (buf_len minus header minus copied length), or -EINVAL on a
 * malformed request.
 */
static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
		*nd_cmd, unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
		return -EINVAL;

	nd_cmd->status = 0;
	/* clamp the copy to the end of the label area */
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(nd_cmd->out_buf, label + offset, len);
	rc = buf_len - sizeof(*nd_cmd) - len;

	return rc;
}

/*
 * ND_CMD_SET_CONFIG_DATA: write label data at @in_offset.  The 4-byte
 * status dword trails the variable-length input payload, hence the
 * "+ 4" accounting.
 */
static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
		unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	u32 *status;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
		return -EINVAL;

	/* status lives immediately after the input payload */
	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
	*status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(label + offset, nd_cmd->in_buf, len);
	rc = buf_len - sizeof(*nd_cmd) - (len + 4);

	return rc;
}

/* granularity (and alignment requirement) for simulated error clearing */
#define NFIT_TEST_CLEAR_ERR_UNIT	256

/*
 * ND_CMD_ARS_CAP: report the maximum ARS status payload this test
 * harness will produce and the clear-error unit.
 */
static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
		unsigned int buf_len)
{
	int ars_recs;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	/* for testing, only store up to n records that fit within 4k */
ars_recs = SZ_4K / sizeof(struct nd_ars_record);

	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
		+ ars_recs * sizeof(struct nd_ars_record);
	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;

	return 0;
}

/*
 * Populate the cached ARS status payload with every injected badrange
 * entry that intersects [addr, addr + len), clipped to that range, and
 * arm a 1 second "scrub in progress" deadline.
 *
 * NOTE(review): the record loop has no explicit bound; it assumes the
 * number of intersecting badrange entries fits the max_ars_out sizing
 * advertised by ars_cap (4K of records) — confirm against the
 * ars_status allocation.
 */
static void post_ars_status(struct ars_state *ars_state,
		struct badrange *badrange, u64 addr, u64 len)
{
	struct nd_cmd_ars_status *ars_status;
	struct nd_ars_record *ars_record;
	struct badrange_entry *be;
	u64 end = addr + len - 1;
	int i = 0;

	ars_state->deadline = jiffies + 1*HZ;
	ars_status = ars_state->ars_status;
	ars_status->status = 0;
	ars_status->address = addr;
	ars_status->length = len;
	ars_status->type = ND_ARS_PERSISTENT;

	spin_lock(&badrange->lock);
	list_for_each_entry(be, &badrange->list, list) {
		u64 be_end = be->start + be->length - 1;
		u64 rstart, rend;

		/* skip entries outside the range */
		if (be_end < addr || be->start > end)
			continue;

		/* clip the entry to the scrubbed range */
		rstart = (be->start < addr) ? addr : be->start;
		rend = (be_end < end) ?
be_end : end;
		ars_record = &ars_status->records[i];
		ars_record->handle = 0;
		ars_record->err_address = rstart;
		ars_record->length = rend - rstart + 1;
		i++;
	}
	spin_unlock(&badrange->lock);
	ars_status->num_records = i;
	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
		+ i * sizeof(struct nd_ars_record);
}

/*
 * ND_CMD_ARS_START: kick off a simulated scrub.  Busy until the
 * previous scrub's deadline passes; otherwise the status payload for
 * the requested range is generated immediately.
 */
static int nfit_test_cmd_ars_start(struct nfit_test *t,
		struct ars_state *ars_state,
		struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
		int *cmd_rc)
{
	if (buf_len < sizeof(*ars_start))
		return -EINVAL;

	spin_lock(&ars_state->lock);
	if (time_before(jiffies, ars_state->deadline)) {
		/* previous scrub still "running" */
		ars_start->status = NFIT_ARS_START_BUSY;
		*cmd_rc = -EBUSY;
	} else {
		ars_start->status = 0;
		ars_start->scrub_time = 1;
		post_ars_status(ars_state, &t->badrange, ars_start->address,
				ars_start->length);
		*cmd_rc = 0;
	}
	spin_unlock(&ars_state->lock);

	return 0;
}

/*
 * ND_CMD_ARS_STATUS: report BUSY until the scrub deadline passes, then
 * return the cached status payload built by post_ars_status().
 */
static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
		int *cmd_rc)
{
	if (buf_len < ars_state->ars_status->out_length)
		return -EINVAL;

	spin_lock(&ars_state->lock);
	if (time_before(jiffies, ars_state->deadline)) {
		memset(ars_status, 0, buf_len);
		ars_status->status = NFIT_ARS_STATUS_BUSY;
		ars_status->out_length = sizeof(*ars_status);
		*cmd_rc = -EBUSY;
	} else {
		memcpy(ars_status, ars_state->ars_status,
				ars_state->ars_status->out_length);
		*cmd_rc = 0;
	}
	spin_unlock(&ars_state->lock);
	return 0;
}

/*
 * ND_CMD_CLEAR_ERROR: drop injected badrange entries covering the
 * given range.  Address and length must be aligned to the clear-error
 * unit advertised by ars_cap.
 */
static int nfit_test_cmd_clear_error(struct nfit_test *t,
		struct nd_cmd_clear_error *clear_err,
		unsigned int buf_len, int *cmd_rc)
{
	const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
	if (buf_len < sizeof(*clear_err))
		return -EINVAL;

	if ((clear_err->address & mask) || (clear_err->length & mask))
		return
-EINVAL;

	badrange_forget(&t->badrange, clear_err->address, clear_err->length);
	clear_err->status = 0;
	clear_err->cleared = clear_err->length;
	*cmd_rc = 0;
	return 0;
}

/* context for the SPA -> region lookup walk below */
struct region_search_spa {
	u64 addr;			/* SPA to locate */
	struct nd_region *region;	/* result, or NULL */
};

/* crude check: region devices are named "regionN" */
static int is_region_device(struct device *dev)
{
	return !strncmp(dev->kobj.name, "region", 6);
}

/*
 * device_for_each_child() callback: record the region whose
 * [ndr_start, ndr_start + ndr_size) span contains ctx->addr.
 * Returning 1 terminates the walk.
 */
static int nfit_test_search_region_spa(struct device *dev, void *data)
{
	struct region_search_spa *ctx = data;
	struct nd_region *nd_region;
	resource_size_t ndr_end;

	if (!is_region_device(dev))
		return 0;

	nd_region = to_nd_region(dev);
	ndr_end = nd_region->ndr_start + nd_region->ndr_size;

	if (ctx->addr >= nd_region->ndr_start && ctx->addr < ndr_end) {
		ctx->region = nd_region;
		return 1;
	}

	return 0;
}

/*
 * Resolve @spa->spa to a (dimm handle, dpa) pair.  For test purposes
 * the translation always attributes the address to the last dimm in
 * the owning region's interleave set.  Returns -ENODEV when no region
 * contains the address.
 */
static int nfit_test_search_spa(struct nvdimm_bus *bus,
		struct nd_cmd_translate_spa *spa)
{
	int ret;
	struct nd_region *nd_region = NULL;
	struct nvdimm *nvdimm = NULL;
	struct nd_mapping *nd_mapping = NULL;
	struct region_search_spa ctx = {
		.addr = spa->spa,
		.region = NULL,
	};
	u64 dpa;

	ret = device_for_each_child(&bus->dev, &ctx,
			nfit_test_search_region_spa);

	if (!ret)
		return -ENODEV;

	nd_region = ctx.region;

	dpa = ctx.addr - nd_region->ndr_start;

	/*
	 * last dimm is selected for test
	 */
	nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
	nvdimm = nd_mapping->nvdimm;

	spa->devices[0].nfit_device_handle = handle[nvdimm->id];
	spa->num_nvdimms = 1;
	spa->devices[0].dpa = dpa;

	return 0;
}

/*
 * NFIT_CMD_TRANSLATE_SPA: translate a system-physical-address to dimm
 * coordinates.  A failed lookup is reported in-band via status = 2,
 * not as an errno.
 */
static int nfit_test_cmd_translate_spa(struct nvdimm_bus *bus,
		struct nd_cmd_translate_spa *spa, unsigned int buf_len)
{
	if (buf_len < spa->translate_length)
		return -EINVAL;

	if (nfit_test_search_spa(bus, spa) < 0 || !spa->num_nvdimms)
spa->status = 2;

	return 0;
}

/* ND_INTEL_SMART: return the dimm's current (possibly injected) SMART data */
static int nfit_test_cmd_smart(struct nd_intel_smart *smart, unsigned int buf_len,
		struct nd_intel_smart *smart_data)
{
	if (buf_len < sizeof(*smart))
		return -EINVAL;
	memcpy(smart, smart_data, sizeof(*smart));
	return 0;
}

/* ND_INTEL_SMART_THRESHOLD: return the dimm's current alarm thresholds */
static int nfit_test_cmd_smart_threshold(
		struct nd_intel_smart_threshold *out,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *smart_t)
{
	if (buf_len < sizeof(*smart_t))
		return -EINVAL;
	memcpy(out, smart_t, sizeof(*smart_t));
	return 0;
}

/*
 * Fire the ACPI 0x81 health notification when any enabled threshold is
 * tripped, health is degraded, or an unsafe shutdown is latched.
 */
static void smart_notify(struct device *bus_dev,
		struct device *dimm_dev, struct nd_intel_smart *smart,
		struct nd_intel_smart_threshold *thresh)
{
	dev_dbg(dimm_dev, "%s: alarm: %#x spares: %d (%d) mtemp: %d (%d) ctemp: %d (%d)\n",
			__func__, thresh->alarm_control, thresh->spares,
			smart->spares, thresh->media_temperature,
			smart->media_temperature, thresh->ctrl_temperature,
			smart->ctrl_temperature);
	if (((thresh->alarm_control & ND_INTEL_SMART_SPARE_TRIP)
				&& smart->spares
				<= thresh->spares)
			|| ((thresh->alarm_control & ND_INTEL_SMART_TEMP_TRIP)
				&& smart->media_temperature
				>= thresh->media_temperature)
			|| ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP)
				&& smart->ctrl_temperature
				>= thresh->ctrl_temperature)
			|| (smart->health != ND_INTEL_SMART_NON_CRITICAL_HEALTH)
			|| (smart->shutdown_state != 0)) {
		device_lock(bus_dev);
		__acpi_nvdimm_notify(dimm_dev, 0x81);
		device_unlock(bus_dev);
	}
}

/*
 * ND_INTEL_SMART_SET_THRESHOLD: install new alarm thresholds and
 * re-evaluate notification state.  The trailing 4-byte status field of
 * the input payload is excluded from the copy (hence "- 4").
 */
static int nfit_test_cmd_smart_set_threshold(
		struct nd_intel_smart_set_threshold *in,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *thresh,
		struct nd_intel_smart *smart,
		struct device *bus_dev, struct device *dimm_dev)
{
	unsigned int size;

	size = sizeof(*in) - 4;
	if (buf_len < size)
		return -EINVAL;
	memcpy(thresh->data, in, size);
in->status = 0;
	smart_notify(bus_dev, dimm_dev, smart, thresh);

	return 0;
}

/*
 * ND_INTEL_SMART_INJECT: toggle injected SMART conditions.  Each
 * enabled flag either applies the injected value or restores the
 * corresponding field from the smart_def baseline, then notification
 * state is re-evaluated.
 */
static int nfit_test_cmd_smart_inject(
		struct nd_intel_smart_inject *inj,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *thresh,
		struct nd_intel_smart *smart,
		struct device *bus_dev, struct device *dimm_dev)
{
	if (buf_len != sizeof(*inj))
		return -EINVAL;

	if (inj->flags & ND_INTEL_SMART_INJECT_MTEMP) {
		if (inj->mtemp_enable)
			smart->media_temperature = inj->media_temperature;
		else
			smart->media_temperature = smart_def.media_temperature;
	}
	if (inj->flags & ND_INTEL_SMART_INJECT_SPARE) {
		if (inj->spare_enable)
			smart->spares = inj->spares;
		else
			smart->spares = smart_def.spares;
	}
	if (inj->flags & ND_INTEL_SMART_INJECT_FATAL) {
		if (inj->fatal_enable)
			smart->health = ND_INTEL_SMART_FATAL_HEALTH;
		else
			smart->health = ND_INTEL_SMART_NON_CRITICAL_HEALTH;
	}
	if (inj->flags & ND_INTEL_SMART_INJECT_SHUTDOWN) {
		if (inj->unsafe_shutdown_enable) {
			smart->shutdown_state = 1;
			smart->shutdown_count++;
		} else
			smart->shutdown_state = 0;
	}
	inj->status = 0;
	smart_notify(bus_dev, dimm_dev, smart, thresh);

	return 0;
}

/* deferred work: raise the uncorrectable-memory-error ACPI notification */
static void uc_error_notify(struct work_struct *work)
{
	struct nfit_test *t = container_of(work, typeof(*t), work);

	__acpi_nfit_notify(&t->pdev.dev, t, NFIT_NOTIFY_UC_MEMORY_ERROR);
}

/*
 * NFIT_CMD_ARS_INJECT_SET: add a badrange entry for the given SPA
 * range, optionally raising the UC-error notification asynchronously.
 * Failures are reported both via the errno and the in-band status.
 */
static int nfit_test_cmd_ars_error_inject(struct nfit_test *t,
		struct nd_cmd_ars_err_inj *err_inj, unsigned int buf_len)
{
	int rc;

	if (buf_len != sizeof(*err_inj)) {
		rc = -EINVAL;
		goto err;
	}

	if (err_inj->err_inj_spa_range_length <= 0) {
		rc = -EINVAL;
		goto err;
	}

	rc = badrange_add(&t->badrange, err_inj->err_inj_spa_range_base,
			err_inj->err_inj_spa_range_length);
	if (rc < 0)
		goto err;

	if (err_inj->err_inj_options & (1 <<
ND_ARS_ERR_INJ_OPT_NOTIFY)) 841 queue_work(nfit_wq, &t->work); 842 843 err_inj->status = 0; 844 return 0; 845 846 err: 847 err_inj->status = NFIT_ARS_INJECT_INVALID; 848 return rc; 849 } 850 851 static int nfit_test_cmd_ars_inject_clear(struct nfit_test *t, 852 struct nd_cmd_ars_err_inj_clr *err_clr, unsigned int buf_len) 853 { 854 int rc; 855 856 if (buf_len != sizeof(*err_clr)) { 857 rc = -EINVAL; 858 goto err; 859 } 860 861 if (err_clr->err_inj_clr_spa_range_length <= 0) { 862 rc = -EINVAL; 863 goto err; 864 } 865 866 badrange_forget(&t->badrange, err_clr->err_inj_clr_spa_range_base, 867 err_clr->err_inj_clr_spa_range_length); 868 869 err_clr->status = 0; 870 return 0; 871 872 err: 873 err_clr->status = NFIT_ARS_INJECT_INVALID; 874 return rc; 875 } 876 877 static int nfit_test_cmd_ars_inject_status(struct nfit_test *t, 878 struct nd_cmd_ars_err_inj_stat *err_stat, 879 unsigned int buf_len) 880 { 881 struct badrange_entry *be; 882 int max = SZ_4K / sizeof(struct nd_error_stat_query_record); 883 int i = 0; 884 885 err_stat->status = 0; 886 spin_lock(&t->badrange.lock); 887 list_for_each_entry(be, &t->badrange.list, list) { 888 err_stat->record[i].err_inj_stat_spa_range_base = be->start; 889 err_stat->record[i].err_inj_stat_spa_range_length = be->length; 890 i++; 891 if (i > max) 892 break; 893 } 894 spin_unlock(&t->badrange.lock); 895 err_stat->inj_err_rec_count = i; 896 897 return 0; 898 } 899 900 static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t, 901 struct nd_intel_lss *nd_cmd, unsigned int buf_len) 902 { 903 struct device *dev = &t->pdev.dev; 904 905 if (buf_len < sizeof(*nd_cmd)) 906 return -EINVAL; 907 908 switch (nd_cmd->enable) { 909 case 0: 910 nd_cmd->status = 0; 911 dev_dbg(dev, "%s: Latch System Shutdown Status disabled\n", 912 __func__); 913 break; 914 case 1: 915 nd_cmd->status = 0; 916 dev_dbg(dev, "%s: Latch System Shutdown Status enabled\n", 917 __func__); 918 break; 919 default: 920 dev_warn(dev, "Unknown enable value: %#x\n", 
nd_cmd->enable);
		nd_cmd->status = 0x3;
		break;
	}

	return 0;
}

/*
 * Apply per-dimm failure injection: when the bit for @func is set in
 * dimm_fail_cmd_flags, return the configured error code (or -EIO if
 * none was configured) instead of the handler's result.
 */
static int override_return_code(int dimm, unsigned int func, int rc)
{
	if ((1 << func) & dimm_fail_cmd_flags[dimm]) {
		if (dimm_fail_cmd_code[dimm])
			return dimm_fail_cmd_code[dimm];
		return -EIO;
	}
	return rc;
}

/*
 * Map a dimm's NFIT device handle to its index in handle[], or -ENXIO.
 * NOTE(review): @func is currently unused here — presumably kept for
 * signature symmetry with override_return_code(); confirm before
 * removing.
 */
static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
{
	int i;

	/* lookup per-dimm data */
	for (i = 0; i < ARRAY_SIZE(handle); i++)
		if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i])
			break;
	if (i >= ARRAY_SIZE(handle))
		return -ENXIO;
	return i;
}

/*
 * Top-level bus-descriptor ->ndctl() entry point for the simulated
 * bus: dispatches dimm-scoped (nvdimm != NULL) and bus-scoped commands
 * to the handlers above.  ND_CMD_CALL packages are unwrapped so @buf
 * and @buf_len refer to the embedded payload.
 */
static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	unsigned int func = cmd;
	int i, rc = 0, __cmd_rc;

	/* let handlers assume cmd_rc is always writable */
	if (!cmd_rc)
		cmd_rc = &__cmd_rc;
	*cmd_rc = 0;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);

		if (!nfit_mem)
			return -ENOTTY;

		if (cmd == ND_CMD_CALL) {
			struct nd_cmd_pkg *call_pkg = buf;

			/* unwrap the package: dispatch on the inner command */
			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;
			func = call_pkg->nd_command;
			if (call_pkg->nd_family != nfit_mem->family)
				return -ENOTTY;

			i = get_dimm(nfit_mem, func);
			if (i < 0)
				return i;

			switch (func) {
			case ND_INTEL_ENABLE_LSS_STATUS:
				rc = nd_intel_test_cmd_set_lss_status(t,
						buf, buf_len);
				break;
			case ND_INTEL_FW_GET_INFO:
				rc = nd_intel_test_get_fw_info(t, buf,
						buf_len, i - t->dcr_idx);
				break;
			case ND_INTEL_FW_START_UPDATE:
				rc = nd_intel_test_start_update(t, buf,
						buf_len, i - t->dcr_idx);
				break;
			case
ND_INTEL_FW_SEND_DATA:
				rc = nd_intel_test_send_data(t, buf,
						buf_len, i - t->dcr_idx);
				break;
			case ND_INTEL_FW_FINISH_UPDATE:
				rc = nd_intel_test_finish_fw(t, buf,
						buf_len, i - t->dcr_idx);
				break;
			case ND_INTEL_FW_FINISH_QUERY:
				rc = nd_intel_test_finish_query(t, buf,
						buf_len, i - t->dcr_idx);
				break;
			case ND_INTEL_SMART:
				rc = nfit_test_cmd_smart(buf, buf_len,
						&t->smart[i - t->dcr_idx]);
				break;
			case ND_INTEL_SMART_THRESHOLD:
				rc = nfit_test_cmd_smart_threshold(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx]);
				break;
			case ND_INTEL_SMART_SET_THRESHOLD:
				rc = nfit_test_cmd_smart_set_threshold(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx],
						&t->smart[i - t->dcr_idx],
						&t->pdev.dev, t->dimm_dev[i]);
				break;
			case ND_INTEL_SMART_INJECT:
				rc = nfit_test_cmd_smart_inject(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx],
						&t->smart[i - t->dcr_idx],
						&t->pdev.dev, t->dimm_dev[i]);
				break;
			default:
				return -ENOTTY;
			}
			/* apply per-dimm failure injection last */
			return override_return_code(i, func, rc);
		}

		/* non-packaged dimm commands must be advertised in both masks */
		if (!test_bit(cmd, &cmd_mask)
				|| !test_bit(func, &nfit_mem->dsm_mask))
			return -ENOTTY;

		i = get_dimm(nfit_mem, func);
		if (i < 0)
			return i;

		switch (func) {
		case ND_CMD_GET_CONFIG_SIZE:
			rc = nfit_test_cmd_get_config_size(buf, buf_len);
			break;
		case ND_CMD_GET_CONFIG_DATA:
			rc = nfit_test_cmd_get_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		case ND_CMD_SET_CONFIG_DATA:
			rc = nfit_test_cmd_set_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		default:
			return -ENOTTY;
		}
		return override_return_code(i, func, rc);
	} else {
		/* bus-scoped commands */
		struct ars_state *ars_state = &t->ars_state;
		struct nd_cmd_pkg *call_pkg = buf;

		if (!nd_desc)
			return -ENOTTY;

		if (cmd ==
ND_CMD_CALL) {
			func = call_pkg->nd_command;

			/* unwrap the package payload, as in the dimm path */
			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;

			switch (func) {
			case NFIT_CMD_TRANSLATE_SPA:
				rc = nfit_test_cmd_translate_spa(
					acpi_desc->nvdimm_bus, buf, buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_SET:
				rc = nfit_test_cmd_ars_error_inject(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_CLEAR:
				rc = nfit_test_cmd_ars_inject_clear(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_GET:
				rc = nfit_test_cmd_ars_inject_status(t, buf,
					buf_len);
				return rc;
			default:
				return -ENOTTY;
			}
		}

		/* NOTE(review): nd_desc was already NULL-checked above;
		 * the re-check here is redundant but harmless */
		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
			return -ENOTTY;

		switch (func) {
		case ND_CMD_ARS_CAP:
			rc = nfit_test_cmd_ars_cap(buf, buf_len);
			break;
		case ND_CMD_ARS_START:
			rc = nfit_test_cmd_ars_start(t, ars_state, buf,
					buf_len, cmd_rc);
			break;
		case ND_CMD_ARS_STATUS:
			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
					cmd_rc);
			break;
		case ND_CMD_CLEAR_ERROR:
			rc = nfit_test_cmd_clear_error(t, buf, buf_len, cmd_rc);
			break;
		default:
			return -ENOTTY;
		}
	}

	return rc;
}

/* protects the per-instance resources lists across all bus instances */
static DEFINE_SPINLOCK(nfit_test_lock);
static struct nfit_test *instances[NUM_NFITS];

/* devm action: unlink a tracked allocation and release its backing */
static void release_nfit_res(void *data)
{
	struct nfit_test_resource *nfit_res = data;

	spin_lock(&nfit_test_lock);
	list_del(&nfit_res->list);
	spin_unlock(&nfit_test_lock);

	/* only >= DIMM_SIZE allocations came from the gen_pool */
	if (resource_size(&nfit_res->res) >= DIMM_SIZE)
		gen_pool_free(nfit_pool, nfit_res->res.start,
				resource_size(&nfit_res->res));
	vfree(nfit_res->buf);
	kfree(nfit_res);
}

/*
 * Register a vmalloc'd buffer plus its fake DMA address as a tracked
 * nfit_test_resource; cleanup is tied to the device via devm.
 */
static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
		void *buf)
{
	struct device *dev = &t->pdev.dev;
	struct
nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res), 1150 GFP_KERNEL); 1151 int rc; 1152 1153 if (!buf || !nfit_res || !*dma) 1154 goto err; 1155 rc = devm_add_action(dev, release_nfit_res, nfit_res); 1156 if (rc) 1157 goto err; 1158 INIT_LIST_HEAD(&nfit_res->list); 1159 memset(buf, 0, size); 1160 nfit_res->dev = dev; 1161 nfit_res->buf = buf; 1162 nfit_res->res.start = *dma; 1163 nfit_res->res.end = *dma + size - 1; 1164 nfit_res->res.name = "NFIT"; 1165 spin_lock_init(&nfit_res->lock); 1166 INIT_LIST_HEAD(&nfit_res->requests); 1167 spin_lock(&nfit_test_lock); 1168 list_add(&nfit_res->list, &t->resources); 1169 spin_unlock(&nfit_test_lock); 1170 1171 return nfit_res->buf; 1172 err: 1173 if (*dma && size >= DIMM_SIZE) 1174 gen_pool_free(nfit_pool, *dma, size); 1175 if (buf) 1176 vfree(buf); 1177 kfree(nfit_res); 1178 return NULL; 1179 } 1180 1181 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma) 1182 { 1183 struct genpool_data_align data = { 1184 .align = SZ_128M, 1185 }; 1186 void *buf = vmalloc(size); 1187 1188 if (size >= DIMM_SIZE) 1189 *dma = gen_pool_alloc_algo(nfit_pool, size, 1190 gen_pool_first_fit_align, &data); 1191 else 1192 *dma = (unsigned long) buf; 1193 return __test_alloc(t, size, dma, buf); 1194 } 1195 1196 static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr) 1197 { 1198 int i; 1199 1200 for (i = 0; i < ARRAY_SIZE(instances); i++) { 1201 struct nfit_test_resource *n, *nfit_res = NULL; 1202 struct nfit_test *t = instances[i]; 1203 1204 if (!t) 1205 continue; 1206 spin_lock(&nfit_test_lock); 1207 list_for_each_entry(n, &t->resources, list) { 1208 if (addr >= n->res.start && (addr < n->res.start 1209 + resource_size(&n->res))) { 1210 nfit_res = n; 1211 break; 1212 } else if (addr >= (unsigned long) n->buf 1213 && (addr < (unsigned long) n->buf 1214 + resource_size(&n->res))) { 1215 nfit_res = n; 1216 break; 1217 } 1218 } 1219 spin_unlock(&nfit_test_lock); 1220 if (nfit_res) 1221 return nfit_res; 1222 
} 1223 1224 return NULL; 1225 } 1226 1227 static int ars_state_init(struct device *dev, struct ars_state *ars_state) 1228 { 1229 /* for testing, only store up to n records that fit within 4k */ 1230 ars_state->ars_status = devm_kzalloc(dev, 1231 sizeof(struct nd_cmd_ars_status) + SZ_4K, GFP_KERNEL); 1232 if (!ars_state->ars_status) 1233 return -ENOMEM; 1234 spin_lock_init(&ars_state->lock); 1235 return 0; 1236 } 1237 1238 static void put_dimms(void *data) 1239 { 1240 struct nfit_test *t = data; 1241 int i; 1242 1243 for (i = 0; i < t->num_dcr; i++) 1244 if (t->dimm_dev[i]) 1245 device_unregister(t->dimm_dev[i]); 1246 } 1247 1248 static struct class *nfit_test_dimm; 1249 1250 static int dimm_name_to_id(struct device *dev) 1251 { 1252 int dimm; 1253 1254 if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1) 1255 return -ENXIO; 1256 return dimm; 1257 } 1258 1259 static ssize_t handle_show(struct device *dev, struct device_attribute *attr, 1260 char *buf) 1261 { 1262 int dimm = dimm_name_to_id(dev); 1263 1264 if (dimm < 0) 1265 return dimm; 1266 1267 return sprintf(buf, "%#x\n", handle[dimm]); 1268 } 1269 DEVICE_ATTR_RO(handle); 1270 1271 static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr, 1272 char *buf) 1273 { 1274 int dimm = dimm_name_to_id(dev); 1275 1276 if (dimm < 0) 1277 return dimm; 1278 1279 return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]); 1280 } 1281 1282 static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr, 1283 const char *buf, size_t size) 1284 { 1285 int dimm = dimm_name_to_id(dev); 1286 unsigned long val; 1287 ssize_t rc; 1288 1289 if (dimm < 0) 1290 return dimm; 1291 1292 rc = kstrtol(buf, 0, &val); 1293 if (rc) 1294 return rc; 1295 1296 dimm_fail_cmd_flags[dimm] = val; 1297 return size; 1298 } 1299 static DEVICE_ATTR_RW(fail_cmd); 1300 1301 static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr, 1302 char *buf) 1303 { 1304 int dimm = dimm_name_to_id(dev); 1305 
1306 if (dimm < 0) 1307 return dimm; 1308 1309 return sprintf(buf, "%d\n", dimm_fail_cmd_code[dimm]); 1310 } 1311 1312 static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr, 1313 const char *buf, size_t size) 1314 { 1315 int dimm = dimm_name_to_id(dev); 1316 unsigned long val; 1317 ssize_t rc; 1318 1319 if (dimm < 0) 1320 return dimm; 1321 1322 rc = kstrtol(buf, 0, &val); 1323 if (rc) 1324 return rc; 1325 1326 dimm_fail_cmd_code[dimm] = val; 1327 return size; 1328 } 1329 static DEVICE_ATTR_RW(fail_cmd_code); 1330 1331 static struct attribute *nfit_test_dimm_attributes[] = { 1332 &dev_attr_fail_cmd.attr, 1333 &dev_attr_fail_cmd_code.attr, 1334 &dev_attr_handle.attr, 1335 NULL, 1336 }; 1337 1338 static struct attribute_group nfit_test_dimm_attribute_group = { 1339 .attrs = nfit_test_dimm_attributes, 1340 }; 1341 1342 static const struct attribute_group *nfit_test_dimm_attribute_groups[] = { 1343 &nfit_test_dimm_attribute_group, 1344 NULL, 1345 }; 1346 1347 static int nfit_test_dimm_init(struct nfit_test *t) 1348 { 1349 int i; 1350 1351 if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t)) 1352 return -ENOMEM; 1353 for (i = 0; i < t->num_dcr; i++) { 1354 t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, 1355 &t->pdev.dev, 0, NULL, 1356 nfit_test_dimm_attribute_groups, 1357 "test_dimm%d", i + t->dcr_idx); 1358 if (!t->dimm_dev[i]) 1359 return -ENOMEM; 1360 } 1361 return 0; 1362 } 1363 1364 static void smart_init(struct nfit_test *t) 1365 { 1366 int i; 1367 const struct nd_intel_smart_threshold smart_t_data = { 1368 .alarm_control = ND_INTEL_SMART_SPARE_TRIP 1369 | ND_INTEL_SMART_TEMP_TRIP, 1370 .media_temperature = 40 * 16, 1371 .ctrl_temperature = 30 * 16, 1372 .spares = 5, 1373 }; 1374 1375 for (i = 0; i < t->num_dcr; i++) { 1376 memcpy(&t->smart[i], &smart_def, sizeof(smart_def)); 1377 memcpy(&t->smart_threshold[i], &smart_t_data, 1378 sizeof(smart_t_data)); 1379 } 1380 } 1381 1382 static int nfit_test0_alloc(struct 
nfit_test *t) 1383 { 1384 size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA 1385 + sizeof(struct acpi_nfit_memory_map) * NUM_MEM 1386 + sizeof(struct acpi_nfit_control_region) * NUM_DCR 1387 + offsetof(struct acpi_nfit_control_region, 1388 window_size) * NUM_DCR 1389 + sizeof(struct acpi_nfit_data_region) * NUM_BDW 1390 + (sizeof(struct acpi_nfit_flush_address) 1391 + sizeof(u64) * NUM_HINTS) * NUM_DCR 1392 + sizeof(struct acpi_nfit_capabilities); 1393 int i; 1394 1395 t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma); 1396 if (!t->nfit_buf) 1397 return -ENOMEM; 1398 t->nfit_size = nfit_size; 1399 1400 t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]); 1401 if (!t->spa_set[0]) 1402 return -ENOMEM; 1403 1404 t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]); 1405 if (!t->spa_set[1]) 1406 return -ENOMEM; 1407 1408 t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]); 1409 if (!t->spa_set[2]) 1410 return -ENOMEM; 1411 1412 for (i = 0; i < t->num_dcr; i++) { 1413 t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]); 1414 if (!t->dimm[i]) 1415 return -ENOMEM; 1416 1417 t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]); 1418 if (!t->label[i]) 1419 return -ENOMEM; 1420 sprintf(t->label[i], "label%d", i); 1421 1422 t->flush[i] = test_alloc(t, max(PAGE_SIZE, 1423 sizeof(u64) * NUM_HINTS), 1424 &t->flush_dma[i]); 1425 if (!t->flush[i]) 1426 return -ENOMEM; 1427 } 1428 1429 for (i = 0; i < t->num_dcr; i++) { 1430 t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]); 1431 if (!t->dcr[i]) 1432 return -ENOMEM; 1433 } 1434 1435 t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma); 1436 if (!t->_fit) 1437 return -ENOMEM; 1438 1439 if (nfit_test_dimm_init(t)) 1440 return -ENOMEM; 1441 smart_init(t); 1442 return ars_state_init(&t->pdev.dev, &t->ars_state); 1443 } 1444 1445 static int nfit_test1_alloc(struct nfit_test *t) 1446 { 1447 size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2 1448 + 
		sizeof(struct acpi_nfit_memory_map) * 2
		+ offsetof(struct acpi_nfit_control_region, window_size) * 2;
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	for (i = 0; i < t->num_dcr; i++) {
		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);
	}

	t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	if (nfit_test_dimm_init(t))
		return -ENOMEM;
	smart_init(t);
	return ars_state_init(&t->pdev.dev, &t->ars_state);
}

/* Fill the identification fields shared by every DCR in this fixture. */
static void dcr_common_init(struct acpi_nfit_control_region *dcr)
{
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->valid_fields = 1;
	dcr->manufacturing_location = 0xa;
	dcr->manufacturing_date = cpu_to_be16(2016);
}

/*
 * Build the bus0 NFIT table in t->nfit_buf, realizing the topology from
 * the diagram at the top of this file: 10 SPA ranges, 14 memory-device
 * mappings, 8 control regions, 4 block-data-windows, 4 flush-hint
 * tables, and a platform-capabilities structure — plus an extra DIMM's
 * worth of structures when hotplug is being simulated.  Entries are
 * appended back-to-back; 'offset' accumulates each header.length so the
 * final value must equal the size computed in nfit_test0_alloc().
 */
static void nfit_test0_setup(struct nfit_test *t)
{
	const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
		+ (sizeof(u64) * NUM_HINTS);
	struct acpi_nfit_desc *acpi_desc;
	struct acpi_nfit_memory_map *memdev;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_data_region *bdw;
	struct acpi_nfit_flush_address *flush;
	struct acpi_nfit_capabilities *pcap;
	unsigned int offset = 0, i;

	/*
	 * spa0 (interleave first half of dimm0 and dimm1, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 *
	 * Range indices are written as N+1 throughout; presumably index
	 * 0 is reserved to mean "not mapped" — confirm against the ACPI
	 * NFIT specification.
	 */
	spa = nfit_buf;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA0_SIZE;
	offset += spa->header.length;

	/*
	 * spa1 (interleave last half of the 4 DIMMS, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 1+1;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA1_SIZE;
	offset += spa->header.length;

	/* spa2 (dcr0) dimm0 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 2+1;
	spa->address = t->dcr_dma[0];
	spa->length = DCR_SIZE;
	offset += spa->header.length;

	/* spa3 (dcr1) dimm1 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 3+1;
	spa->address = t->dcr_dma[1];
	spa->length = DCR_SIZE;
	offset += spa->header.length;

	/* spa4 (dcr2) dimm2 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 4+1;
	spa->address = t->dcr_dma[2];
	spa->length = DCR_SIZE;
	offset += spa->header.length;

	/* spa5 (dcr3) dimm3 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 5+1;
	spa->address = t->dcr_dma[3];
	spa->length = DCR_SIZE;
	offset += spa->header.length;

	/* spa6 (bdw for dcr0) dimm0 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 6+1;
	spa->address = t->dimm_dma[0];
	spa->length = DIMM_SIZE;
	offset += spa->header.length;

	/* spa7 (bdw for dcr1) dimm1 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 7+1;
	spa->address = t->dimm_dma[1];
	spa->length = DIMM_SIZE;
	offset += spa->header.length;

	/* spa8 (bdw for dcr2) dimm2 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 8+1;
	spa->address = t->dimm_dma[2];
	spa->length = DIMM_SIZE;
	offset += spa->header.length;

	/* spa9 (bdw for dcr3) dimm3 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 9+1;
	spa->address = t->dimm_dma[3];
	spa->length = DIMM_SIZE;
	offset += spa->header.length;

	/*
	 * mem-region0 (spa0, dimm0)
	 *
	 * NOTE(review): the region_offset values below (1, 1<<8, 1<<16,
	 * ...) look like unique per-mapping markers rather than real
	 * byte offsets — presumably to make interleave-set cookies
	 * distinct; confirm.
	 */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 4+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = 1;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;
	offset += memdev->header.length;

	/* mem-region1 (spa0, dimm1) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 5+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = (1 << 8);
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;
	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
	offset += memdev->header.length;

	/* mem-region2 (spa1, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 4+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = (1 << 16);
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;
	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
	offset += memdev->header.length;

	/* mem-region3 (spa1, dimm1) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 5+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = (1 << 24);
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;
	offset += memdev->header.length;

	/* mem-region4 (spa1, dimm2) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 6+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = (1ULL << 32);
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;
	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
	offset += memdev->header.length;

	/* mem-region5 (spa1, dimm3) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 7+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = (1ULL << 40);
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;
	offset += memdev->header.length;

	/* mem-region6 (spa/dcr0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 2+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region7 (spa/dcr1, dimm1) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 3+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region8 (spa/dcr2, dimm2) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 4+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region9 (spa/dcr3, dimm3) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 5+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region10 (spa/bdw0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 6+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region11 (spa/bdw1, dimm1) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 7+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region12 (spa/bdw2, dimm2) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 8+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region13 (spa/bdw3, dimm3) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 9+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
	offset += memdev->header.length;

	/* dcr-descriptor0: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[0];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;

	/* dcr-descriptor1: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 1+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[1];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;

	/* dcr-descriptor2: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 2+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[2];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;

	/* dcr-descriptor3: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 3+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[3];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;

	/*
	 * dcr-descriptor0: pmem
	 *
	 * The pmem descriptors are truncated at window_size — a DCR with
	 * no block windows only carries the fields before that member.
	 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 4+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[0];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* dcr-descriptor1: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 5+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[1];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* dcr-descriptor2: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 6+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[2];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* dcr-descriptor3: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 7+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[3];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* bdw0 (spa/dcr0, dimm0) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 0+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;

	/* bdw1 (spa/dcr1, dimm1) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 1+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;

	/* bdw2 (spa/dcr2, dimm2) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 2+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;

	/* bdw3 (spa/dcr3, dimm3) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 3+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;

	/* flush0 (dimm0) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[0];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
	offset += flush->header.length;

	/* flush1 (dimm1) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[1];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
	offset += flush->header.length;

	/* flush2 (dimm2) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[2];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
	offset += flush->header.length;

	/* flush3 (dimm3) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[3];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
	offset += flush->header.length;

	/* platform capabilities */
	pcap = nfit_buf + offset;
	pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
	pcap->header.length = sizeof(*pcap);
	pcap->highest_capability = 1;
	pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH;
	offset += pcap->header.length;

	if (t->setup_hotplug) {
		/* dcr-descriptor4: blk */
		dcr = nfit_buf + offset;
		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
		dcr->header.length = sizeof(*dcr);
		dcr->region_index = 8+1;
		dcr_common_init(dcr);
		dcr->serial_number = ~handle[4];
		dcr->code = NFIT_FIC_BLK;
		dcr->windows = 1;
		dcr->window_size = DCR_SIZE;
		dcr->command_offset = 0;
		dcr->command_size = 8;
		dcr->status_offset = 8;
		dcr->status_size = 4;
		offset += dcr->header.length;

		/* dcr-descriptor4: pmem */
		dcr = nfit_buf + offset;
		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
		dcr->header.length = offsetof(struct acpi_nfit_control_region,
				window_size);
		dcr->region_index = 9+1;
		dcr_common_init(dcr);
		dcr->serial_number = ~handle[4];
		dcr->code = NFIT_FIC_BYTEN;
		dcr->windows = 0;
		offset += dcr->header.length;

		/* bdw4 (spa/dcr4, dimm4) */
		bdw = nfit_buf + offset;
		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
		bdw->header.length = sizeof(*bdw);
		bdw->region_index = 8+1;
		bdw->windows = 1;
		bdw->offset = 0;
		bdw->size = BDW_SIZE;
		bdw->capacity = DIMM_SIZE;
		bdw->start_address = 0;
		offset += bdw->header.length;

		/* spa10 (dcr4) dimm4 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
		spa->range_index = 10+1;
		spa->address = t->dcr_dma[4];
		spa->length = DCR_SIZE;
		offset += spa->header.length;

		/*
		 * spa11 (single-dimm interleave for hotplug, note storage
		 * does not actually alias the related block-data-window
		 * regions)
		 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
		spa->range_index = 11+1;
		spa->address = t->spa_set_dma[2];
		spa->length = SPA0_SIZE;
		offset += spa->header.length;

		/* spa12 (bdw for dcr4) dimm4 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
		spa->range_index = 12+1;
		spa->address = t->dimm_dma[4];
		spa->length = DIMM_SIZE;
		offset += spa->header.length;

		/* mem-region14 (spa/dcr4, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 10+1;
		memdev->region_index = 8+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;
		offset += memdev->header.length;

		/* mem-region15 (spa11, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 11+1;
		memdev->region_index = 9+1;
		memdev->region_size = SPA0_SIZE;
		memdev->region_offset = (1ULL << 48);
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;
		memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
		offset += memdev->header.length;

		/* mem-region16 (spa/bdw4, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 12+1;
		memdev->region_index = 8+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;
		offset += memdev->header.length;

		/* flush4 (dimm4) */
		flush = nfit_buf + offset;
		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
		flush->header.length = flush_hint_size;
		flush->device_handle = handle[4];
		flush->hint_count = NUM_HINTS;
		for (i = 0; i < NUM_HINTS; i++)
			flush->hint_address[i] = t->flush_dma[4]
				+ i * sizeof(u64);
		offset += flush->header.length;

		/*
		 * sanity check to make sure we've filled the buffer
		 * (nfit_size from nfit_test0_alloc() accounts for the
		 * hotplug structures, so this only holds in this branch)
		 */
		WARN_ON(offset != t->nfit_size);
	}

	t->nfit_filled = offset;

	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
			SPA0_SIZE);

	/* force-enable every command this fixture emulates */
	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART_INJECT, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
	set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_FINISH_UPDATE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_FINISH_QUERY, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
}

/*
 * Build the bus1 NFIT table: a flat PMEM range with no block-window
 * aliasing, a virtual-CD range, and two DIMMs exercising various
 * health/map-failure flags.
 */
static void nfit_test1_setup(struct nfit_test *t)
{
	size_t offset;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_desc *acpi_desc;

	offset = 0;
	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;
	offset += spa->header.length;

	/* virtual cd region */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
	spa->range_index = 0;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA_VCD_SIZE;
	offset += spa->header.length;

	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[5];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
memdev->interleave_index = 0; 2258 memdev->interleave_ways = 1; 2259 memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED 2260 | ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED 2261 | ACPI_NFIT_MEM_NOT_ARMED; 2262 offset += memdev->header.length; 2263 2264 /* dcr-descriptor0 */ 2265 dcr = nfit_buf + offset; 2266 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 2267 dcr->header.length = offsetof(struct acpi_nfit_control_region, 2268 window_size); 2269 dcr->region_index = 0+1; 2270 dcr_common_init(dcr); 2271 dcr->serial_number = ~handle[5]; 2272 dcr->code = NFIT_FIC_BYTE; 2273 dcr->windows = 0; 2274 offset += dcr->header.length; 2275 2276 memdev = nfit_buf + offset; 2277 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 2278 memdev->header.length = sizeof(*memdev); 2279 memdev->device_handle = handle[6]; 2280 memdev->physical_id = 0; 2281 memdev->region_id = 0; 2282 memdev->range_index = 0; 2283 memdev->region_index = 0+2; 2284 memdev->region_size = SPA2_SIZE; 2285 memdev->region_offset = 0; 2286 memdev->address = 0; 2287 memdev->interleave_index = 0; 2288 memdev->interleave_ways = 1; 2289 memdev->flags = ACPI_NFIT_MEM_MAP_FAILED; 2290 offset += memdev->header.length; 2291 2292 /* dcr-descriptor1 */ 2293 dcr = nfit_buf + offset; 2294 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 2295 dcr->header.length = offsetof(struct acpi_nfit_control_region, 2296 window_size); 2297 dcr->region_index = 0+2; 2298 dcr_common_init(dcr); 2299 dcr->serial_number = ~handle[6]; 2300 dcr->code = NFIT_FIC_BYTE; 2301 dcr->windows = 0; 2302 offset += dcr->header.length; 2303 2304 /* sanity check to make sure we've filled the buffer */ 2305 WARN_ON(offset != t->nfit_size); 2306 2307 t->nfit_filled = offset; 2308 2309 post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0], 2310 SPA2_SIZE); 2311 2312 acpi_desc = &t->acpi_desc; 2313 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en); 2314 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en); 2315 
set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); 2316 set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); 2317 set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en); 2318 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en); 2319 set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); 2320 set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); 2321 } 2322 2323 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, 2324 void *iobuf, u64 len, int rw) 2325 { 2326 struct nfit_blk *nfit_blk = ndbr->blk_provider_data; 2327 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 2328 struct nd_region *nd_region = &ndbr->nd_region; 2329 unsigned int lane; 2330 2331 lane = nd_region_acquire_lane(nd_region); 2332 if (rw) 2333 memcpy(mmio->addr.base + dpa, iobuf, len); 2334 else { 2335 memcpy(iobuf, mmio->addr.base + dpa, len); 2336 2337 /* give us some some coverage of the arch_invalidate_pmem() API */ 2338 arch_invalidate_pmem(mmio->addr.base + dpa, len); 2339 } 2340 nd_region_release_lane(nd_region, lane); 2341 2342 return 0; 2343 } 2344 2345 static unsigned long nfit_ctl_handle; 2346 2347 union acpi_object *result; 2348 2349 static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle, 2350 const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4) 2351 { 2352 if (handle != &nfit_ctl_handle) 2353 return ERR_PTR(-ENXIO); 2354 2355 return result; 2356 } 2357 2358 static int setup_result(void *buf, size_t size) 2359 { 2360 result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL); 2361 if (!result) 2362 return -ENOMEM; 2363 result->package.type = ACPI_TYPE_BUFFER, 2364 result->buffer.pointer = (void *) (result + 1); 2365 result->buffer.length = size; 2366 memcpy(result->buffer.pointer, buf, size); 2367 memset(buf, 0, size); 2368 return 0; 2369 } 2370 2371 static int nfit_ctl_test(struct device *dev) 2372 { 2373 int rc, cmd_rc; 2374 struct nvdimm *nvdimm; 2375 struct acpi_device 
*adev;
	struct nfit_mem *nfit_mem;
	struct nd_ars_record *record;
	struct acpi_nfit_desc *acpi_desc;
	const u64 test_val = 0x0123456789abcdefULL;
	unsigned long mask, cmd_size, offset;
	/* one backing buffer reused for every command payload in this test */
	union {
		struct nd_cmd_get_config_size cfg_size;
		struct nd_cmd_clear_error clear_err;
		struct nd_cmd_ars_status ars_stat;
		struct nd_cmd_ars_cap ars_cap;
		char buf[sizeof(struct nd_cmd_ars_status)
			+ sizeof(struct nd_ars_record)];
	} cmds;

	/* fake acpi_device whose handle routes to nfit_test_evaluate_dsm() */
	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	*adev = (struct acpi_device) {
		.handle = &nfit_ctl_handle,
		.dev = {
			.init_name = "test-adev",
		},
	};

	/* minimal descriptor advertising the bus commands under test */
	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	*acpi_desc = (struct acpi_nfit_desc) {
		.nd_desc = {
			.cmd_mask = 1UL << ND_CMD_ARS_CAP
				| 1UL << ND_CMD_ARS_START
				| 1UL << ND_CMD_ARS_STATUS
				| 1UL << ND_CMD_CLEAR_ERROR
				| 1UL << ND_CMD_CALL,
			.module = THIS_MODULE,
			.provider_name = "ACPI.NFIT",
			.ndctl = acpi_nfit_ctl,
			.bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
				| 1UL << NFIT_CMD_ARS_INJECT_SET
				| 1UL << NFIT_CMD_ARS_INJECT_CLEAR
				| 1UL << NFIT_CMD_ARS_INJECT_GET,
		},
		.dev = &adev->dev,
	};

	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
	if (!nfit_mem)
		return -ENOMEM;

	/* DIMM-scope command mask shared by nfit_mem and nvdimm below */
	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
		| 1UL << ND_CMD_VENDOR;
	*nfit_mem = (struct nfit_mem) {
		.adev = adev,
		.family = NVDIMM_FAMILY_INTEL,
		.dsm_mask = mask,
	};

	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
	if (!nvdimm)
		return -ENOMEM;
	*nvdimm = (struct nvdimm) {
		.provider_data = nfit_mem,
		.cmd_mask = mask,
		.dev = {
			.init_name = "test-dimm",
		},
	};


	/* basic checkout of a typical 'get config size' command */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 0,
		.config_size = SZ_128K,
		.max_xfer = SZ_4K,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);

	/* the staged payload must round-trip back into cmds unchanged */
	if (rc < 0 || cmd_rc || cmds.cfg_size.status != 0
			|| cmds.cfg_size.config_size != SZ_128K
			|| cmds.cfg_size.max_xfer != SZ_4K) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test ars_status with zero output */
	cmd_size = offsetof(struct nd_cmd_ars_status, address);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = 0,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test ars_cap with benign extended status */
	cmd_size = sizeof(cmds.ars_cap);
	cmds.ars_cap = (struct nd_cmd_ars_cap) {
		.status = ND_ARS_PERSISTENT << 16,
	};
	/* stage the reply starting at the 'status' field only */
	offset = offsetof(struct nd_cmd_ars_cap, status);
	rc = setup_result(cmds.buf + offset, cmd_size - offset);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test ars_status with 'status' trimmed from 'out_length' */
	cmd_size =
nd_cmd_clear_error) {
		.length = 512,
		.cleared = 512,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
			cmds.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	return 0;
}

/*
 * Bind a simulated nfit_test platform device: allocate the per-DIMM and
 * per-region backing resources, build the NFIT via the instance's
 * setup() hook, register it with the ACPI NFIT core, and (for bus 0)
 * rebuild the table with hotplug entries and fire a change notification.
 */
static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	struct nfit_mem *nfit_mem;
	union acpi_object *obj;
	int rc;

	/* run the acpi_nfit_ctl() unit test once, on the first instance */
	if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
		rc = nfit_ctl_test(&pdev->dev);
		if (rc)
			return rc;
	}

	nfit_test = to_nfit_test(&pdev->dev);

	/* common alloc */
	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->label_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->dcr = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
		nfit_test->dcr_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->smart = devm_kcalloc(dev, num,
				sizeof(struct nd_intel_smart), GFP_KERNEL);
		nfit_test->smart_threshold = devm_kcalloc(dev, num,
				sizeof(struct nd_intel_smart_threshold),
				GFP_KERNEL);
		nfit_test->fw = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_fw), GFP_KERNEL);
		/*
		 * NOTE(review): 'smart' and 'smart_threshold' are allocated
		 * above but never checked here — a failed allocation would
		 * slip through this -ENOMEM gate.
		 */
		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
				&& nfit_test->label_dma && nfit_test->dcr
				&& nfit_test->dcr_dma && nfit_test->flush
				&& nfit_test->flush_dma
				&& nfit_test->fw)
			/* pass */;
		else
			return -ENOMEM;
	}

	if (nfit_test->num_pm) {
		int num = nfit_test->num_pm;

		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->spa_set && nfit_test->spa_set_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	/* per-nfit specific alloc */
	if (nfit_test->alloc(nfit_test))
		return -ENOMEM;

	nfit_test->setup(nfit_test);
	acpi_desc = &nfit_test->acpi_desc;
	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
	acpi_desc->blk_do_io = nfit_test_blk_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = NULL;
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = nfit_test_ctl;

	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
			nfit_test->nfit_filled);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc);
	if (rc)
		return rc;

	/* only instance 0 exercises the hotplug / notification path */
	if (nfit_test->setup != nfit_test0_setup)
		return 0;

	nfit_test->setup_hotplug = 1;
	nfit_test->setup(nfit_test);

	/* publish the regenerated table as the new _FIT and notify */
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = nfit_test->nfit_size;
	obj->buffer.pointer = nfit_test->nfit_buf;
	*(nfit_test->_fit) = obj;
	__acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);

	/* associate dimm devices with nfit_mem data for notification testing */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		u32 nfit_handle =
__to_nfit_memdev(nfit_mem)->device_handle;
		int i;

		for (i = 0; i < ARRAY_SIZE(handle); i++)
			if (nfit_handle == handle[i])
				dev_set_drvdata(nfit_test->dimm_dev[i],
						nfit_mem);
	}
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}

/* nothing to tear down beyond devm-managed resources */
static int nfit_test_remove(struct platform_device *pdev)
{
	return 0;
}

static void nfit_test_release(struct device *dev)
{
	struct nfit_test *nfit_test = to_nfit_test(dev);

	kfree(nfit_test);
}

static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.remove = nfit_test_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};

/* page-aligned scratch page used as both source and destination below */
static char mcsafe_buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

enum INJECT {
	INJECT_NONE,
	INJECT_SRC,
	INJECT_DST,
};

/* pre-fill: destination all 0xff, source bytes 0..size-1 */
static void mcsafe_test_init(char *dst, char *src, size_t size)
{
	size_t i;

	memset(dst, 0xff, size);
	for (i = 0; i < size; i++)
		src[i] = (char) i;
}

/*
 * Check that the first size-rem bytes were copied (pattern 0..) and the
 * trailing rem bytes were left untouched (still 0xff).
 */
static bool mcsafe_test_validate(unsigned char *dst, unsigned char *src,
		size_t size, unsigned long rem)
{
	size_t i;

	for (i = 0; i < size - rem; i++)
		if (dst[i] != (unsigned char) i) {
			pr_info_once("%s:%d: offset: %zd got: %#x expect: %#x\n",
					__func__, __LINE__, i, dst[i],
					(unsigned char) i);
			return false;
		}
	for (i = size - rem; i < size; i++)
		if (dst[i] != 0xffU) {
			pr_info_once("%s:%d: offset: %zd got: %#x expect: 0xff\n",
					__func__, __LINE__, i, dst[i]);
			return false;
		}
	return true;
}

/*
 * Exercise __memcpy_mcsafe() with simulated poison at the source or
 * destination, sweeping the copy's alignment one byte at a time and
 * checking the reported remainder and the resulting memory contents.
 */
void mcsafe_test(void)
{
	char *inject_desc[] = { "none", "source", "destination" };
	enum INJECT inj;

	if (IS_ENABLED(CONFIG_MCSAFE_TEST)) {
		pr_info("%s: run...\n", __func__);
	} else {
		pr_info("%s: disabled, skip.\n", __func__);
		return;
	}

	for (inj = INJECT_NONE; inj <= INJECT_DST; inj++) {
		int i;

		pr_info("%s: inject: %s\n", __func__, inject_desc[inj]);
		for (i = 0; i < 512; i++) {
			unsigned long expect, rem;
			void *src, *dst;
			bool valid;

			/* all enum values handled; no default needed */
			switch (inj) {
			case INJECT_NONE:
				mcsafe_inject_src(NULL);
				mcsafe_inject_dst(NULL);
				dst = &mcsafe_buf[2048];
				src = &mcsafe_buf[1024 - i];
				expect = 0;
				break;
			case INJECT_SRC:
				mcsafe_inject_src(&mcsafe_buf[1024]);
				mcsafe_inject_dst(NULL);
				dst = &mcsafe_buf[2048];
				src = &mcsafe_buf[1024 - i];
				expect = 512 - i;
				break;
			case INJECT_DST:
				mcsafe_inject_src(NULL);
				mcsafe_inject_dst(&mcsafe_buf[2048]);
				dst = &mcsafe_buf[2048 - i];
				src = &mcsafe_buf[1024];
				expect = 512 - i;
				break;
			}

			mcsafe_test_init(dst, src, 512);
			rem = __memcpy_mcsafe(dst, src, 512);
			valid = mcsafe_test_validate(dst, src, 512, expect);
			if (rem == expect && valid)
				continue;
			pr_info("%s: copy(%#lx, %#lx, %d) off: %d rem: %ld %s expect: %ld\n",
					__func__,
					((unsigned long) dst) & ~PAGE_MASK,
					((unsigned long ) src) & ~PAGE_MASK,
					512, i, rem, valid ?
"valid" : "bad",
					expect);
		}
	}

	/* leave no poison injected behind */
	mcsafe_inject_src(NULL);
	mcsafe_inject_dst(NULL);
}

/*
 * Module init: install the nfit-test shims, create the shared workqueue,
 * dimm class and SPA genpool, then register NUM_NFITS platform devices
 * (instance 0: full interleaved topology, instance 1: minimal/error
 * topology) and finally the platform driver so probe can run.
 */
static __init int nfit_test_init(void)
{
	int rc, i;

	pmem_test();
	libnvdimm_test();
	acpi_nfit_test();
	device_dax_test();
	mcsafe_test();

	/* route resource lookup and _DSM evaluation to our test fakes */
	nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
	if (IS_ERR(nfit_test_dimm)) {
		rc = PTR_ERR(nfit_test_dimm);
		goto err_register;
	}

	/* fake SPA allocator starting at 4G to avoid low physical ranges */
	nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
	if (!nfit_pool) {
		rc = -ENOMEM;
		goto err_register;
	}

	if (gen_pool_add(nfit_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
		rc = -ENOMEM;
		goto err_register;
	}

	for (i = 0; i < NUM_NFITS; i++) {
		struct nfit_test *nfit_test;
		struct platform_device *pdev;

		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
		if (!nfit_test) {
			rc = -ENOMEM;
			goto err_register;
		}
		INIT_LIST_HEAD(&nfit_test->resources);
		badrange_init(&nfit_test->badrange);
		switch (i) {
		case 0:
			nfit_test->num_pm = NUM_PM;
			nfit_test->dcr_idx = 0;
			nfit_test->num_dcr = NUM_DCR;
			nfit_test->alloc = nfit_test0_alloc;
			nfit_test->setup = nfit_test0_setup;
			break;
		case 1:
			nfit_test->num_pm = 2;
			nfit_test->dcr_idx = NUM_DCR;
			nfit_test->num_dcr = 2;
			nfit_test->alloc = nfit_test1_alloc;
			nfit_test->setup = nfit_test1_setup;
			break;
		default:
			rc = -EINVAL;
			goto err_register;
		}
		pdev = &nfit_test->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = nfit_test_release;
		rc = platform_device_register(pdev);
		if (rc) {
			/* on register failure the device owns no extra ref */
			put_device(&pdev->dev);
			goto err_register;
		}
		/* extra ref dropped in the error path / module exit */
		get_device(&pdev->dev);

		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)
			goto err_register;

		instances[i] = nfit_test;
		INIT_WORK(&nfit_test->work, uc_error_notify);
	}

	rc = platform_driver_register(&nfit_test_driver);
	if (rc)
		goto err_register;
	return 0;

 err_register:
	if (nfit_pool)
		gen_pool_destroy(nfit_pool);

	destroy_workqueue(nfit_wq);
	/* unregister before dropping the last references */
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			put_device(&instances[i]->pdev.dev);

	return rc;
}

/* Module exit: tear down in the reverse order of nfit_test_init(). */
static __exit void nfit_test_exit(void)
{
	int i;

	flush_workqueue(nfit_wq);
	destroy_workqueue(nfit_wq);
	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	platform_driver_unregister(&nfit_test_driver);
	nfit_test_teardown();

	gen_pool_destroy(nfit_pool);

	for (i = 0; i < NUM_NFITS; i++)
		put_device(&instances[i]->pdev.dev);
	class_destroy(nfit_test_dimm);
}

module_init(nfit_test_init);
module_exit(nfit_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");