/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/libnvdimm.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <nfit.h>
#include <nd.h>
#include "nfit_test.h"

/*
 * Generate an NFIT table to describe the following topology:
 *
 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
 *
 *                                    (a)        (b)            DIMM   BLK-REGION
 *           +----------+--------------+----------+---------+
 * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
 * | imc0 +--+- - - - - region0 - - - -+----------+         +
 * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
 *    |      +----------+--------------v----------v         v
 * +--+---+                            |                    |
 * | cpu0 |                                   region1
 * +--+---+                            |                    |
 *    |      +-------------------------^----------^         ^
 * +--+---+  |          blk4.0         |  pm1.0   |              2      region4
 * | imc1 +--+-------------------------+----------+         +
 * +------+  |          blk5.0         |  pm1.0   |              3      region5
 *           +-------------------------+----------+-+-------+
 *
 * *) In this layout we have four dimms and two memory controllers in one
 *    socket.  Each unique interface (BLK or PMEM) to DPA space
 *    is identified by a region device with a dynamically assigned id.
 *
 * *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
 *    A single PMEM namespace "pm0.0" is created using half of the
 *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
 *    allocate from the bottom of a region.  The unallocated portion of
 *    REGION0 aliases with REGION2 and REGION3.  That unallocated
 *    capacity is reclaimed as BLK namespaces ("blk2.0" and "blk3.0")
 *    starting at the base of each DIMM up to offset (a) in those DIMMs.
 *    "pm0.0", "blk2.0" and "blk3.0" are free-form readable names that
 *    can be assigned to a namespace.
 *
 * *) In the last portion of dimm0 and dimm1 we have an interleaved
 *    SPA range, REGION1, that spans those two dimms as well as dimm2
 *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
 *    "pm1.0"; the rest is reclaimed in 4 BLK namespaces (one for each
 *    dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
 *    "blk5.0".
 *
 * *) The portion of dimm2 and dimm3 that does not participate in the
 *    REGION1 interleaved SPA range (i.e. the DPA address below offset
 *    (b)) is also included in the "blk4.0" and "blk5.0" namespaces.
 *    Note that BLK namespaces need not be contiguous in DPA-space, and
 *    can consume aliased capacity from multiple interleave sets.
 *
 * BUS1: Legacy NVDIMM (single contiguous range)
 *
 *  region2
 * +---------------------+
 * |---------------------|
 * ||       pm2.0       ||
 * |---------------------|
 * +---------------------+
 *
 * *) An NFIT table may describe a simple system-physical-address range
 *    with no BLK aliasing.  This type of region may optionally
 *    reference an NVDIMM.
 */
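
/*
 * Capacity sketch (derived from the enum constants below): DIMM_SIZE is
 * 32M, so region0 (SPA0_SIZE = 32M, 2-way interleaved) covers the first
 * 16M of dimm0 and dimm1, while region1 (SPA1_SIZE = 64M, 4-way
 * interleaved) covers the last 16M of all four dimms.  The BLK windows
 * reclaim whatever DPA capacity the PMEM namespaces leave unallocated.
 */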
enum {
	NUM_PM = 2,
	NUM_DCR = 4,
	NUM_BDW = NUM_DCR,
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	BDW_SIZE = 64 << 8,
	DCR_SIZE = 12,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};

struct nfit_test_dcr {
	__le64 bdw_addr;
	__le32 bdw_status;
	__u8 aperature[BDW_SIZE];
};

#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))

static u32 handle[NUM_DCR] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
};

struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;
	void *nfit_buf;
	dma_addr_t nfit_dma;
	size_t nfit_size;
	int num_dcr;
	int num_pm;
	void **dimm;
	dma_addr_t *dimm_dma;
	void **flush;
	dma_addr_t *flush_dma;
	void **label;
	dma_addr_t *label_dma;
	void **spa_set;
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);
	void (*setup)(struct nfit_test *t);
};

static struct nfit_test *to_nfit_test(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct nfit_test, pdev);
}

static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->status = 0;
	nd_cmd->config_size = LABEL_SIZE;
	nd_cmd->max_xfer = SZ_4K;

	return 0;
}

static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
		*nd_cmd, unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
		return -EINVAL;

	nd_cmd->status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(nd_cmd->out_buf, label + offset, len);
	rc = buf_len - sizeof(*nd_cmd) - len;

	return rc;
}

static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
		unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	u32 *status;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
		return -EINVAL;

	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
	*status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(label + offset, nd_cmd->in_buf, len);
	rc = buf_len - sizeof(*nd_cmd) - (len + 4);

	return rc;
}

static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->max_ars_out = 256;
	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;

	return 0;
}
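
/*
 * Minimal ARS (address range scrub) emulation: 'cap' above advertises
 * support, while 'start' and 'status' below always report success with
 * zero error records.
 */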
static int nfit_test_cmd_ars_start(struct nd_cmd_ars_start *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->status = 0;

	return 0;
}

static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->out_length = 256;
	nd_cmd->num_records = 0;
	nd_cmd->status = 0;

	return 0;
}

static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	int i, rc = 0;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

		if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
			return -ENOTTY;

		/* lookup label space for the given dimm */
		for (i = 0; i < ARRAY_SIZE(handle); i++)
			if (__to_nfit_memdev(nfit_mem)->device_handle ==
					handle[i])
				break;
		if (i >= ARRAY_SIZE(handle))
			return -ENXIO;

		switch (cmd) {
		case ND_CMD_GET_CONFIG_SIZE:
			rc = nfit_test_cmd_get_config_size(buf, buf_len);
			break;
		case ND_CMD_GET_CONFIG_DATA:
			rc = nfit_test_cmd_get_config_data(buf, buf_len,
					t->label[i]);
			break;
		case ND_CMD_SET_CONFIG_DATA:
			rc = nfit_test_cmd_set_config_data(buf, buf_len,
					t->label[i]);
			break;
		default:
			return -ENOTTY;
		}
	} else {
		if (!nd_desc || !test_bit(cmd, &nd_desc->dsm_mask))
			return -ENOTTY;

		switch (cmd) {
		case ND_CMD_ARS_CAP:
			rc = nfit_test_cmd_ars_cap(buf, buf_len);
			break;
		case ND_CMD_ARS_START:
			rc = nfit_test_cmd_ars_start(buf, buf_len);
			break;
		case ND_CMD_ARS_STATUS:
			rc = nfit_test_cmd_ars_status(buf, buf_len);
			break;
		default:
			return -ENOTTY;
		}
	}

	return rc;
}

static DEFINE_SPINLOCK(nfit_test_lock);
static struct nfit_test *instances[NUM_NFITS];

static void release_nfit_res(void *data)
{
	struct nfit_test_resource *nfit_res = data;
	struct resource *res = nfit_res->res;

	spin_lock(&nfit_test_lock);
	list_del(&nfit_res->list);
	spin_unlock(&nfit_test_lock);

	if (is_vmalloc_addr(nfit_res->buf))
		vfree(nfit_res->buf);
	else
		dma_free_coherent(nfit_res->dev, resource_size(res),
				nfit_res->buf, res->start);
	kfree(res);
	kfree(nfit_res);
}

static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
		void *buf)
{
	struct device *dev = &t->pdev.dev;
	struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
			GFP_KERNEL);
	int rc;

	if (!res || !buf || !nfit_res)
		goto err;
	rc = devm_add_action(dev, release_nfit_res, nfit_res);
	if (rc)
		goto err;
	INIT_LIST_HEAD(&nfit_res->list);
	memset(buf, 0, size);
	nfit_res->dev = dev;
	nfit_res->buf = buf;
	nfit_res->res = res;
	res->start = *dma;
	res->end = *dma + size - 1;
	res->name = "NFIT";
	spin_lock(&nfit_test_lock);
	list_add(&nfit_res->list, &t->resources);
	spin_unlock(&nfit_test_lock);

	return nfit_res->buf;
 err:
	if (buf && !is_vmalloc_addr(buf))
		dma_free_coherent(dev, size, buf, *dma);
	else if (buf)
		vfree(buf);
	kfree(res);
	kfree(nfit_res);
	return NULL;
}
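
/*
 * test_alloc() backs a simulated physical range with vmalloc() memory
 * and records the kernel virtual address as the range's "physical"
 * address, while test_alloc_coherent() hands out a real DMA address.
 * Either way, nfit_test_lookup() can translate an address published in
 * the NFIT back to its backing buffer.
 */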
static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
{
	void *buf = vmalloc(size);

	*dma = (unsigned long) buf;
	return __test_alloc(t, size, dma, buf);
}

static void *test_alloc_coherent(struct nfit_test *t, size_t size,
		dma_addr_t *dma)
{
	struct device *dev = &t->pdev.dev;
	void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return __test_alloc(t, size, dma, buf);
}

static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(instances); i++) {
		struct nfit_test_resource *n, *nfit_res = NULL;
		struct nfit_test *t = instances[i];

		if (!t)
			continue;
		spin_lock(&nfit_test_lock);
		list_for_each_entry(n, &t->resources, list) {
			if (addr >= n->res->start && (addr < n->res->start
						+ resource_size(n->res))) {
				nfit_res = n;
				break;
			} else if (addr >= (unsigned long) n->buf
					&& (addr < (unsigned long) n->buf
						+ resource_size(n->res))) {
				nfit_res = n;
				break;
			}
		}
		spin_unlock(&nfit_test_lock);
		if (nfit_res)
			return nfit_res;
	}

	return NULL;
}

static int nfit_test0_alloc(struct nfit_test *t)
{
	size_t nfit_size = sizeof(struct acpi_table_nfit)
			+ sizeof(struct acpi_nfit_system_address) * NUM_SPA
			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
			+ sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	t->spa_set[1] = test_alloc_coherent(t, SPA1_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	for (i = 0; i < NUM_DCR; i++) {
		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
		if (!t->dimm[i])
			return -ENOMEM;

		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);

		t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
		if (!t->flush[i])
			return -ENOMEM;
	}

	for (i = 0; i < NUM_DCR; i++) {
		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
		if (!t->dcr[i])
			return -ENOMEM;
	}

	return 0;
}

static int nfit_test1_alloc(struct nfit_test *t)
{
	size_t nfit_size = sizeof(struct acpi_table_nfit)
		+ sizeof(struct acpi_nfit_system_address)
		+ sizeof(struct acpi_nfit_memory_map)
		+ sizeof(struct acpi_nfit_control_region);

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc_coherent(t, SPA2_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	return 0;
}

static void nfit_test_init_header(struct acpi_table_nfit *nfit, size_t size)
{
	memcpy(nfit->header.signature, ACPI_SIG_NFIT, 4);
	nfit->header.length = size;
	nfit->header.revision = 1;
	memcpy(nfit->header.oem_id, "LIBND", 6);
	memcpy(nfit->header.oem_table_id, "TEST", 5);
	nfit->header.oem_revision = 1;
	memcpy(nfit->header.asl_compiler_id, "TST", 4);
	nfit->header.asl_compiler_revision = 1;
}
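
/*
 * nfit_test0_setup() emits the sub-tables in the same order and count
 * assumed by nfit_test0_alloc(): 10 SPA ranges, 14 memory-map entries,
 * 4 DIMM control regions, 4 block-data-window regions, and 4 flush-hint
 * address tables.
 */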
static void nfit_test0_setup(struct nfit_test *t)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct acpi_nfit_memory_map *memdev;
	void *nfit_buf = t->nfit_buf;
	size_t size = t->nfit_size;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_data_region *bdw;
	struct acpi_nfit_flush_address *flush;
	unsigned int offset;

	nfit_test_init_header(nfit_buf, size);

	/*
	 * spa0 (interleave first half of dimm0 and dimm1, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA0_SIZE;

	/*
	 * spa1 (interleave last half of the 4 DIMMS, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 1+1;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA1_SIZE;

	/* spa2 (dcr0) dimm0 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 2;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 2+1;
	spa->address = t->dcr_dma[0];
	spa->length = DCR_SIZE;

	/* spa3 (dcr1) dimm1 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 3;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 3+1;
	spa->address = t->dcr_dma[1];
	spa->length = DCR_SIZE;

	/* spa4 (dcr2) dimm2 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 4;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 4+1;
	spa->address = t->dcr_dma[2];
	spa->length = DCR_SIZE;

	/* spa5 (dcr3) dimm3 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 5;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 5+1;
	spa->address = t->dcr_dma[3];
	spa->length = DCR_SIZE;

	/* spa6 (bdw for dcr0) dimm0 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 6;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 6+1;
	spa->address = t->dimm_dma[0];
	spa->length = DIMM_SIZE;

	/* spa7 (bdw for dcr1) dimm1 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 7;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 7+1;
	spa->address = t->dimm_dma[1];
	spa->length = DIMM_SIZE;

	/* spa8 (bdw for dcr2) dimm2 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 8;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 8+1;
	spa->address = t->dimm_dma[2];
	spa->length = DIMM_SIZE;

	/* spa9 (bdw for dcr3) dimm3 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 9;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 9+1;
	spa->address = t->dimm_dma[3];
	spa->length = DIMM_SIZE;

	offset = sizeof(struct acpi_table_nfit) + sizeof(*spa) * 10;
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = t->spa_set_dma[0];
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;

	/* mem-region1 (spa0, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 1+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;

	/* mem-region2 (spa1, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1];
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region3 (spa1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 1+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region4 (spa1, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 2+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region5 (spa1, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 3+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region6 (spa/dcr0, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 2+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region7 (spa/dcr1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 3+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region8 (spa/dcr2, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 4+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region9 (spa/dcr3, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 5+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region10 (spa/bdw0, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 6+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region11 (spa/bdw1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 7+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region12 (spa/bdw2, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 8+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region13 (spa/bdw3, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 9+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 0+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[0];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor1 */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 1+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[1];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor2 */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 2+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[2];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor3 */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 3+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[3];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
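
	/*
	 * Each DIMM gets a single block-data-window: a BDW_SIZE aperture
	 * that can address the full DIMM_SIZE capacity of its backing
	 * buffer.
	 */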
	/* bdw0 (spa/dcr0, dimm0) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 0+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw1 (spa/dcr1, dimm1) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 1+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw2 (spa/dcr2, dimm2) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 2+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw3 (spa/dcr3, dimm3) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 3+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
	/* flush0 (dimm0) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[0];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[0];

	/* flush1 (dimm1) */
	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[1];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[1];

	/* flush2 (dimm2) */
	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[2];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[2];

	/* flush3 (dimm3) */
	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[3];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[3];

	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->ndctl = nfit_test_ctl;
}
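
/*
 * Bus1 describes a single PMEM range fronted by a DIMM whose memory-map
 * flags report save/restore/flush failures and an observed health
 * event, giving the core a degraded device to enumerate.
 */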
static void nfit_test1_setup(struct nfit_test *t)
{
	size_t size = t->nfit_size, offset;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;

	nfit_test_init_header(nfit_buf, size);

	offset = sizeof(struct acpi_table_nfit);
	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;

	offset += sizeof(*spa);
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = 0;
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_ARMED;

	offset += sizeof(*memdev);
	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 0+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~0;
	dcr->code = 0x201;
	dcr->windows = 0;
	dcr->window_size = 0;
	dcr->command_offset = 0;
	dcr->command_size = 0;
	dcr->status_offset = 0;
	dcr->status_size = 0;
}

static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
		void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = &ndbr->nd_region;
	unsigned int lane;

	lane = nd_region_acquire_lane(nd_region);
	if (rw)
		memcpy(mmio->addr.base + dpa, iobuf, len);
	else {
		memcpy(iobuf, mmio->addr.base + dpa, len);

		/* give us some coverage of the mmio_flush_range() API */
		mmio_flush_range(mmio->addr.base + dpa, len);
	}
	nd_region_release_lane(nd_region, lane);

	return 0;
}
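
/*
 * Probe flow: allocate the per-test backing arrays, build the NFIT into
 * nfit_buf, register an nvdimm bus, and then hand the table to
 * acpi_nfit_init() for parsing as if it had come from platform
 * firmware.
 */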
static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	int rc;

	nfit_test = to_nfit_test(&pdev->dev);

	/* common alloc */
	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->label_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->dcr = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
		nfit_test->dcr_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
				&& nfit_test->label_dma && nfit_test->dcr
				&& nfit_test->dcr_dma && nfit_test->flush
				&& nfit_test->flush_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	if (nfit_test->num_pm) {
		int num = nfit_test->num_pm;

		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->spa_set && nfit_test->spa_set_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	/* per-nfit specific alloc */
	if (nfit_test->alloc(nfit_test))
		return -ENOMEM;

	nfit_test->setup(nfit_test);
	acpi_desc = &nfit_test->acpi_desc;
	acpi_desc->dev = &pdev->dev;
	acpi_desc->nfit = nfit_test->nfit_buf;
	acpi_desc->blk_do_io = nfit_test_blk_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;
	acpi_desc->nvdimm_bus = nvdimm_bus_register(&pdev->dev, nd_desc);
	if (!acpi_desc->nvdimm_bus)
		return -ENXIO;

	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}

	return 0;
}

static int nfit_test_remove(struct platform_device *pdev)
{
	struct nfit_test *nfit_test = to_nfit_test(&pdev->dev);
	struct acpi_nfit_desc *acpi_desc = &nfit_test->acpi_desc;

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);

	return 0;
}

static void nfit_test_release(struct device *dev)
{
	struct nfit_test *nfit_test = to_nfit_test(dev);

	kfree(nfit_test);
}

static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.remove = nfit_test_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif
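
/*
 * Module init registers one platform device per emulated NFIT and does
 * a one-time 128M dma_alloc_coherent() probe so that insufficient CMA
 * is reported early rather than as sporadic allocation failures later.
 */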
static __init int nfit_test_init(void)
{
	int rc, i;

	nfit_test_setup(nfit_test_lookup);

	for (i = 0; i < NUM_NFITS; i++) {
		struct nfit_test *nfit_test;
		struct platform_device *pdev;
		static int once;

		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
		if (!nfit_test) {
			rc = -ENOMEM;
			goto err_register;
		}
		INIT_LIST_HEAD(&nfit_test->resources);
		switch (i) {
		case 0:
			nfit_test->num_pm = NUM_PM;
			nfit_test->num_dcr = NUM_DCR;
			nfit_test->alloc = nfit_test0_alloc;
			nfit_test->setup = nfit_test0_setup;
			break;
		case 1:
			nfit_test->num_pm = 1;
			nfit_test->alloc = nfit_test1_alloc;
			nfit_test->setup = nfit_test1_setup;
			break;
		default:
			rc = -EINVAL;
			goto err_register;
		}
		pdev = &nfit_test->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = nfit_test_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}

		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)
			goto err_register;

		instances[i] = nfit_test;

		if (!once++) {
			dma_addr_t dma;
			void *buf;

			buf = dma_alloc_coherent(&pdev->dev, SZ_128M, &dma,
					GFP_KERNEL);
			if (!buf) {
				rc = -ENOMEM;
				dev_warn(&pdev->dev, "need 128M of free cma\n");
				goto err_register;
			}
			dma_free_coherent(&pdev->dev, SZ_128M, buf, dma);
		}
	}

	rc = platform_driver_register(&nfit_test_driver);
	if (rc)
		goto err_register;
	return 0;

 err_register:
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
	return rc;
}

static __exit void nfit_test_exit(void)
{
	int i;

	platform_driver_unregister(&nfit_test_driver);
	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
}

module_init(nfit_test_init);
module_exit(nfit_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");