1 /* 2 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of version 2 of the GNU General Public License as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #include <linux/memremap.h> 14 #include <linux/blkdev.h> 15 #include <linux/device.h> 16 #include <linux/genhd.h> 17 #include <linux/sizes.h> 18 #include <linux/slab.h> 19 #include <linux/fs.h> 20 #include <linux/mm.h> 21 #include "nd-core.h" 22 #include "pfn.h" 23 #include "nd.h" 24 25 static void nd_pfn_release(struct device *dev) 26 { 27 struct nd_region *nd_region = to_nd_region(dev->parent); 28 struct nd_pfn *nd_pfn = to_nd_pfn(dev); 29 30 dev_dbg(dev, "%s\n", __func__); 31 nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns); 32 ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id); 33 kfree(nd_pfn->uuid); 34 kfree(nd_pfn); 35 } 36 37 static struct device_type nd_pfn_device_type = { 38 .name = "nd_pfn", 39 .release = nd_pfn_release, 40 }; 41 42 bool is_nd_pfn(struct device *dev) 43 { 44 return dev ? dev->type == &nd_pfn_device_type : false; 45 } 46 EXPORT_SYMBOL(is_nd_pfn); 47 48 struct nd_pfn *to_nd_pfn(struct device *dev) 49 { 50 struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev); 51 52 WARN_ON(!is_nd_pfn(dev)); 53 return nd_pfn; 54 } 55 EXPORT_SYMBOL(to_nd_pfn); 56 57 static ssize_t mode_show(struct device *dev, 58 struct device_attribute *attr, char *buf) 59 { 60 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 61 62 switch (nd_pfn->mode) { 63 case PFN_MODE_RAM: 64 return sprintf(buf, "ram\n"); 65 case PFN_MODE_PMEM: 66 return sprintf(buf, "pmem\n"); 67 default: 68 return sprintf(buf, "none\n"); 69 } 70 } 71 72 static ssize_t mode_store(struct device *dev, 73 struct device_attribute *attr, const char *buf, size_t len) 74 { 75 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 76 ssize_t rc = 0; 77 78 device_lock(dev); 79 nvdimm_bus_lock(dev); 80 if (dev->driver) 81 rc = -EBUSY; 82 else { 83 size_t n = len - 1; 84 85 if (strncmp(buf, "pmem\n", n) == 0 86 || strncmp(buf, "pmem", n) == 0) { 87 nd_pfn->mode = PFN_MODE_PMEM; 88 } else if (strncmp(buf, "ram\n", n) == 0 89 || strncmp(buf, "ram", n) == 0) 90 nd_pfn->mode = PFN_MODE_RAM; 91 else if (strncmp(buf, "none\n", n) == 0 92 || strncmp(buf, "none", n) == 0) 93 nd_pfn->mode = PFN_MODE_NONE; 94 else 95 rc = -EINVAL; 96 } 97 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 98 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 99 nvdimm_bus_unlock(dev); 100 device_unlock(dev); 101 102 return rc ? rc : len; 103 } 104 static DEVICE_ATTR_RW(mode); 105 106 static ssize_t align_show(struct device *dev, 107 struct device_attribute *attr, char *buf) 108 { 109 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 110 111 return sprintf(buf, "%ld\n", nd_pfn->align); 112 } 113 114 static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf) 115 { 116 unsigned long val; 117 int rc; 118 119 rc = kstrtoul(buf, 0, &val); 120 if (rc) 121 return rc; 122 123 if (!is_power_of_2(val) || val < PAGE_SIZE || val > SZ_1G) 124 return -EINVAL; 125 126 if (nd_pfn->dev.driver) 127 return -EBUSY; 128 else 129 nd_pfn->align = val; 130 131 return 0; 132 } 133 134 static ssize_t align_store(struct device *dev, 135 struct device_attribute *attr, const char *buf, size_t len) 136 { 137 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 138 ssize_t rc; 139 140 device_lock(dev); 141 nvdimm_bus_lock(dev); 142 rc = __align_store(nd_pfn, buf); 143 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 144 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 145 nvdimm_bus_unlock(dev); 146 device_unlock(dev); 147 148 return rc ? rc : len; 149 } 150 static DEVICE_ATTR_RW(align); 151 152 static ssize_t uuid_show(struct device *dev, 153 struct device_attribute *attr, char *buf) 154 { 155 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 156 157 if (nd_pfn->uuid) 158 return sprintf(buf, "%pUb\n", nd_pfn->uuid); 159 return sprintf(buf, "\n"); 160 } 161 162 static ssize_t uuid_store(struct device *dev, 163 struct device_attribute *attr, const char *buf, size_t len) 164 { 165 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 166 ssize_t rc; 167 168 device_lock(dev); 169 rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len); 170 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 171 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 172 device_unlock(dev); 173 174 return rc ? rc : len; 175 } 176 static DEVICE_ATTR_RW(uuid); 177 178 static ssize_t namespace_show(struct device *dev, 179 struct device_attribute *attr, char *buf) 180 { 181 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 182 ssize_t rc; 183 184 nvdimm_bus_lock(dev); 185 rc = sprintf(buf, "%s\n", nd_pfn->ndns 186 ? dev_name(&nd_pfn->ndns->dev) : ""); 187 nvdimm_bus_unlock(dev); 188 return rc; 189 } 190 191 static ssize_t namespace_store(struct device *dev, 192 struct device_attribute *attr, const char *buf, size_t len) 193 { 194 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 195 ssize_t rc; 196 197 device_lock(dev); 198 nvdimm_bus_lock(dev); 199 rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); 200 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 201 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 202 nvdimm_bus_unlock(dev); 203 device_unlock(dev); 204 205 return rc; 206 } 207 static DEVICE_ATTR_RW(namespace); 208 209 static ssize_t resource_show(struct device *dev, 210 struct device_attribute *attr, char *buf) 211 { 212 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 213 ssize_t rc; 214 215 device_lock(dev); 216 if (dev->driver) { 217 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; 218 u64 offset = __le64_to_cpu(pfn_sb->dataoff); 219 struct nd_namespace_common *ndns = nd_pfn->ndns; 220 u32 start_pad = __le32_to_cpu(pfn_sb->start_pad); 221 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); 222 223 rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start 224 + start_pad + offset); 225 } else { 226 /* no address to convey if the pfn instance is disabled */ 227 rc = -ENXIO; 228 } 229 device_unlock(dev); 230 231 return rc; 232 } 233 static DEVICE_ATTR_RO(resource); 234 235 static ssize_t size_show(struct device *dev, 236 struct device_attribute *attr, char *buf) 237 { 238 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 239 ssize_t rc; 240 241 device_lock(dev); 242 if (dev->driver) { 243 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; 244 u64 offset = __le64_to_cpu(pfn_sb->dataoff); 245 struct nd_namespace_common *ndns = nd_pfn->ndns; 246 u32 start_pad = __le32_to_cpu(pfn_sb->start_pad); 247 u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc); 248 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); 249 250 rc = sprintf(buf, "%llu\n", (unsigned long long) 251 resource_size(&nsio->res) - start_pad 252 - end_trunc - offset); 253 } else { 254 /* no size to convey if the pfn instance is disabled */ 255 rc = -ENXIO; 256 } 257 device_unlock(dev); 258 259 return rc; 260 } 261 static DEVICE_ATTR_RO(size); 262 263 static struct attribute *nd_pfn_attributes[] = { 264 &dev_attr_mode.attr, 265 &dev_attr_namespace.attr, 266 &dev_attr_uuid.attr, 267 &dev_attr_align.attr, 268 &dev_attr_resource.attr, 269 &dev_attr_size.attr, 270 NULL, 271 }; 272 273 struct attribute_group nd_pfn_attribute_group = { 274 .attrs = nd_pfn_attributes, 275 }; 276 277 static const struct attribute_group *nd_pfn_attribute_groups[] = { 278 &nd_pfn_attribute_group, 279 &nd_device_attribute_group, 280 &nd_numa_attribute_group, 281 NULL, 282 }; 283 284 struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn, 285 struct nd_namespace_common *ndns) 286 { 287 struct device *dev = &nd_pfn->dev; 288 289 if (!nd_pfn) 290 return NULL; 291 292 nd_pfn->mode = PFN_MODE_NONE; 293 nd_pfn->align = HPAGE_SIZE; 294 dev = &nd_pfn->dev; 295 device_initialize(&nd_pfn->dev); 296 if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) { 297 dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n", 298 __func__, dev_name(ndns->claim)); 299 put_device(dev); 300 return NULL; 301 } 302 return dev; 303 } 304 305 static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region) 306 { 307 struct nd_pfn *nd_pfn; 308 struct device *dev; 309 310 nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL); 311 if (!nd_pfn) 312 return NULL; 313 314 nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL); 315 if (nd_pfn->id < 0) { 316 kfree(nd_pfn); 317 return NULL; 318 } 319 320 dev = &nd_pfn->dev; 321 dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id); 322 dev->groups = nd_pfn_attribute_groups; 323 dev->type = &nd_pfn_device_type; 324 dev->parent = &nd_region->dev; 325 326 return nd_pfn; 327 } 328 329 struct device *nd_pfn_create(struct nd_region *nd_region) 330 { 331 struct nd_pfn *nd_pfn; 332 struct device *dev; 333 334 if (!is_memory(&nd_region->dev)) 335 return NULL; 336 337 nd_pfn = nd_pfn_alloc(nd_region); 338 dev = nd_pfn_devinit(nd_pfn, NULL); 339 340 __nd_device_register(dev); 341 return dev; 342 } 343 344 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) 345 { 346 u64 checksum, offset; 347 unsigned long align; 348 enum nd_pfn_mode mode; 349 struct nd_namespace_io *nsio; 350 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; 351 struct nd_namespace_common *ndns = nd_pfn->ndns; 352 const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev); 353 354 if (!pfn_sb || !ndns) 355 return -ENODEV; 356 357 if (!is_memory(nd_pfn->dev.parent)) 358 return -ENODEV; 359 360 if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0)) 361 return -ENXIO; 362 363 if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0) 364 return -ENODEV; 365 366 checksum = le64_to_cpu(pfn_sb->checksum); 367 pfn_sb->checksum = 0; 368 if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb)) 369 return -ENODEV; 370 pfn_sb->checksum = cpu_to_le64(checksum); 371 372 if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0) 373 return -ENODEV; 374 375 if (__le16_to_cpu(pfn_sb->version_minor) < 1) { 376 pfn_sb->start_pad = 0; 377 pfn_sb->end_trunc = 0; 378 } 379 380 if (__le16_to_cpu(pfn_sb->version_minor) < 2) 381 pfn_sb->align = 0; 382 383 switch (le32_to_cpu(pfn_sb->mode)) { 384 case PFN_MODE_RAM: 385 case PFN_MODE_PMEM: 386 break; 387 default: 388 return -ENXIO; 389 } 390 391 align = le32_to_cpu(pfn_sb->align); 392 offset = le64_to_cpu(pfn_sb->dataoff); 393 if (align == 0) 394 align = 1UL << ilog2(offset); 395 mode = le32_to_cpu(pfn_sb->mode); 396 397 if (!nd_pfn->uuid) { 398 /* 399 * When probing a namepace via nd_pfn_probe() the uuid 400 * is NULL (see: nd_pfn_devinit()) we init settings from 401 * pfn_sb 402 */ 403 nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL); 404 if (!nd_pfn->uuid) 405 return -ENOMEM; 406 nd_pfn->align = align; 407 nd_pfn->mode = mode; 408 } else { 409 /* 410 * When probing a pfn / dax instance we validate the 411 * live settings against the pfn_sb 412 */ 413 if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0) 414 return -ENODEV; 415 416 /* 417 * If the uuid validates, but other settings mismatch 418 * return EINVAL because userspace has managed to change 419 * the configuration without specifying new 420 * identification. 421 */ 422 if (nd_pfn->align != align || nd_pfn->mode != mode) { 423 dev_err(&nd_pfn->dev, 424 "init failed, settings mismatch\n"); 425 dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n", 426 nd_pfn->align, align, nd_pfn->mode, 427 mode); 428 return -EINVAL; 429 } 430 } 431 432 if (align > nvdimm_namespace_capacity(ndns)) { 433 dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n", 434 align, nvdimm_namespace_capacity(ndns)); 435 return -EINVAL; 436 } 437 438 /* 439 * These warnings are verbose because they can only trigger in 440 * the case where the physical address alignment of the 441 * namespace has changed since the pfn superblock was 442 * established. 443 */ 444 nsio = to_nd_namespace_io(&ndns->dev); 445 if (offset >= resource_size(&nsio->res)) { 446 dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n", 447 dev_name(&ndns->dev)); 448 return -EBUSY; 449 } 450 451 if ((align && !IS_ALIGNED(offset, align)) 452 || !IS_ALIGNED(offset, PAGE_SIZE)) { 453 dev_err(&nd_pfn->dev, 454 "bad offset: %#llx dax disabled align: %#lx\n", 455 offset, align); 456 return -ENXIO; 457 } 458 459 return 0; 460 } 461 EXPORT_SYMBOL(nd_pfn_validate); 462 463 int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns) 464 { 465 int rc; 466 struct nd_pfn *nd_pfn; 467 struct device *pfn_dev; 468 struct nd_pfn_sb *pfn_sb; 469 struct nd_region *nd_region = to_nd_region(ndns->dev.parent); 470 471 if (ndns->force_raw) 472 return -ENODEV; 473 474 switch (ndns->claim_class) { 475 case NVDIMM_CCLASS_NONE: 476 case NVDIMM_CCLASS_PFN: 477 break; 478 default: 479 return -ENODEV; 480 } 481 482 nvdimm_bus_lock(&ndns->dev); 483 nd_pfn = nd_pfn_alloc(nd_region); 484 pfn_dev = nd_pfn_devinit(nd_pfn, ndns); 485 nvdimm_bus_unlock(&ndns->dev); 486 if (!pfn_dev) 487 return -ENOMEM; 488 pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); 489 nd_pfn = to_nd_pfn(pfn_dev); 490 nd_pfn->pfn_sb = pfn_sb; 491 rc = nd_pfn_validate(nd_pfn, PFN_SIG); 492 dev_dbg(dev, "%s: pfn: %s\n", __func__, 493 rc == 0 ? dev_name(pfn_dev) : "<none>"); 494 if (rc < 0) { 495 nd_detach_ndns(pfn_dev, &nd_pfn->ndns); 496 put_device(pfn_dev); 497 } else 498 __nd_device_register(pfn_dev); 499 500 return rc; 501 } 502 EXPORT_SYMBOL(nd_pfn_probe); 503 504 /* 505 * We hotplug memory at section granularity, pad the reserved area from 506 * the previous section base to the namespace base address. 507 */ 508 static unsigned long init_altmap_base(resource_size_t base) 509 { 510 unsigned long base_pfn = PHYS_PFN(base); 511 512 return PFN_SECTION_ALIGN_DOWN(base_pfn); 513 } 514 515 static unsigned long init_altmap_reserve(resource_size_t base) 516 { 517 unsigned long reserve = PHYS_PFN(SZ_8K); 518 unsigned long base_pfn = PHYS_PFN(base); 519 520 reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn); 521 return reserve; 522 } 523 524 static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn, 525 struct resource *res, struct vmem_altmap *altmap) 526 { 527 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; 528 u64 offset = le64_to_cpu(pfn_sb->dataoff); 529 u32 start_pad = __le32_to_cpu(pfn_sb->start_pad); 530 u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc); 531 struct nd_namespace_common *ndns = nd_pfn->ndns; 532 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); 533 resource_size_t base = nsio->res.start + start_pad; 534 struct vmem_altmap __altmap = { 535 .base_pfn = init_altmap_base(base), 536 .reserve = init_altmap_reserve(base), 537 }; 538 539 memcpy(res, &nsio->res, sizeof(*res)); 540 res->start += start_pad; 541 res->end -= end_trunc; 542 543 if (nd_pfn->mode == PFN_MODE_RAM) { 544 if (offset < SZ_8K) 545 return ERR_PTR(-EINVAL); 546 nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns); 547 altmap = NULL; 548 } else if (nd_pfn->mode == PFN_MODE_PMEM) { 549 nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res) 550 - offset) / PAGE_SIZE); 551 if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns) 552 dev_info(&nd_pfn->dev, 553 "number of pfns truncated from %lld to %ld\n", 554 le64_to_cpu(nd_pfn->pfn_sb->npfns), 555 nd_pfn->npfns); 556 memcpy(altmap, &__altmap, sizeof(*altmap)); 557 altmap->free = PHYS_PFN(offset - SZ_8K); 558 altmap->alloc = 0; 559 } else 560 return ERR_PTR(-ENXIO); 561 562 return altmap; 563 } 564 565 static int nd_pfn_init(struct nd_pfn *nd_pfn) 566 { 567 u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0; 568 struct nd_namespace_common *ndns = nd_pfn->ndns; 569 u32 start_pad = 0, end_trunc = 0; 570 resource_size_t start, size; 571 struct nd_namespace_io *nsio; 572 struct nd_region *nd_region; 573 struct nd_pfn_sb *pfn_sb; 574 unsigned long npfns; 575 phys_addr_t offset; 576 const char *sig; 577 u64 checksum; 578 int rc; 579 580 pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL); 581 if (!pfn_sb) 582 return -ENOMEM; 583 584 nd_pfn->pfn_sb = pfn_sb; 585 if (is_nd_dax(&nd_pfn->dev)) 586 sig = DAX_SIG; 587 else 588 sig = PFN_SIG; 589 rc = nd_pfn_validate(nd_pfn, sig); 590 if (rc != -ENODEV) 591 return rc; 592 593 /* no info block, do init */; 594 nd_region = to_nd_region(nd_pfn->dev.parent); 595 if (nd_region->ro) { 596 dev_info(&nd_pfn->dev, 597 "%s is read-only, unable to init metadata\n", 598 dev_name(&nd_region->dev)); 599 return -ENXIO; 600 } 601 602 memset(pfn_sb, 0, sizeof(*pfn_sb)); 603 604 /* 605 * Check if pmem collides with 'System RAM' when section aligned and 606 * trim it accordingly 607 */ 608 nsio = to_nd_namespace_io(&ndns->dev); 609 start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start); 610 size = resource_size(&nsio->res); 611 if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, 612 IORES_DESC_NONE) == REGION_MIXED) { 613 start = nsio->res.start; 614 start_pad = PHYS_SECTION_ALIGN_UP(start) - start; 615 } 616 617 start = nsio->res.start; 618 size = PHYS_SECTION_ALIGN_UP(start + size) - start; 619 if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, 620 IORES_DESC_NONE) == REGION_MIXED) { 621 size = resource_size(&nsio->res); 622 end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size); 623 } 624 625 if (start_pad + end_trunc) 626 dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n", 627 dev_name(&ndns->dev), start_pad + end_trunc); 628 629 /* 630 * Note, we use 64 here for the standard size of struct page, 631 * debugging options may cause it to be larger in which case the 632 * implementation will limit the pfns advertised through 633 * ->direct_access() to those that are included in the memmap. 634 */ 635 start += start_pad; 636 size = resource_size(&nsio->res); 637 npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K) 638 / PAGE_SIZE); 639 if (nd_pfn->mode == PFN_MODE_PMEM) { 640 /* 641 * vmemmap_populate_hugepages() allocates the memmap array in 642 * HPAGE_SIZE chunks. 643 */ 644 offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve, 645 max(nd_pfn->align, HPAGE_SIZE)) - start; 646 } else if (nd_pfn->mode == PFN_MODE_RAM) 647 offset = ALIGN(start + SZ_8K + dax_label_reserve, 648 nd_pfn->align) - start; 649 else 650 return -ENXIO; 651 652 if (offset + start_pad + end_trunc >= size) { 653 dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n", 654 dev_name(&ndns->dev)); 655 return -ENXIO; 656 } 657 658 npfns = (size - offset - start_pad - end_trunc) / SZ_4K; 659 pfn_sb->mode = cpu_to_le32(nd_pfn->mode); 660 pfn_sb->dataoff = cpu_to_le64(offset); 661 pfn_sb->npfns = cpu_to_le64(npfns); 662 memcpy(pfn_sb->signature, sig, PFN_SIG_LEN); 663 memcpy(pfn_sb->uuid, nd_pfn->uuid, 16); 664 memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16); 665 pfn_sb->version_major = cpu_to_le16(1); 666 pfn_sb->version_minor = cpu_to_le16(2); 667 pfn_sb->start_pad = cpu_to_le32(start_pad); 668 pfn_sb->end_trunc = cpu_to_le32(end_trunc); 669 pfn_sb->align = cpu_to_le32(nd_pfn->align); 670 checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb); 671 pfn_sb->checksum = cpu_to_le64(checksum); 672 673 return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0); 674 } 675 676 /* 677 * Determine the effective resource range and vmem_altmap from an nd_pfn 678 * instance. 679 */ 680 struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn, 681 struct resource *res, struct vmem_altmap *altmap) 682 { 683 int rc; 684 685 if (!nd_pfn->uuid || !nd_pfn->ndns) 686 return ERR_PTR(-ENODEV); 687 688 rc = nd_pfn_init(nd_pfn); 689 if (rc) 690 return ERR_PTR(rc); 691 692 /* we need a valid pfn_sb before we can init a vmem_altmap */ 693 return __nvdimm_setup_pfn(nd_pfn, res, altmap); 694 } 695 EXPORT_SYMBOL_GPL(nvdimm_setup_pfn); 696