// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "btt.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

/*
 * Ask the bus provider to clear poison in the given range, then bring
 * the driver's badblocks accounting and the linear-map HWPoison state
 * back in sync with the media. Returns BLK_STS_IOERR when the range
 * could not be fully cleared.
 */
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

/*
 * Copy from a bio page into pmem using cache-flushing stores, crossing
 * page boundaries as needed, so the write is not left dirty in the CPU
 * cache.
 */
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

/*
 * Copy from pmem into a bio page with the machine-check-safe copy so
 * consumed poison is reported as an I/O error rather than taking the
 * kernel down.
 */
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

/*
 * pmem_do_read() and pmem_do_write() are the common helpers behind the
 * bio and rw_page entry points. Reads that overlap known badblocks fail
 * immediately with BLK_STS_IOERR; writes to known-bad ranges attempt to
 * clear the poison inline.
 */
static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	/*
	 * Note that we write the data both before and after
	 * clearing poison.  The write before clear poison
	 * handles situations where the latest written data is
	 * preserved and the clear poison operation simply marks
	 * the address range as valid without changing the data.
	 * In this case application software can assume that an
	 * interrupted write will either return the new good
	 * data or an error.
	 *
	 * However, if pmem_clear_poison() leaves the data in an
	 * indeterminate state we need to perform the write
	 * after clear poison.
	 */
	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);
	if (unlikely(bad_pmem)) {
		rc = pmem_clear_poison(pmem, pmem_off, len);
		write_pmem(pmem_addr, page, page_off, len);
	}

	return rc;
}

static blk_qc_t pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	blk_status_t rc;

	if (op_is_write(op))
		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
	else
		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}
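
/*
 * DAX address translation: map a page offset within the namespace to a
 * kernel virtual address and pfn. Ranges that overlap known badblocks
 * fail with -EIO, and when any badblocks exist on the device the
 * reported contiguous length is capped at the requested range.
 */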
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner = THIS_MODULE,
	.submit_bio = pmem_submit_bio,
	.rw_page = pmem_rw_page,
};

static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				    size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor()
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_mc_to_iter(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
	.zero_page_range = pmem_dax_zero_page_range,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	struct pmem_device *pmem = pgmap->owner;

	blk_cleanup_disk(pmem->disk);
}

static void pmem_release_queue(void *pgmap)
{
	pmem_pagemap_cleanup(pgmap);
}

static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.kill = pmem_pagemap_kill,
	.cleanup = pmem_pagemap_cleanup,
};
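
/*
 * Set up the gendisk and dax_device for a pmem namespace. The backing
 * memory is mapped in one of three ways: via a pfn info block (which
 * may reserve namespace capacity for the memmap), via
 * devm_memremap_pages() over the whole namespace, or as a plain
 * devm_memremap() with no struct page coverage, in which case the DAX
 * queue flag is never set.
 */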
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;
	unsigned long flags = 0UL;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	disk = blk_alloc_disk(nid);
	if (!disk)
		return -ENOMEM;
	q = disk->queue;

	pmem->disk = disk;
	pmem->pgmap.owner = pmem;
	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		if (devm_add_action_or_reset(dev, pmem_release_queue,
					&pmem->pgmap))
			return -ENOMEM;
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);

	disk->fops = &pmem_fops;
	disk->private_data = pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	if (IS_ERR(dax_dev))
		return PTR_ERR(dax_dev);
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;
	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk, NULL);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}
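
/*
 * Probe dispatch: a namespace may be claimed by a btt, pfn, or dax
 * personality, otherwise it attaches as a raw pmem disk. A successful
 * nd_*_probe() registers a claiming device that probes on its own, so
 * this bare namespace probe returns -ENXIO in that case.
 */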
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: either there is no info
	 * reserve block, or we found a valid info reserve block but
	 * failed to initialize the pfn superblock.
	 *
	 * For the first case, consider the namespace a raw pmem
	 * namespace and attach a disk.
	 *
	 * For the latter, consider this a success and advance the
	 * namespace seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static void nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes nd_device_lock() context so it
		 * does not race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

static void pmem_revalidate_poison(struct device *dev)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

static void pmem_revalidate_region(struct device *dev)
{
	struct pmem_device *pmem;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);
		struct btt *btt = nd_btt->btt;

		nvdimm_check_and_set_ro(btt->btt_disk);
		return;
	}

	pmem = dev_get_drvdata(dev);
	nvdimm_check_and_set_ro(pmem->disk);
}
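
/*
 * Bus event handler: re-populate badblocks when the region reports new
 * poison, or re-evaluate the disk's read-only state when the region
 * changes.
 */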
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	switch (event) {
	case NVDIMM_REVALIDATE_POISON:
		pmem_revalidate_poison(dev);
		break;
	case NVDIMM_REVALIDATE_REGION:
		pmem_revalidate_region(dev);
		break;
	default:
		dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
		break;
	}
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");