xref: /linux/drivers/dax/super.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright(c) 2017 Intel Corporation. All rights reserved.
4  */
5 #include <linux/pagemap.h>
6 #include <linux/module.h>
7 #include <linux/mount.h>
8 #include <linux/pseudo_fs.h>
9 #include <linux/magic.h>
10 #include <linux/cdev.h>
11 #include <linux/slab.h>
12 #include <linux/uio.h>
13 #include <linux/dax.h>
14 #include <linux/fs.h>
15 #include <linux/cacheinfo.h>
16 #include "dax-private.h"
17 #include "bus.h"
18 
19 /**
20  * struct dax_device - anchor object for dax services
21  * @inode: core vfs
22  * @cdev: optional character interface for "device dax"
23  * @private: dax driver private data
24  * @flags: state and boolean properties
25  * @ops: operations for this device
26  * @holder_data: holder of a dax_device: could be filesystem or mapped device
27  * @holder_ops: operations for the inner holder
28  */
29 struct dax_device {
30 	struct inode inode;
31 	struct cdev cdev;
32 	void *private;
33 	unsigned long flags;
34 	const struct dax_operations *ops;
35 	void *holder_data;
36 	const struct dax_holder_operations *holder_ops;
37 };
38 
39 static dev_t dax_devt;
40 DEFINE_STATIC_SRCU(dax_srcu);
41 static struct vfsmount *dax_mnt;
42 static DEFINE_IDA(dax_minor_ida);
43 static struct kmem_cache *dax_cache __read_mostly;
44 static struct super_block *dax_superblock __read_mostly;
45 
46 int dax_read_lock(void)
47 {
48 	return srcu_read_lock(&dax_srcu);
49 }
50 EXPORT_SYMBOL_GPL(dax_read_lock);
51 
52 void dax_read_unlock(int id)
53 {
54 	srcu_read_unlock(&dax_srcu, id);
55 }
56 EXPORT_SYMBOL_GPL(dax_read_unlock);
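
/*
 * Minimal usage sketch (hypothetical caller, not from this file): every
 * dereference of a dax_device is bracketed by dax_read_lock() and
 * dax_read_unlock(), so that kill_dax() can drain in-flight users with
 * synchronize_srcu() before tearing the device down.
 *
 *	int id = dax_read_lock();
 *
 *	if (dax_alive(dax_dev))
 *		... operate on dax_dev ...
 *	dax_read_unlock(id);
 */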
57 
58 #if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
59 #include <linux/blkdev.h>
60 
61 static DEFINE_XARRAY(dax_hosts);
62 
63 int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
64 {
65 	return xa_insert(&dax_hosts, (unsigned long)disk, dax_dev, GFP_KERNEL);
66 }
67 EXPORT_SYMBOL_GPL(dax_add_host);
68 
69 void dax_remove_host(struct gendisk *disk)
70 {
71 	xa_erase(&dax_hosts, (unsigned long)disk);
72 }
73 EXPORT_SYMBOL_GPL(dax_remove_host);
74 
75 /**
76  * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
77  * @bdev: block device to find a dax_device for
78  * @start_off: returns the byte offset into the dax_device that @bdev starts
79  * @holder: filesystem or mapped device inside the dax_device
80  * @ops: operations for the inner holder
81  */
82 struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
83 		void *holder, const struct dax_holder_operations *ops)
84 {
85 	struct dax_device *dax_dev;
86 	u64 part_size;
87 	int id;
88 
89 	if (!blk_queue_dax(bdev->bd_disk->queue))
90 		return NULL;
91 
92 	*start_off = get_start_sect(bdev) * SECTOR_SIZE;
93 	part_size = bdev_nr_sectors(bdev) * SECTOR_SIZE;
94 	if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) {
95 		pr_info("%pg: error: unaligned partition for dax\n", bdev);
96 		return NULL;
97 	}
98 
99 	id = dax_read_lock();
100 	dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk);
101 	if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode))
102 		dax_dev = NULL;
103 	else if (holder) {
104 		if (!cmpxchg(&dax_dev->holder_data, NULL, holder))
105 			dax_dev->holder_ops = ops;
106 		else
107 			dax_dev = NULL;
108 	}
109 	dax_read_unlock(id);
110 
111 	return dax_dev;
112 }
113 EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
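
/*
 * Mount-time sketch (hypothetical filesystem code, not from this file): a
 * filesystem looks up the dax_device backing its block device, registers
 * itself as the holder, and drops both the holder and the reference again
 * at unmount via fs_put_dax(). "sb" and "my_holder_ops" are illustrative
 * names only; NULL is returned if there is no dax_device, it is dead, or
 * another holder already claimed it.
 *
 *	u64 part_off;
 *	struct dax_device *dax_dev;
 *
 *	dax_dev = fs_dax_get_by_bdev(sb->s_bdev, &part_off, sb, &my_holder_ops);
 *	if (!dax_dev)
 *		return -ENODEV;	// or fall back to non-DAX operation
 *	...
 *	// at unmount time:
 *	fs_put_dax(dax_dev, sb);
 */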
114 
115 #endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
116 
117 #if IS_ENABLED(CONFIG_FS_DAX)
118 
119 void fs_put_dax(struct dax_device *dax_dev, void *holder)
120 {
121 	if (dax_dev && holder &&
122 	    cmpxchg(&dax_dev->holder_data, holder, NULL) == holder)
123 		dax_dev->holder_ops = NULL;
124 	put_dax(dax_dev);
125 }
126 EXPORT_SYMBOL_GPL(fs_put_dax);
127 
128 /**
129  * fs_dax_get() - get ownership of a devdax via holder/holder_ops
130  * @dax_dev: dev to be prepared for fs-dax usage
131  * @holder: filesystem or mapped device inside the dax_device
132  * @hops: operations for the inner holder
133  *
134  * fs-dax file systems call this function to prepare to use a devdax device
135  * for fsdax. This is like fs_dax_get_by_bdev(), but the caller already has
136  * a struct dev_dax (and there is no bdev). The holder makes the usage
137  * exclusive.
138  *
139  * Return: 0 on success, <0 on failure
140  */
141 int fs_dax_get(struct dax_device *dax_dev, void *holder,
142 	const struct dax_holder_operations *hops)
143 {
144 	struct dev_dax *dev_dax;
145 	struct dax_device_driver *dax_drv;
146 	int id;
147 
148 	id = dax_read_lock();
149 	if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode)) {
150 		dax_read_unlock(id);
151 		return -ENODEV;
152 	}
153 	dax_read_unlock(id);
154 
155 	/* Verify that the device is bound to the fsdev_dax driver */
156 	dev_dax = dax_get_private(dax_dev);
157 	if (!dev_dax) {
158 		iput(&dax_dev->inode);
159 		return -ENODEV;
160 	}
161 
162 	device_lock(&dev_dax->dev);
163 	if (!dev_dax->dev.driver) {
164 		device_unlock(&dev_dax->dev);
165 		iput(&dax_dev->inode);
166 		return -ENODEV;
167 	}
168 	dax_drv = to_dax_drv(dev_dax->dev.driver);
169 	if (dax_drv->type != DAXDRV_FSDEV_TYPE) {
170 		device_unlock(&dev_dax->dev);
171 		iput(&dax_dev->inode);
172 		return -EOPNOTSUPP;
173 	}
174 	device_unlock(&dev_dax->dev);
175 
176 	if (cmpxchg(&dax_dev->holder_data, NULL, holder)) {
177 		iput(&dax_dev->inode);
178 		return -EBUSY;
179 	}
180 
181 	dax_dev->holder_ops = hops;
182 
183 	return 0;
184 }
185 EXPORT_SYMBOL_GPL(fs_dax_get);
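
/*
 * Usage sketch (hypothetical fs-dax-on-devdax caller, not from this file):
 * unlike fs_dax_get_by_bdev() there is no block device, so the filesystem
 * takes ownership of the dax_device directly and checks for the error
 * codes documented above. "sb" and "my_holder_ops" are illustrative.
 *
 *	rc = fs_dax_get(dax_dev, sb, &my_holder_ops);
 *	if (rc)		// -ENODEV, -EOPNOTSUPP or -EBUSY
 *		return rc;
 *	...
 *	fs_put_dax(dax_dev, sb);	// drops holder and inode reference
 */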
186 #endif /* CONFIG_FS_DAX */
187 
188 enum dax_device_flags {
189 	/* !alive + rcu grace period == no new operations / mappings */
190 	DAXDEV_ALIVE,
191 	/* gate whether dax_flush() calls the low level flush routine */
192 	DAXDEV_WRITE_CACHE,
193 	/* flag to check if device supports synchronous flush */
194 	DAXDEV_SYNC,
195 	/* do not leave the caches dirty after writes */
196 	DAXDEV_NOCACHE,
197 	/* handle CPU fetch exceptions during reads */
198 	DAXDEV_NOMC,
199 };
200 
201 /**
202  * dax_direct_access() - translate a device pgoff to an absolute pfn
203  * @dax_dev: a dax_device instance representing the logical memory range
204  * @pgoff: offset in pages from the start of the device to translate
205  * @nr_pages: number of consecutive pages caller can handle relative to @pfn
206  * @mode: indicator on normal access or recovery write
207  * @kaddr: output parameter that returns a virtual address mapping of pfn
208  * @pfn: output parameter that returns an absolute pfn translation of @pgoff
209  *
210  * Return: negative errno if an error occurs, otherwise the number of
211  * pages accessible at the device relative @pgoff.
212  */
213 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
214 		enum dax_access_mode mode, void **kaddr, unsigned long *pfn)
215 {
216 	long avail;
217 
218 	if (!dax_dev)
219 		return -EOPNOTSUPP;
220 
221 	if (!dax_alive(dax_dev))
222 		return -ENXIO;
223 
224 	if (!dax_dev->ops)
225 		return -EOPNOTSUPP;
226 
227 	if (nr_pages < 0)
228 		return -EINVAL;
229 
230 	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
231 			mode, kaddr, pfn);
232 	if (!avail)
233 		return -ERANGE;
234 	return min(avail, nr_pages);
235 }
236 EXPORT_SYMBOL_GPL(dax_direct_access);
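
/*
 * Usage sketch (hypothetical caller, not from this file): translate a page
 * offset into a kernel virtual address and pfn, then clamp the access to
 * what the device actually mapped. The caller holds dax_read_lock() across
 * the access; "buf", "len" and "pgoff" are illustrative.
 *
 *	void *kaddr;
 *	unsigned long pfn;
 *	long nr;
 *
 *	nr = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, &pfn);
 *	if (nr < 0)
 *		return nr;
 *	memcpy(buf, kaddr, min_t(size_t, len, nr * PAGE_SIZE));
 */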
237 
238 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
239 		size_t bytes, struct iov_iter *i)
240 {
241 	if (!dax_alive(dax_dev))
242 		return 0;
243 
244 	/*
245 	 * The userspace address for the memory copy has already been validated
246 	 * via access_ok() in vfs_write, so use the 'no check' version to bypass
247 	 * the HARDENED_USERCOPY overhead.
248 	 */
249 	if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags))
250 		return _copy_from_iter_flushcache(addr, bytes, i);
251 	return _copy_from_iter(addr, bytes, i);
252 }
253 
254 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
255 		size_t bytes, struct iov_iter *i)
256 {
257 	if (!dax_alive(dax_dev))
258 		return 0;
259 
260 	/*
261 	 * The userspace address for the memory copy has already been validated
262 	 * via access_ok() in vfs_read, so use the 'no check' version to bypass
263 	 * the HARDENED_USERCOPY overhead.
264 	 */
265 	if (test_bit(DAXDEV_NOMC, &dax_dev->flags))
266 		return _copy_mc_to_iter(addr, bytes, i);
267 	return _copy_to_iter(addr, bytes, i);
268 }
269 
270 int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
271 			size_t nr_pages)
272 {
273 	int ret;
274 
275 	if (!dax_alive(dax_dev))
276 		return -ENXIO;
277 
278 	if (!dax_dev->ops)
279 		return -EOPNOTSUPP;
280 
281 	/*
282 	 * There are no callers that want to zero more than one page as of now.
283 	 * Once users are there, this check can be removed after the
284 	 * device mapper code has been updated to split ranges across targets.
285 	 */
286 	if (nr_pages != 1)
287 		return -EIO;
288 
289 	ret = dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
290 	return dax_mem2blk_err(ret);
291 }
292 EXPORT_SYMBOL_GPL(dax_zero_page_range);
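
/*
 * Usage sketch (hypothetical caller, not from this file): zeroing is done
 * one page at a time here (nr_pages must currently be 1), so a caller
 * converts a device-relative byte position into a page offset first.
 * "pos" is an illustrative byte offset into the dax_device.
 *
 *	pgoff_t pgoff = pos >> PAGE_SHIFT;
 *	int rc = dax_zero_page_range(dax_dev, pgoff, 1);
 *	if (rc)
 *		return rc;
 */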
293 
294 size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
295 		void *addr, size_t bytes, struct iov_iter *iter)
296 {
297 	if (!dax_dev->ops || !dax_dev->ops->recovery_write)
298 		return 0;
299 	return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter);
300 }
301 EXPORT_SYMBOL_GPL(dax_recovery_write);
302 
303 int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off,
304 			      u64 len, int mf_flags)
305 {
306 	int rc, id;
307 
308 	id = dax_read_lock();
309 	if (!dax_alive(dax_dev)) {
310 		rc = -ENXIO;
311 		goto out;
312 	}
313 
314 	if (!dax_dev->holder_ops) {
315 		rc = -EOPNOTSUPP;
316 		goto out;
317 	}
318 
319 	rc = dax_dev->holder_ops->notify_failure(dax_dev, off, len, mf_flags);
320 out:
321 	dax_read_unlock(id);
322 	return rc;
323 }
324 EXPORT_SYMBOL_GPL(dax_holder_notify_failure);
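
/*
 * Usage sketch (hypothetical provider, not from this file): a memory
 * failure or pre-removal path notifies the holder (e.g. a filesystem that
 * registered via fs_dax_get_by_bdev() or fs_dax_get()) of a failing byte
 * range so it can stop using it. "offset", "len" and "mf_flags" are
 * illustrative, device-relative values.
 *
 *	rc = dax_holder_notify_failure(dax_dev, offset, len, mf_flags);
 *	if (rc == -EOPNOTSUPP)
 *		... no holder registered, fall back to generic handling ...
 */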
325 
326 #ifdef CONFIG_ARCH_HAS_PMEM_API
327 void arch_wb_cache_pmem(void *addr, size_t size);
328 void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
329 {
330 	if (unlikely(!dax_write_cache_enabled(dax_dev)))
331 		return;
332 
333 	arch_wb_cache_pmem(addr, size);
334 }
335 #else
336 void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
337 {
338 }
339 #endif
340 EXPORT_SYMBOL_GPL(dax_flush);
341 
342 void dax_write_cache(struct dax_device *dax_dev, bool wc)
343 {
344 	if (wc)
345 		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
346 	else
347 		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
348 }
349 EXPORT_SYMBOL_GPL(dax_write_cache);
350 
351 bool dax_write_cache_enabled(struct dax_device *dax_dev)
352 {
353 	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
354 }
355 EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
356 
357 bool dax_synchronous(struct dax_device *dax_dev)
358 {
359 	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
360 }
361 EXPORT_SYMBOL_GPL(dax_synchronous);
362 
363 void set_dax_synchronous(struct dax_device *dax_dev)
364 {
365 	set_bit(DAXDEV_SYNC, &dax_dev->flags);
366 }
367 EXPORT_SYMBOL_GPL(set_dax_synchronous);
368 
369 void set_dax_nocache(struct dax_device *dax_dev)
370 {
371 	set_bit(DAXDEV_NOCACHE, &dax_dev->flags);
372 }
373 EXPORT_SYMBOL_GPL(set_dax_nocache);
374 
375 void set_dax_nomc(struct dax_device *dax_dev)
376 {
377 	set_bit(DAXDEV_NOMC, &dax_dev->flags);
378 }
379 EXPORT_SYMBOL_GPL(set_dax_nomc);
380 
381 /**
382  * dax_set_ops - set the dax_operations for a dax_device
383  * @dax_dev: the dax_device to configure
384  * @ops: the operations to set (may be NULL to clear)
385  *
386  * This allows drivers to set the dax_operations after the dax_device
387  * has been allocated. This is needed when the device is created before
388  * the driver that needs specific ops is bound (e.g., fsdev_dax binding
389  * to a dev_dax created by hmem).
390  *
391  * Setting non-NULL ops fails with -EBUSY if ops are already set; clearing
392  * ops (passing NULL) always succeeds.
393  *
394  * Return: 0 on success, -EBUSY if ops already set
395  */
396 int dax_set_ops(struct dax_device *dax_dev, const struct dax_operations *ops)
397 {
398 	if (ops) {
399 		/* Setting ops: fail if already set */
400 		if (cmpxchg(&dax_dev->ops, NULL, ops) != NULL)
401 			return -EBUSY;
402 	} else {
403 		/* Clearing ops: always allowed */
404 		dax_dev->ops = NULL;
405 	}
406 	return 0;
407 }
408 EXPORT_SYMBOL_GPL(dax_set_ops);
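
/*
 * Usage sketch (hypothetical driver, not from this file): a driver binding
 * to an already created dax_device installs its operations at probe time
 * and removes them on the way out, relying on the -EBUSY check to catch a
 * conflicting owner. "my_dax_ops" is illustrative.
 *
 *	rc = dax_set_ops(dax_dev, &my_dax_ops);
 *	if (rc)
 *		return rc;	// someone else already provides ops
 *	...
 *	dax_set_ops(dax_dev, NULL);	// on remove
 */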
409 
410 bool dax_alive(struct dax_device *dax_dev)
411 {
412 	lockdep_assert_held(&dax_srcu);
413 	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
414 }
415 EXPORT_SYMBOL_GPL(dax_alive);
416 
417 /*
418  * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
419  * that any fault handlers or operations that might have seen
420  * dax_alive(), have completed.  Any operations that start after
421  * synchronize_srcu() has run will abort upon seeing !dax_alive().
422  *
423  * Note, because alloc_dax() returns an ERR_PTR() on error, callers
424  * typically store its result into a local variable in order to check
425  * the result. Therefore, care must be taken to populate the struct
426  * device dax_dev field to make sure the dax_dev is not leaked.
427  */
428 void kill_dax(struct dax_device *dax_dev)
429 {
430 	if (!dax_dev)
431 		return;
432 
433 	if (dax_dev->holder_data != NULL)
434 		dax_holder_notify_failure(dax_dev, 0, U64_MAX,
435 				MF_MEM_PRE_REMOVE);
436 
437 	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
438 	synchronize_srcu(&dax_srcu);
439 
440 	/* clear holder data */
441 	dax_dev->holder_ops = NULL;
442 	dax_dev->holder_data = NULL;
443 }
444 EXPORT_SYMBOL_GPL(kill_dax);
445 
446 void run_dax(struct dax_device *dax_dev)
447 {
448 	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
449 }
450 EXPORT_SYMBOL_GPL(run_dax);
451 
452 static struct inode *dax_alloc_inode(struct super_block *sb)
453 {
454 	struct dax_device *dax_dev;
455 	struct inode *inode;
456 
457 	dax_dev = alloc_inode_sb(sb, dax_cache, GFP_KERNEL);
458 	if (!dax_dev)
459 		return NULL;
460 
461 	inode = &dax_dev->inode;
462 	inode->i_rdev = 0;
463 	return inode;
464 }
465 
466 static struct dax_device *to_dax_dev(struct inode *inode)
467 {
468 	return container_of(inode, struct dax_device, inode);
469 }
470 
471 static void dax_free_inode(struct inode *inode)
472 {
473 	struct dax_device *dax_dev = to_dax_dev(inode);
474 	if (inode->i_rdev)
475 		ida_free(&dax_minor_ida, iminor(inode));
476 	kmem_cache_free(dax_cache, dax_dev);
477 }
478 
479 static void dax_destroy_inode(struct inode *inode)
480 {
481 	struct dax_device *dax_dev = to_dax_dev(inode);
482 	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
483 			"kill_dax() must be called before final iput()\n");
484 }
485 
486 static const struct super_operations dax_sops = {
487 	.statfs = simple_statfs,
488 	.alloc_inode = dax_alloc_inode,
489 	.destroy_inode = dax_destroy_inode,
490 	.free_inode = dax_free_inode,
491 	.drop_inode = inode_just_drop,
492 };
493 
494 static int dax_init_fs_context(struct fs_context *fc)
495 {
496 	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);
497 	if (!ctx)
498 		return -ENOMEM;
499 	ctx->ops = &dax_sops;
500 	return 0;
501 }
502 
503 static struct file_system_type dax_fs_type = {
504 	.name		= "dax",
505 	.init_fs_context = dax_init_fs_context,
506 	.kill_sb	= kill_anon_super,
507 };
508 
509 static int dax_test(struct inode *inode, void *data)
510 {
511 	dev_t devt = *(dev_t *) data;
512 
513 	return inode->i_rdev == devt;
514 }
515 
516 static int dax_set(struct inode *inode, void *data)
517 {
518 	dev_t devt = *(dev_t *) data;
519 
520 	inode->i_rdev = devt;
521 	return 0;
522 }
523 
524 struct dax_device *dax_dev_get(dev_t devt)
525 {
526 	struct dax_device *dax_dev;
527 	struct inode *inode;
528 
529 	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
530 			dax_test, dax_set, &devt);
531 
532 	if (!inode)
533 		return NULL;
534 
535 	dax_dev = to_dax_dev(inode);
536 	if (inode_state_read_once(inode) & I_NEW) {
537 		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
538 		inode->i_cdev = &dax_dev->cdev;
539 		inode->i_mode = S_IFCHR;
540 		inode->i_flags = S_DAX;
541 		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
542 		unlock_new_inode(inode);
543 	}
544 
545 	return dax_dev;
546 }
547 EXPORT_SYMBOL_GPL(dax_dev_get);
548 
549 struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
550 {
551 	struct dax_device *dax_dev;
552 	dev_t devt;
553 	int minor;
554 
555 	/*
556 	 * Unavailable on architectures with virtually aliased data caches,
557 	 * except for device-dax (NULL operations pointer), which does
558 	 * not use aliased mappings from the kernel.
559 	 */
560 	if (ops && cpu_dcache_is_aliasing())
561 		return ERR_PTR(-EOPNOTSUPP);
562 
563 	if (WARN_ON_ONCE(ops && !ops->zero_page_range))
564 		return ERR_PTR(-EINVAL);
565 
566 	minor = ida_alloc_max(&dax_minor_ida, MINORMASK, GFP_KERNEL);
567 	if (minor < 0)
568 		return ERR_PTR(-ENOMEM);
569 
570 	devt = MKDEV(MAJOR(dax_devt), minor);
571 	dax_dev = dax_dev_get(devt);
572 	if (!dax_dev)
573 		goto err_dev;
574 
575 	dax_dev->ops = ops;
576 	dax_dev->private = private;
577 	return dax_dev;
578 
579  err_dev:
580 	ida_free(&dax_minor_ida, minor);
581 	return ERR_PTR(-ENOMEM);
582 }
583 EXPORT_SYMBOL_GPL(alloc_dax);
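
/*
 * Lifecycle sketch (hypothetical provider, e.g. a pmem-like driver; not
 * from this file): alloc_dax() returns an ERR_PTR() on failure, so the
 * result is checked with IS_ERR() before use. Teardown is kill_dax()
 * followed by put_dax(), in that order, so in-flight users drain before
 * the backing inode goes away. "driver_data" and "my_dax_ops" are
 * illustrative.
 *
 *	dax_dev = alloc_dax(driver_data, &my_dax_ops);
 *	if (IS_ERR(dax_dev))
 *		return PTR_ERR(dax_dev);
 *	set_dax_nocache(dax_dev);
 *	set_dax_nomc(dax_dev);
 *	...
 *	// on removal:
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 */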
584 
585 void put_dax(struct dax_device *dax_dev)
586 {
587 	if (!dax_dev)
588 		return;
589 	iput(&dax_dev->inode);
590 }
591 EXPORT_SYMBOL_GPL(put_dax);
592 
593 /**
594  * dax_holder() - obtain the holder of a dax device
595  * @dax_dev: a dax_device instance
596  *
597  * Return: the holder's data which represents the holder if registered,
598  * otherwise NULL.
599  */
600 void *dax_holder(struct dax_device *dax_dev)
601 {
602 	return dax_dev->holder_data;
603 }
604 EXPORT_SYMBOL_GPL(dax_holder);
605 
606 /**
607  * inode_dax: convert a public inode into its dax_dev
608  * @inode: An inode with i_cdev pointing to a dax_dev
609  *
610  * Note this is not equivalent to to_dax_dev() which is for private
611  * internal use where we know the inode filesystem type == dax_fs_type.
612  */
613 struct dax_device *inode_dax(struct inode *inode)
614 {
615 	struct cdev *cdev = inode->i_cdev;
616 
617 	return container_of(cdev, struct dax_device, cdev);
618 }
619 EXPORT_SYMBOL_GPL(inode_dax);
620 
621 struct inode *dax_inode(struct dax_device *dax_dev)
622 {
623 	return &dax_dev->inode;
624 }
625 EXPORT_SYMBOL_GPL(dax_inode);
626 
627 void *dax_get_private(struct dax_device *dax_dev)
628 {
629 	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
630 		return NULL;
631 	return dax_dev->private;
632 }
633 EXPORT_SYMBOL_GPL(dax_get_private);
634 
635 static void init_once(void *_dax_dev)
636 {
637 	struct dax_device *dax_dev = _dax_dev;
638 	struct inode *inode = &dax_dev->inode;
639 
640 	memset(dax_dev, 0, sizeof(*dax_dev));
641 	inode_init_once(inode);
642 }
643 
644 static int dax_fs_init(void)
645 {
646 	int rc;
647 
648 	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
649 			SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
650 			init_once);
651 	if (!dax_cache)
652 		return -ENOMEM;
653 
654 	dax_mnt = kern_mount(&dax_fs_type);
655 	if (IS_ERR(dax_mnt)) {
656 		rc = PTR_ERR(dax_mnt);
657 		goto err_mount;
658 	}
659 	dax_superblock = dax_mnt->mnt_sb;
660 
661 	return 0;
662 
663  err_mount:
664 	kmem_cache_destroy(dax_cache);
665 
666 	return rc;
667 }
668 
669 static void dax_fs_exit(void)
670 {
671 	kern_unmount(dax_mnt);
672 	rcu_barrier();
673 	kmem_cache_destroy(dax_cache);
674 }
675 
676 static int __init dax_core_init(void)
677 {
678 	int rc;
679 
680 	rc = dax_fs_init();
681 	if (rc)
682 		return rc;
683 
684 	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
685 	if (rc)
686 		goto err_chrdev;
687 
688 	rc = dax_bus_init();
689 	if (rc)
690 		goto err_bus;
691 	return 0;
692 
693 err_bus:
694 	unregister_chrdev_region(dax_devt, MINORMASK+1);
695 err_chrdev:
696 	dax_fs_exit();
697 	return rc;
698 }
699 
700 static void __exit dax_core_exit(void)
701 {
702 	dax_bus_exit();
703 	unregister_chrdev_region(dax_devt, MINORMASK+1);
704 	ida_destroy(&dax_minor_ida);
705 	dax_fs_exit();
706 }
707 
708 MODULE_AUTHOR("Intel Corporation");
709 MODULE_DESCRIPTION("DAX: direct access to differentiated memory");
710 MODULE_LICENSE("GPL v2");
711 subsys_initcall(dax_core_init);
712 module_exit(dax_core_exit);
713