// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/io.h>
#include "dax-private.h"
#include "bus.h"

static DEFINE_MUTEX(dax_bus_lock);

/*
 * All changes to the dax region configuration occur with this lock held
 * for write.
 */
DECLARE_RWSEM(dax_region_rwsem);

/*
 * All changes to the dax device configuration occur with this lock held
 * for write.
 */
DECLARE_RWSEM(dax_dev_rwsem);

#define DAX_NAME_LEN 30
struct dax_id {
	struct list_head list;
	char dev_name[DAX_NAME_LEN];
};

static int dax_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0);
}

static struct dax_device_driver *to_dax_drv(struct device_driver *drv)
{
	return container_of(drv, struct dax_device_driver, drv);
}

static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
		const char *dev_name)
{
	struct dax_id *dax_id;

	lockdep_assert_held(&dax_bus_lock);

	list_for_each_entry(dax_id, &dax_drv->ids, list)
		if (sysfs_streq(dax_id->dev_name, dev_name))
			return dax_id;
	return NULL;
}

static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
{
	int match;

	mutex_lock(&dax_bus_lock);
	match = !!__dax_match_id(dax_drv, dev_name(dev));
	mutex_unlock(&dax_bus_lock);

	return match;
}

static int dax_match_type(struct dax_device_driver *dax_drv, struct device *dev)
{
	enum dax_driver_type type = DAXDRV_DEVICE_TYPE;
	struct dev_dax *dev_dax = to_dev_dax(dev);

	if (dev_dax->region->res.flags & IORESOURCE_DAX_KMEM)
		type = DAXDRV_KMEM_TYPE;

	if (dax_drv->type == type)
		return 1;

	/* default to device mode if dax_kmem is disabled */
	if (dax_drv->type == DAXDRV_DEVICE_TYPE &&
	    !IS_ENABLED(CONFIG_DEV_DAX_KMEM))
		return 1;

	return 0;
}

enum id_action {
	ID_REMOVE,
	ID_ADD,
};

static ssize_t do_id_store(struct device_driver *drv, const char *buf,
		size_t count, enum id_action action)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);
	unsigned int region_id, id;
	char devname[DAX_NAME_LEN];
	struct dax_id *dax_id;
	ssize_t rc = count;
	int fields;

	fields = sscanf(buf, "dax%d.%d", &region_id, &id);
	if (fields != 2)
		return -EINVAL;
	sprintf(devname, "dax%d.%d", region_id, id);
	if (!sysfs_streq(buf, devname))
		return -EINVAL;

	mutex_lock(&dax_bus_lock);
	dax_id = __dax_match_id(dax_drv, buf);
	if (!dax_id) {
		if (action == ID_ADD) {
			dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL);
			if (dax_id) {
				strscpy(dax_id->dev_name, buf, DAX_NAME_LEN);
				list_add(&dax_id->list, &dax_drv->ids);
			} else
				rc = -ENOMEM;
		}
	} else if (action == ID_REMOVE) {
		list_del(&dax_id->list);
		kfree(dax_id);
	}
	mutex_unlock(&dax_bus_lock);

	if (rc < 0)
		return rc;
	if (action == ID_ADD)
		rc = driver_attach(drv);
	if (rc)
		return rc;
	return count;
}

static ssize_t new_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_ADD);
}
static DRIVER_ATTR_WO(new_id);

static ssize_t remove_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_REMOVE);
}
static DRIVER_ATTR_WO(remove_id);
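
/*
 * new_id/remove_id usage sketch (illustrative, not compiled): these
 * driver attributes let an administrator steer an existing dax device
 * to a different dax driver. Assuming a device named dax0.0 and the
 * dax_kmem driver loaded, the flow from a shell would look like:
 *
 *	echo dax0.0 > /sys/bus/dax/drivers/device_dax/unbind
 *	echo dax0.0 > /sys/bus/dax/drivers/kmem/new_id
 *
 * do_id_store() validates the "daxX.Y" spelling, records the name on
 * dax_drv->ids, and (for new_id) immediately retries binding via
 * driver_attach().
 */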

static struct attribute *dax_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dax_drv);

static int dax_bus_match(struct device *dev, struct device_driver *drv);

/*
 * Static dax regions are regions created by an external subsystem, like
 * nvdimm, where a single range is assigned. Their boundaries are
 * established by the external subsystem and are usually limited to one
 * physical memory range. For example, for PMEM the range is usually
 * defined by NVDIMM Namespace boundaries (i.e. a single contiguous
 * range).
 *
 * On dynamic dax regions, the assigned region can be partitioned by the
 * dax core into multiple subdivisions. A subdivision is represented by
 * one /dev/daxN.M device composed of one or more potentially
 * discontiguous ranges.
 *
 * When allocating a dax region, drivers must set whether it's static
 * (IORESOURCE_DAX_STATIC). On static dax devices, the @pgmap is
 * pre-assigned to the dax core when calling devm_create_dev_dax(),
 * whereas on dynamic dax devices it is NULL but afterwards allocated by
 * the dax core on device ->probe(). Care is needed to make sure that
 * dynamic dax devices are torn down with a cleared @pgmap field (see
 * kill_dev_dax()).
 */
static bool is_static(struct dax_region *dax_region)
{
	return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
}

bool static_dev_dax(struct dev_dax *dev_dax)
{
	return is_static(dev_dax->region);
}
EXPORT_SYMBOL_GPL(static_dev_dax);

static u64 dev_dax_size(struct dev_dax *dev_dax)
{
	u64 size = 0;
	int i;

	WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem));

	for (i = 0; i < dev_dax->nr_range; i++)
		size += range_len(&dev_dax->ranges[i].range);

	return size;
}

static int dax_bus_probe(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	int rc;
	u64 size;

	rc = down_read_interruptible(&dax_dev_rwsem);
	if (rc)
		return rc;
	size = dev_dax_size(dev_dax);
	up_read(&dax_dev_rwsem);

	if (size == 0 || dev_dax->id < 0)
		return -ENXIO;

	rc = dax_drv->probe(dev_dax);

	if (rc || is_static(dax_region))
		return rc;

	/*
	 * Track new seed creation only after successful probe of the
	 * previous seed.
	 */
	if (dax_region->seed == dev)
		dax_region->seed = NULL;

	return 0;
}

static void dax_bus_remove(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);

	if (dax_drv->remove)
		dax_drv->remove(dev_dax);
}

static const struct bus_type dax_bus_type = {
	.name = "dax",
	.uevent = dax_bus_uevent,
	.match = dax_bus_match,
	.probe = dax_bus_probe,
	.remove = dax_bus_remove,
	.drv_groups = dax_drv_groups,
};

static int dax_bus_match(struct device *dev, struct device_driver *drv)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);

	if (dax_match_id(dax_drv, dev))
		return 1;
	return dax_match_type(dax_drv, dev);
}

/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared, to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  (unsigned long long)resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t region_align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", dax_region->align);
}
static struct device_attribute dev_attr_region_align =
		__ATTR(align, 0400, region_align_show, NULL);

#define for_each_dax_region_resource(dax_region, res) \
	for (res = (dax_region)->res.child; res; res = res->sibling)

static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
{
	resource_size_t size = resource_size(&dax_region->res);
	struct resource *res;

	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));

	for_each_dax_region_resource(dax_region, res)
		size -= resource_size(res);
	return size;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long size;
	int rc;

	rc = down_read_interruptible(&dax_region_rwsem);
	if (rc)
		return rc;
	size = dax_region_avail_size(dax_region);
	up_read(&dax_region_rwsem);

	return sysfs_emit(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *seed;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	rc = down_read_interruptible(&dax_region_rwsem);
	if (rc)
		return rc;
	seed = dax_region->seed;
	rc = sysfs_emit(buf, "%s\n", seed ? dev_name(seed) : "");
	up_read(&dax_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RO(seed);

static ssize_t create_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *youngest;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	rc = down_read_interruptible(&dax_region_rwsem);
	if (rc)
		return rc;
	youngest = dax_region->youngest;
	rc = sysfs_emit(buf, "%s\n", youngest ? dev_name(youngest) : "");
	up_read(&dax_region_rwsem);

	return rc;
}

static struct dev_dax *__devm_create_dev_dax(struct dev_dax_data *data);

static ssize_t create_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long avail;
	ssize_t rc;
	int val;

	if (is_static(dax_region))
		return -EINVAL;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	rc = down_write_killable(&dax_region_rwsem);
	if (rc)
		return rc;
	avail = dax_region_avail_size(dax_region);
	if (avail == 0)
		rc = -ENOSPC;
	else {
		struct dev_dax_data data = {
			.dax_region = dax_region,
			.size = 0,
			.id = -1,
			.memmap_on_memory = false,
		};
		struct dev_dax *dev_dax = __devm_create_dev_dax(&data);

		if (IS_ERR(dev_dax))
			rc = PTR_ERR(dev_dax);
		else {
			/*
			 * In support of crafting multiple new devices
			 * simultaneously, multiple seeds can be created,
			 * but only the first one that has not been
			 * successfully bound is tracked as the region
			 * seed.
			 */
			if (!dax_region->seed)
				dax_region->seed = &dev_dax->dev;
			dax_region->youngest = &dev_dax->dev;
			rc = len;
		}
	}
	up_write(&dax_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(create);
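
/*
 * Dynamic region usage sketch (illustrative, not compiled): on a
 * dynamic region, writing "1" to the region's create attribute spawns
 * a new, 0-sized seed device that can then be sized and bound.
 * Assuming a region named dax0, a shell session might look like:
 *
 *	echo 1 > /sys/bus/dax/devices/dax0.0/../dax_region/create
 *	cat /sys/bus/dax/devices/dax0.0/../dax_region/seed
 *
 * The exact parent path depends on the hosting driver; tools like
 * daxctl discover it automatically.
 */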

void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);

	/*
	 * Dynamic dax regions have the pgmap allocated via devm_kzalloc()
	 * and thus freed by devm. Clear the pgmap to not have stale pgmap
	 * ranges on probe() from previous reconfigurations of region devices.
	 */
	if (!static_dev_dax(dev_dax))
		dev_dax->pgmap = NULL;
}
EXPORT_SYMBOL_GPL(kill_dev_dax);

static void trim_dev_dax_range(struct dev_dax *dev_dax)
{
	int i = dev_dax->nr_range - 1;
	struct range *range = &dev_dax->ranges[i].range;
	struct dax_region *dax_region = dev_dax->region;

	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
	dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
		(unsigned long long)range->start,
		(unsigned long long)range->end);

	__release_region(&dax_region->res, range->start, range_len(range));
	if (--dev_dax->nr_range == 0) {
		kfree(dev_dax->ranges);
		dev_dax->ranges = NULL;
	}
}

static void free_dev_dax_ranges(struct dev_dax *dev_dax)
{
	while (dev_dax->nr_range)
		trim_dev_dax_range(dev_dax);
}

static void __unregister_dev_dax(void *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	dev_dbg(dev, "%s\n", __func__);

	kill_dev_dax(dev_dax);
	device_del(dev);
	free_dev_dax_ranges(dev_dax);
	put_device(dev);
}

static void unregister_dev_dax(void *dev)
{
	if (rwsem_is_locked(&dax_region_rwsem))
		return __unregister_dev_dax(dev);

	if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0))
		return;
	__unregister_dev_dax(dev);
	up_write(&dax_region_rwsem);
}

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

static void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}

/* a return value >= 0 indicates this invocation invalidated the id */
static int __free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region;
	int rc = dev_dax->id;

	WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem));

	if (!dev_dax->dyn_id || dev_dax->id < 0)
		return -1;
	dax_region = dev_dax->region;
	ida_free(&dax_region->ida, dev_dax->id);
	dax_region_put(dax_region);
	dev_dax->id = -1;
	return rc;
}

static int free_dev_dax_id(struct dev_dax *dev_dax)
{
	int rc;

	rc = down_write_killable(&dax_dev_rwsem);
	if (rc)
		return rc;
	rc = __free_dev_dax_id(dev_dax);
	up_write(&dax_dev_rwsem);
	return rc;
}

static int alloc_dev_dax_id(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;
	int id;

	id = ida_alloc(&dax_region->ida, GFP_KERNEL);
	if (id < 0)
		return id;
	kref_get(&dax_region->kref);
	dev_dax->dyn_id = true;
	dev_dax->id = id;
	return id;
}

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct dev_dax *dev_dax;
	struct device *victim;
	bool do_del = false;
	int rc;

	if (is_static(dax_region))
		return -EINVAL;

	victim = device_find_child_by_name(dax_region->dev, buf);
	if (!victim)
		return -ENXIO;

	rc = down_write_killable(&dax_region_rwsem);
	if (rc)
		return rc;
	rc = down_write_killable(&dax_dev_rwsem);
	if (rc) {
		up_write(&dax_region_rwsem);
		return rc;
	}
	dev_dax = to_dev_dax(victim);
	if (victim->driver || dev_dax_size(dev_dax))
		rc = -EBUSY;
	else {
		/*
		 * Invalidate the device so it does not become active
		 * again, but always preserve device-id-0 so that
		 * /sys/bus/dax/ is guaranteed to be populated while any
		 * dax_region is registered.
		 */
		if (dev_dax->id > 0) {
			do_del = __free_dev_dax_id(dev_dax) >= 0;
			rc = len;
			if (dax_region->seed == victim)
				dax_region->seed = NULL;
			if (dax_region->youngest == victim)
				dax_region->youngest = NULL;
		} else
			rc = -EBUSY;
	}
	up_write(&dax_dev_rwsem);

	/* won the race to invalidate the device, clean it up */
	if (do_del)
		devm_release_action(dev, unregister_dev_dax, victim);
	up_write(&dax_region_rwsem);
	put_device(victim);

	return rc;
}
static DEVICE_ATTR_WO(delete);
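
/*
 * Deletion usage sketch (illustrative, not compiled): a dynamic dax
 * device must be unbound and shrunk to size 0 before it can be
 * deleted, and device-id-0 is always retained. Assuming a victim
 * device dax0.1:
 *
 *	echo dax0.1 > /sys/bus/dax/drivers/device_dax/unbind
 *	echo 0 > /sys/bus/dax/devices/dax0.1/size
 *	echo dax0.1 > /sys/bus/dax/devices/dax0.1/../dax_region/delete
 */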

static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dax_region *dax_region = dev_get_drvdata(dev);

	if (is_static(dax_region))
		if (a == &dev_attr_available_size.attr
				|| a == &dev_attr_create.attr
				|| a == &dev_attr_seed.attr
				|| a == &dev_attr_delete.attr)
			return 0;
	return a->mode;
}

static struct attribute *dax_region_attributes[] = {
	&dev_attr_available_size.attr,
	&dev_attr_region_size.attr,
	&dev_attr_region_align.attr,
	&dev_attr_create.attr,
	&dev_attr_seed.attr,
	&dev_attr_delete.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
	.is_visible = dax_region_visible,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};

static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}

struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct range *range, int target_node, unsigned int align,
		unsigned long flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(range->start, align)
			|| !IS_ALIGNED(range_len(range), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->target_node = target_node;
	ida_init(&dax_region->ida);
	dax_region->res = (struct resource) {
		.start = range->start,
		.end = range->end,
		.flags = IORESOURCE_MEM | flags,
	};

	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
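
/*
 * Caller sketch (illustrative, not compiled): a hosting driver creates
 * a static region and a single full-size device in its probe path,
 * roughly as drivers/dax/pmem.c does. The example_probe identifier and
 * the range/pgmap values are assumptions:
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct range range = { .start = 0x100000000, .end = 0x1ffffffff };
 *		struct dev_pagemap pgmap = { .range = range, .nr_range = 1 };
 *		struct dax_region *dax_region;
 *		struct dev_dax_data data;
 *
 *		dax_region = alloc_dax_region(&pdev->dev, pdev->id, &range,
 *				NUMA_NO_NODE, SZ_2M, IORESOURCE_DAX_STATIC);
 *		if (!dax_region)
 *			return -ENOMEM;
 *
 *		data = (struct dev_dax_data) {
 *			.dax_region = dax_region,
 *			.id = 0,
 *			.pgmap = &pgmap,
 *			.size = range_len(&range),
 *		};
 *		return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data));
 *	}
 */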

static void dax_mapping_release(struct device *dev)
{
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct device *parent = dev->parent;
	struct dev_dax *dev_dax = to_dev_dax(parent);

	ida_free(&dev_dax->ida, mapping->id);
	kfree(mapping);
	put_device(parent);
}

static void __unregister_dax_mapping(void *data)
{
	struct device *dev = data;
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);

	dev_dbg(dev, "%s\n", __func__);

	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));

	dev_dax->ranges[mapping->range_id].mapping = NULL;
	mapping->range_id = -1;

	device_unregister(dev);
}

static void unregister_dax_mapping(void *data)
{
	if (rwsem_is_locked(&dax_region_rwsem))
		return __unregister_dax_mapping(data);

	if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0))
		return;
	__unregister_dax_mapping(data);
	up_write(&dax_region_rwsem);
}

static struct dev_dax_range *get_dax_range(struct device *dev)
{
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);
	int rc;

	rc = down_write_killable(&dax_region_rwsem);
	if (rc)
		return NULL;
	if (mapping->range_id < 0) {
		up_write(&dax_region_rwsem);
		return NULL;
	}

	return &dev_dax->ranges[mapping->range_id];
}

static void put_dax_range(void)
{
	up_write(&dax_region_rwsem);
}

static ssize_t start_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sysfs_emit(buf, "%#llx\n", dax_range->range.start);
	put_dax_range();

	return rc;
}
static DEVICE_ATTR(start, 0400, start_show, NULL);

static ssize_t end_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sysfs_emit(buf, "%#llx\n", dax_range->range.end);
	put_dax_range();

	return rc;
}
static DEVICE_ATTR(end, 0400, end_show, NULL);

static ssize_t pgoff_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sysfs_emit(buf, "%#lx\n", dax_range->pgoff);
	put_dax_range();

	return rc;
}
static DEVICE_ATTR(page_offset, 0400, pgoff_show, NULL);

static struct attribute *dax_mapping_attributes[] = {
	&dev_attr_start.attr,
	&dev_attr_end.attr,
	&dev_attr_page_offset.attr,
	NULL,
};

static const struct attribute_group dax_mapping_attribute_group = {
	.attrs = dax_mapping_attributes,
};

static const struct attribute_group *dax_mapping_attribute_groups[] = {
	&dax_mapping_attribute_group,
	NULL,
};

static struct device_type dax_mapping_type = {
	.release = dax_mapping_release,
	.groups = dax_mapping_attribute_groups,
};

static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
{
	struct dax_region *dax_region = dev_dax->region;
	struct dax_mapping *mapping;
	struct device *dev;
	int rc;

	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));

	if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
				"region disabled\n"))
		return -ENXIO;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;
	mapping->range_id = range_id;
	mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
	if (mapping->id < 0) {
		kfree(mapping);
		return -ENOMEM;
	}
	dev_dax->ranges[range_id].mapping = mapping;
	dev = &mapping->dev;
	device_initialize(dev);
	dev->parent = &dev_dax->dev;
	get_device(dev->parent);
	dev->type = &dax_mapping_type;
	dev_set_name(dev, "mapping%d", mapping->id);
	rc = device_add(dev);
	if (rc) {
		put_device(dev);
		return rc;
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping,
			dev);
	if (rc)
		return rc;
	return 0;
}

static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
		resource_size_t size)
{
	struct dax_region *dax_region = dev_dax->region;
	struct resource *res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct dev_dax_range *ranges;
	unsigned long pgoff = 0;
	struct resource *alloc;
	int i, rc;

	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));

	/* handle the seed alloc special case */
	if (!size) {
		if (dev_WARN_ONCE(dev, dev_dax->nr_range,
					"0-size allocation must be first\n"))
			return -EBUSY;
		/* nr_range == 0 is elsewhere special cased as 0-size device */
		return 0;
	}

	alloc = __request_region(res, start, size, dev_name(dev), 0);
	if (!alloc)
		return -ENOMEM;

	ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
			* (dev_dax->nr_range + 1), GFP_KERNEL);
	if (!ranges) {
		__release_region(res, alloc->start, resource_size(alloc));
		return -ENOMEM;
	}

	for (i = 0; i < dev_dax->nr_range; i++)
		pgoff += PHYS_PFN(range_len(&ranges[i].range));
	dev_dax->ranges = ranges;
	ranges[dev_dax->nr_range++] = (struct dev_dax_range) {
		.pgoff = pgoff,
		.range = {
			.start = alloc->start,
			.end = alloc->end,
		},
	};

	dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
			&alloc->start, &alloc->end);
	/*
	 * A dev_dax instance must be registered before mapping device
	 * children can be added. Defer to devm_create_dev_dax() to add
	 * the initial mapping device.
	 */
	if (!device_is_registered(&dev_dax->dev))
		return 0;

	rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
	if (rc)
		trim_dev_dax_range(dev_dax);

	return rc;
}

static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
{
	int last_range = dev_dax->nr_range - 1;
	struct dev_dax_range *dax_range = &dev_dax->ranges[last_range];
	bool is_shrink = resource_size(res) > size;
	struct range *range = &dax_range->range;
	struct device *dev = &dev_dax->dev;
	int rc;

	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));

	if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
		return -EINVAL;

	rc = adjust_resource(res, range->start, size);
	if (rc)
		return rc;

	*range = (struct range) {
		.start = range->start,
		.end = range->start + size - 1,
	};

	dev_dbg(dev, "%s range[%d]: %#llx:%#llx\n", is_shrink ? "shrink" : "extend",
			last_range, (unsigned long long) range->start,
			(unsigned long long) range->end);

	return 0;
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	unsigned long long size;
	int rc;

	rc = down_read_interruptible(&dax_dev_rwsem);
	if (rc)
		return rc;
	size = dev_dax_size(dev_dax);
	up_read(&dax_dev_rwsem);

	return sysfs_emit(buf, "%llu\n", size);
}

static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
{
	/*
	 * The minimum mapping granularity for a device instance is a
	 * single subsection, unless the arch says otherwise.
	 */
	return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align()));
}

static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t to_shrink = dev_dax_size(dev_dax) - size;
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = dev_dax->nr_range - 1; i >= 0; i--) {
		struct range *range = &dev_dax->ranges[i].range;
		struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
		struct resource *adjust = NULL, *res;
		resource_size_t shrink;

		shrink = min_t(u64, to_shrink, range_len(range));
		if (shrink >= range_len(range)) {
			devm_release_action(dax_region->dev,
					unregister_dax_mapping, &mapping->dev);
			trim_dev_dax_range(dev_dax);
			to_shrink -= shrink;
			if (!to_shrink)
				break;
			continue;
		}

		for_each_dax_region_resource(dax_region, res)
			if (strcmp(res->name, dev_name(dev)) == 0
					&& res->start == range->start) {
				adjust = res;
				break;
			}

		if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1,
					"failed to find matching resource\n"))
			return -ENXIO;
		return adjust_dev_dax_range(dev_dax, adjust, range_len(range)
				- shrink);
	}
	return 0;
}

/*
 * Only allow adjustments that preserve the relative pgoff of existing
 * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
 */
static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res)
{
	struct dev_dax_range *last;
	int i;

	if (dev_dax->nr_range == 0)
		return false;
	if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0)
		return false;
	last = &dev_dax->ranges[dev_dax->nr_range - 1];
	if (last->range.start != res->start || last->range.end != res->end)
		return false;
	for (i = 0; i < dev_dax->nr_range - 1; i++) {
		struct dev_dax_range *dax_range = &dev_dax->ranges[i];

		if (dax_range->pgoff > last->pgoff)
			return false;
	}

	return true;
}

static ssize_t dev_dax_resize(struct dax_region *dax_region,
		struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
	resource_size_t dev_size = dev_dax_size(dev_dax);
	struct resource *region_res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct resource *res, *first;
	resource_size_t alloc = 0;
	int rc;

	if (dev->driver)
		return -EBUSY;
	if (size == dev_size)
		return 0;
	if (size > dev_size && size - dev_size > avail)
		return -ENOSPC;
	if (size < dev_size)
		return dev_dax_shrink(dev_dax, size);

	to_alloc = size - dev_size;
	if (dev_WARN_ONCE(dev, !alloc_is_aligned(dev_dax, to_alloc),
			"resize of %pa misaligned\n", &to_alloc))
		return -ENXIO;

	/*
	 * Expand the device into the unused portion of the region. This
	 * may involve adjusting the end of an existing resource, or
	 * allocating a new resource.
	 */
retry:
	first = region_res->child;
	if (!first)
		return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);

	rc = -ENOSPC;
	for (res = first; res; res = res->sibling) {
		struct resource *next = res->sibling;

		/* space at the beginning of the region */
		if (res == first && res->start > dax_region->res.start) {
			alloc = min(res->start - dax_region->res.start, to_alloc);
			rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc);
			break;
		}

		alloc = 0;
		/* space between allocations */
		if (next && next->start > res->end + 1)
			alloc = min(next->start - (res->end + 1), to_alloc);

		/* space at the end of the region */
		if (!alloc && !next && res->end < region_res->end)
			alloc = min(region_res->end - res->end, to_alloc);

		if (!alloc)
			continue;

		if (adjust_ok(dev_dax, res)) {
			rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc);
			break;
		}
		rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc);
		break;
	}
	if (rc)
		return rc;
	to_alloc -= alloc;
	if (to_alloc)
		goto retry;
	return 0;
}
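
/*
 * Worked example (hypothetical numbers): take a 4G dynamic region at
 * 0x100000000 where dax0.1 already owns the first 1G and dax0.2 owns
 * the last 1G. Growing dax0.1 by 2G proceeds as follows: the first
 * pass finds the 2G gap between the two allocations, and because
 * dax0.1's last range exactly abuts the gap, adjust_ok() permits
 * extending that resource in place rather than appending a new,
 * discontiguous range. Had the gap been smaller than the request, the
 * loop would consume it and retry for the remainder elsewhere in the
 * region.
 */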

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	ssize_t rc;
	unsigned long long val;
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	if (!alloc_is_aligned(dev_dax, val)) {
		dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val);
		return -EINVAL;
	}

	rc = down_write_killable(&dax_region_rwsem);
	if (rc)
		return rc;
	if (!dax_region->dev->driver) {
		rc = -ENXIO;
		goto err_region;
	}
	rc = down_write_killable(&dax_dev_rwsem);
	if (rc)
		goto err_dev;

	rc = dev_dax_resize(dax_region, dev_dax, val);

err_dev:
	up_write(&dax_dev_rwsem);
err_region:
	up_write(&dax_region_rwsem);

	if (rc == 0)
		return len;
	return rc;
}
static DEVICE_ATTR_RW(size);
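
/*
 * Resize usage sketch (illustrative, not compiled): the size attribute
 * accepts a byte count (parsed with base 0, so hex works too) that
 * must be aligned to max(device align, memremap_compat_align()), and
 * the device must be unbound. E.g. growing dax0.1 to 2G from a shell:
 *
 *	echo dax0.1 > /sys/bus/dax/drivers/device_dax/unbind
 *	echo $((2 << 30)) > /sys/bus/dax/devices/dax0.1/size
 */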

static ssize_t range_parse(const char *opt, size_t len, struct range *range)
{
	unsigned long long addr = 0;
	char *start, *end, *str;
	ssize_t rc = -EINVAL;

	str = kstrdup(opt, GFP_KERNEL);
	if (!str)
		return rc;

	end = str;
	start = strsep(&end, "-");
	if (!start || !end)
		goto err;

	rc = kstrtoull(start, 16, &addr);
	if (rc)
		goto err;
	range->start = addr;

	rc = kstrtoull(end, 16, &addr);
	if (rc)
		goto err;
	range->end = addr;

err:
	kfree(str);
	return rc;
}

static ssize_t mapping_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	size_t to_alloc;
	struct range r;
	ssize_t rc;

	rc = range_parse(buf, len, &r);
	if (rc)
		return rc;

	rc = down_write_killable(&dax_region_rwsem);
	if (rc)
		return rc;
	if (!dax_region->dev->driver) {
		up_write(&dax_region_rwsem);
		return -ENXIO;
	}
	rc = down_write_killable(&dax_dev_rwsem);
	if (rc) {
		up_write(&dax_region_rwsem);
		return rc;
	}

	to_alloc = range_len(&r);
	if (alloc_is_aligned(dev_dax, to_alloc))
		rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
	else
		rc = -ENXIO;
	up_write(&dax_dev_rwsem);
	up_write(&dax_region_rwsem);

	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_WO(mapping);
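
/*
 * Mapping usage sketch (illustrative, not compiled): the mapping
 * attribute appends an explicit, aligned physical range to an unbound
 * dynamic device, parsed by range_parse() as "<start>-<end>" in hex.
 * E.g. claiming a 1G range (addresses are hypothetical):
 *
 *	echo 0x100000000-0x13fffffff > /sys/bus/dax/devices/dax0.1/mapping
 *
 * This is primarily intended for re-assembling a previous device
 * configuration rather than free-form allocation.
 */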

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sysfs_emit(buf, "%d\n", dev_dax->align);
}

static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = 0; i < dev_dax->nr_range; i++) {
		size_t len = range_len(&dev_dax->ranges[i].range);

		if (!alloc_is_aligned(dev_dax, len)) {
			dev_dbg(dev, "%s: align %u invalid for range %d\n",
				__func__, dev_dax->align, i);
			return -EINVAL;
		}
	}

	return 0;
}

static ssize_t align_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	unsigned long val, align_save;
	ssize_t rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return -ENXIO;

	if (!dax_align_valid(val))
		return -EINVAL;

	rc = down_write_killable(&dax_region_rwsem);
	if (rc)
		return rc;
	if (!dax_region->dev->driver) {
		up_write(&dax_region_rwsem);
		return -ENXIO;
	}

	rc = down_write_killable(&dax_dev_rwsem);
	if (rc) {
		up_write(&dax_region_rwsem);
		return rc;
	}
	if (dev->driver) {
		rc = -EBUSY;
		goto out_unlock;
	}

	align_save = dev_dax->align;
	dev_dax->align = val;
	rc = dev_dax_validate_align(dev_dax);
	if (rc)
		dev_dax->align = align_save;
out_unlock:
	up_write(&dax_dev_rwsem);
	up_write(&dax_region_rwsem);
	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(align);
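
/*
 * Align usage sketch (illustrative, not compiled): dax_align_valid()
 * only accepts the supported mapping sizes (PAGE_SIZE, PMD_SIZE and,
 * where available, PUD_SIZE), and the device must be unbound. E.g.
 * requesting 2M alignment on x86:
 *
 *	echo dax0.1 > /sys/bus/dax/drivers/device_dax/unbind
 *	echo 0x200000 > /sys/bus/dax/devices/dax0.1/align
 */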

static int dev_dax_target_node(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->target_node;
}

static ssize_t target_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sysfs_emit(buf, "%d\n", dev_dax_target_node(dev_dax));
}
static DEVICE_ATTR_RO(target_node);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	unsigned long long start;

	if (dev_dax->nr_range < 1)
		start = dax_region->res.start;
	else
		start = dev_dax->ranges[0].range.start;

	return sysfs_emit(buf, "%#llx\n", start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return sysfs_emit(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t numa_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t memmap_on_memory_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sysfs_emit(buf, "%d\n", dev_dax->memmap_on_memory);
}

static ssize_t memmap_on_memory_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t len)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	bool val;
	int rc;

	rc = kstrtobool(buf, &val);
	if (rc)
		return rc;

	if (val == true && !mhp_supports_memmap_on_memory()) {
		dev_dbg(dev, "memmap_on_memory is not available\n");
		return -EOPNOTSUPP;
	}

	rc = down_write_killable(&dax_dev_rwsem);
	if (rc)
		return rc;

	if (dev_dax->memmap_on_memory != val && dev->driver &&
	    to_dax_drv(dev->driver)->type == DAXDRV_KMEM_TYPE) {
		up_write(&dax_dev_rwsem);
		return -EBUSY;
	}

	dev_dax->memmap_on_memory = val;
	up_write(&dax_dev_rwsem);

	return len;
}
static DEVICE_ATTR_RW(memmap_on_memory);
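
/*
 * memmap_on_memory usage sketch (illustrative, not compiled): when
 * set, and supported by the platform, hotplugged dax memory carves its
 * struct page array (the memmap) out of the device's own capacity
 * instead of consuming regular system RAM. It must be toggled before
 * binding the device to dax_kmem:
 *
 *	echo 1 > /sys/bus/dax/devices/dax0.1/memmap_on_memory
 *	echo dax0.1 > /sys/bus/dax/drivers/kmem/new_id
 */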

static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
		return 0;
	if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
		return 0;
	if (a == &dev_attr_mapping.attr && is_static(dax_region))
		return 0;
	if ((a == &dev_attr_align.attr ||
	     a == &dev_attr_size.attr) && is_static(dax_region))
		return 0444;
	return a->mode;
}

static struct attribute *dev_dax_attributes[] = {
	&dev_attr_modalias.attr,
	&dev_attr_size.attr,
	&dev_attr_mapping.attr,
	&dev_attr_target_node.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_memmap_on_memory.attr,
	NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
	.attrs = dev_dax_attributes,
	.is_visible = dev_dax_visible,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dev_dax_attribute_group,
	NULL,
};

static void dev_dax_release(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;

	put_dax(dax_dev);
	free_dev_dax_id(dev_dax);
	kfree(dev_dax->pgmap);
	kfree(dev_dax);
}

static const struct device_type dev_dax_type = {
	.release = dev_dax_release,
	.groups = dax_attribute_groups,
};

static struct dev_dax *__devm_create_dev_dax(struct dev_dax_data *data)
{
	struct dax_region *dax_region = data->dax_region;
	struct device *parent = dax_region->dev;
	struct dax_device *dax_dev;
	struct dev_dax *dev_dax;
	struct inode *inode;
	struct device *dev;
	int rc;

	dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
	if (!dev_dax)
		return ERR_PTR(-ENOMEM);

	dev_dax->region = dax_region;
	if (is_static(dax_region)) {
		if (dev_WARN_ONCE(parent, data->id < 0,
				"dynamic id specified to static region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		dev_dax->id = data->id;
	} else {
		if (dev_WARN_ONCE(parent, data->id >= 0,
				"static id specified to dynamic region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		rc = alloc_dev_dax_id(dev_dax);
		if (rc < 0)
			goto err_id;
	}

	dev = &dev_dax->dev;
	device_initialize(dev);
	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);

	rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
	if (rc)
		goto err_range;

	if (data->pgmap) {
		dev_WARN_ONCE(parent, !is_static(dax_region),
			"custom dev_pagemap requires a static dax_region\n");

		dev_dax->pgmap = kmemdup(data->pgmap,
				sizeof(struct dev_pagemap), GFP_KERNEL);
		if (!dev_dax->pgmap) {
			rc = -ENOMEM;
			goto err_pgmap;
		}
	}

	/*
	 * No dax_operations since there is no access to this device outside of
	 * mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		goto err_alloc_dax;
	}
	set_dax_synchronous(dax_dev);
	set_dax_nocache(dax_dev);
	set_dax_nomc(dax_dev);

	/* a device_dax instance is dead while the driver is not attached */
	kill_dax(dax_dev);

	dev_dax->dax_dev = dax_dev;
	dev_dax->target_node = dax_region->target_node;
	dev_dax->align = dax_region->align;
	ida_init(&dev_dax->ida);

	dev_dax->memmap_on_memory = data->memmap_on_memory;

	inode = dax_inode(dax_dev);
	dev->devt = inode->i_rdev;
	dev->bus = &dax_bus_type;
	dev->parent = parent;
	dev->type = &dev_dax_type;

	rc = device_add(dev);
	if (rc) {
		kill_dev_dax(dev_dax);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
	if (rc)
		return ERR_PTR(rc);

	/* register mapping device for the initial allocation range */
	if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
		rc = devm_register_dax_mapping(dev_dax, 0);
		if (rc)
			return ERR_PTR(rc);
	}

	return dev_dax;

err_alloc_dax:
	kfree(dev_dax->pgmap);
err_pgmap:
	free_dev_dax_ranges(dev_dax);
err_range:
	free_dev_dax_id(dev_dax);
err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
}

struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
{
	struct dev_dax *dev_dax;
	int rc;

	rc = down_write_killable(&dax_region_rwsem);
	if (rc)
		return ERR_PTR(rc);

	dev_dax = __devm_create_dev_dax(data);
	up_write(&dax_region_rwsem);

	return dev_dax;
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);

int __dax_driver_register(struct dax_device_driver *dax_drv,
		struct module *module, const char *mod_name)
{
	struct device_driver *drv = &dax_drv->drv;

	/*
	 * dax_bus_probe() calls dax_drv->probe() unconditionally.
	 * So better be safe than sorry and ensure it is provided.
	 */
	if (!dax_drv->probe)
		return -EINVAL;

	INIT_LIST_HEAD(&dax_drv->ids);
	drv->owner = module;
	drv->name = mod_name;
	drv->mod_name = mod_name;
	drv->bus = &dax_bus_type;

	return driver_register(drv);
}
EXPORT_SYMBOL_GPL(__dax_driver_register);
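
/*
 * Driver registration sketch (illustrative, not compiled): a dax
 * driver supplies at least a probe callback and a type, then registers
 * via the dax_driver_register() wrapper from bus.h, which passes
 * THIS_MODULE and KBUILD_MODNAME here. The example_* identifiers are
 * assumptions:
 *
 *	static int example_probe(struct dev_dax *dev_dax)
 *	{
 *		// claim dev_dax->ranges, create the chardev, etc.
 *		return 0;
 *	}
 *
 *	static struct dax_device_driver example_driver = {
 *		.probe = example_probe,
 *		.type = DAXDRV_DEVICE_TYPE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return dax_driver_register(&example_driver);
 *	}
 *	module_init(example_init);
 */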

void dax_driver_unregister(struct dax_device_driver *dax_drv)
{
	struct device_driver *drv = &dax_drv->drv;
	struct dax_id *dax_id, *_id;

	mutex_lock(&dax_bus_lock);
	list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) {
		list_del(&dax_id->list);
		kfree(dax_id);
	}
	mutex_unlock(&dax_bus_lock);
	driver_unregister(drv);
}
EXPORT_SYMBOL_GPL(dax_driver_unregister);

int __init dax_bus_init(void)
{
	return bus_register(&dax_bus_type);
}

void __exit dax_bus_exit(void)
{
	bus_unregister(&dax_bus_type);
}