// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/io.h>
#include "dax-private.h"
#include "bus.h"

static struct class *dax_class;

static DEFINE_MUTEX(dax_bus_lock);

#define DAX_NAME_LEN 30
struct dax_id {
	struct list_head list;
	char dev_name[DAX_NAME_LEN];
};

static int dax_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0);
}

static struct dax_device_driver *to_dax_drv(struct device_driver *drv)
{
	return container_of(drv, struct dax_device_driver, drv);
}

static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
		const char *dev_name)
{
	struct dax_id *dax_id;

	lockdep_assert_held(&dax_bus_lock);

	list_for_each_entry(dax_id, &dax_drv->ids, list)
		if (sysfs_streq(dax_id->dev_name, dev_name))
			return dax_id;
	return NULL;
}

static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
{
	int match;

	mutex_lock(&dax_bus_lock);
	match = !!__dax_match_id(dax_drv, dev_name(dev));
	mutex_unlock(&dax_bus_lock);

	return match;
}

enum id_action {
	ID_REMOVE,
	ID_ADD,
};

static ssize_t do_id_store(struct device_driver *drv, const char *buf,
		size_t count, enum id_action action)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);
	unsigned int region_id, id;
	char devname[DAX_NAME_LEN];
	struct dax_id *dax_id;
	ssize_t rc = count;
	int fields;

	fields = sscanf(buf, "dax%d.%d", &region_id, &id);
	if (fields != 2)
		return -EINVAL;
	sprintf(devname, "dax%d.%d", region_id, id);
	if (!sysfs_streq(buf, devname))
		return -EINVAL;

	mutex_lock(&dax_bus_lock);
	dax_id = __dax_match_id(dax_drv, buf);
	if (!dax_id) {
		if (action == ID_ADD) {
			dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL);
			if (dax_id) {
				strncpy(dax_id->dev_name, buf, DAX_NAME_LEN);
				list_add(&dax_id->list, &dax_drv->ids);
			} else
				rc = -ENOMEM;
		} else
			/* nothing to remove */;
	} else if (action == ID_REMOVE) {
		list_del(&dax_id->list);
		kfree(dax_id);
	} else
		/* dax_id already added */;
	mutex_unlock(&dax_bus_lock);

	if (rc < 0)
		return rc;
	if (action == ID_ADD)
		rc = driver_attach(drv);
	if (rc)
		return rc;
	return count;
}

static ssize_t new_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_ADD);
}
static DRIVER_ATTR_WO(new_id);

static ssize_t remove_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_REMOVE);
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *dax_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dax_drv);

static int dax_bus_match(struct device *dev, struct device_driver *drv);

static bool is_static(struct dax_region *dax_region)
{
	return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
}

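/*
 * Overview of the dynamic-region lifecycle that dax_bus_probe()
 * participates in (sysfs paths and instance names are illustrative):
 *
 *   # echo 1 > .../dax_region/create               -> new seed, e.g. dax0.1
 *   # echo $((2 << 30)) > /sys/bus/dax/devices/dax0.1/size
 *   ... driver bind ...
 *
 * A successful probe of the current seed clears dax_region->seed so a
 * subsequent write to 'create' can mint the next seed instance.
 */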
static int dax_bus_probe(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	struct range *range = &dev_dax->range;
	int rc;

	if (range_len(range) == 0 || dev_dax->id < 0)
		return -ENXIO;

	rc = dax_drv->probe(dev_dax);

	if (rc || is_static(dax_region))
		return rc;

	/*
	 * Track new seed creation only after successful probe of the
	 * previous seed.
	 */
	if (dax_region->seed == dev)
		dax_region->seed = NULL;

	return 0;
}

static int dax_bus_remove(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return dax_drv->remove(dev_dax);
}

static struct bus_type dax_bus_type = {
	.name = "dax",
	.uevent = dax_bus_uevent,
	.match = dax_bus_match,
	.probe = dax_bus_probe,
	.remove = dax_bus_remove,
	.drv_groups = dax_drv_groups,
};

static int dax_bus_match(struct device *dev, struct device_driver *drv)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);

	/*
	 * All but the 'device-dax' driver, which has 'match_always'
	 * set, require an exact id match.
	 */
	if (dax_drv->match_always)
		return 1;

	return dax_match_id(dax_drv, dev);
}

/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%llu\n", (unsigned long long)
			resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", dax_region->align);
}
static DEVICE_ATTR_RO(align);

#define for_each_dax_region_resource(dax_region, res) \
	for (res = (dax_region)->res.child; res; res = res->sibling)

static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
{
	resource_size_t size = resource_size(&dax_region->res);
	struct resource *res;

	device_lock_assert(dax_region->dev);

	for_each_dax_region_resource(dax_region, res)
		size -= resource_size(res);
	return size;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long size;

	device_lock(dev);
	size = dax_region_avail_size(dax_region);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(available_size);

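/*
 * Note: 'seed' reports the next unbound instance a dynamic region will
 * offer for configuration, while 'youngest' (see create_show() below)
 * reports the most recently created instance; both attributes are
 * hidden for static regions, see dax_region_visible().
 */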
static ssize_t seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *seed;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	device_lock(dev);
	seed = dax_region->seed;
	rc = sprintf(buf, "%s\n", seed ? dev_name(seed) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(seed);

static ssize_t create_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *youngest;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	device_lock(dev);
	youngest = dax_region->youngest;
	rc = sprintf(buf, "%s\n", youngest ? dev_name(youngest) : "");
	device_unlock(dev);

	return rc;
}

static ssize_t create_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long avail;
	ssize_t rc;
	int val;

	if (is_static(dax_region))
		return -EINVAL;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	avail = dax_region_avail_size(dax_region);
	if (avail == 0)
		rc = -ENOSPC;
	else {
		struct dev_dax_data data = {
			.dax_region = dax_region,
			.size = 0,
			.id = -1,
		};
		struct dev_dax *dev_dax = devm_create_dev_dax(&data);

		if (IS_ERR(dev_dax))
			rc = PTR_ERR(dev_dax);
		else {
			/*
			 * In support of crafting multiple new devices
			 * simultaneously multiple seeds can be created,
			 * but only the first one that has not been
			 * successfully bound is tracked as the region
			 * seed.
			 */
			if (!dax_region->seed)
				dax_region->seed = &dev_dax->dev;
			dax_region->youngest = &dev_dax->dev;
			rc = len;
		}
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(create);

void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL_GPL(kill_dev_dax);

static void free_dev_dax_range(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;
	struct range *range = &dev_dax->range;

	device_lock_assert(dax_region->dev);
	if (range_len(range))
		__release_region(&dax_region->res, range->start,
				range_len(range));
}

static void unregister_dev_dax(void *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	dev_dbg(dev, "%s\n", __func__);

	kill_dev_dax(dev_dax);
	free_dev_dax_range(dev_dax);
	device_del(dev);
	put_device(dev);
}

/* a return value >= 0 indicates this invocation invalidated the id */
static int __free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	int rc = dev_dax->id;

	device_lock_assert(dev);

	if (is_static(dax_region) || dev_dax->id < 0)
		return -1;
	ida_free(&dax_region->ida, dev_dax->id);
	dev_dax->id = -1;
	return rc;
}

static int free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int rc;

	device_lock(dev);
	rc = __free_dev_dax_id(dev_dax);
	device_unlock(dev);
	return rc;
}

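/*
 * Example teardown of an idle dynamic instance via the region's
 * 'delete' attribute (path illustrative):
 *
 *   # echo dax0.1 > .../dax_region/delete
 *
 * The victim must have no driver bound and no allocated range, and
 * device-id-0 is always preserved so /sys/bus/dax/ stays populated
 * while the region is registered.
 */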
static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct dev_dax *dev_dax;
	struct device *victim;
	bool do_del = false;
	int rc;

	if (is_static(dax_region))
		return -EINVAL;

	victim = device_find_child_by_name(dax_region->dev, buf);
	if (!victim)
		return -ENXIO;

	device_lock(dev);
	device_lock(victim);
	dev_dax = to_dev_dax(victim);
	if (victim->driver || range_len(&dev_dax->range))
		rc = -EBUSY;
	else {
		/*
		 * Invalidate the device so it does not become active
		 * again, but always preserve device-id-0 so that
		 * /sys/bus/dax/ is guaranteed to be populated while any
		 * dax_region is registered.
		 */
		if (dev_dax->id > 0) {
			do_del = __free_dev_dax_id(dev_dax) >= 0;
			rc = len;
			if (dax_region->seed == victim)
				dax_region->seed = NULL;
			if (dax_region->youngest == victim)
				dax_region->youngest = NULL;
		} else
			rc = -EBUSY;
	}
	device_unlock(victim);

	/* won the race to invalidate the device, clean it up */
	if (do_del)
		devm_release_action(dev, unregister_dev_dax, victim);
	device_unlock(dev);
	put_device(victim);

	return rc;
}
static DEVICE_ATTR_WO(delete);

static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dax_region *dax_region = dev_get_drvdata(dev);

	if (is_static(dax_region))
		if (a == &dev_attr_available_size.attr
				|| a == &dev_attr_create.attr
				|| a == &dev_attr_seed.attr
				|| a == &dev_attr_delete.attr)
			return 0;
	return a->mode;
}

static struct attribute *dax_region_attributes[] = {
	&dev_attr_available_size.attr,
	&dev_attr_region_size.attr,
	&dev_attr_align.attr,
	&dev_attr_create.attr,
	&dev_attr_seed.attr,
	&dev_attr_delete.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
	.is_visible = dax_region_visible,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}

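/*
 * alloc_dax_region() is expected to be called from a region driver's
 * probe path (the pmem and hmem device-dax drivers, for example) with
 * @parent being that driver's device; teardown is devm-managed against
 * @parent via dax_region_unregister() above.
 */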
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct range *range, int target_node, unsigned int align,
		unsigned long flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(range->start, align)
			|| !IS_ALIGNED(range_len(range), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->target_node = target_node;
	ida_init(&dax_region->ida);
	dax_region->res = (struct resource) {
		.start = range->start,
		.end = range->end,
		.flags = IORESOURCE_MEM | flags,
	};

	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	kref_get(&dax_region->kref);
	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
		resource_size_t size)
{
	struct dax_region *dax_region = dev_dax->region;
	struct resource *res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct resource *alloc;

	device_lock_assert(dax_region->dev);

	/* handle the seed alloc special case */
	if (!size) {
		dev_dax->range = (struct range) {
			.start = res->start,
			.end = res->start - 1,
		};
		return 0;
	}

	alloc = __request_region(res, start, size, dev_name(dev), 0);
	if (!alloc)
		return -ENOMEM;

	dev_dax->range = (struct range) {
		.start = alloc->start,
		.end = alloc->end,
	};

	return 0;
}

static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res,
		resource_size_t size)
{
	struct dax_region *dax_region = dev_dax->region;
	struct range *range = &dev_dax->range;
	int rc = 0;

	device_lock_assert(dax_region->dev);

	if (size)
		rc = adjust_resource(res, range->start, size);
	else
		__release_region(&dax_region->res, range->start,
				range_len(range));
	if (rc)
		return rc;

	dev_dax->range = (struct range) {
		.start = range->start,
		.end = range->start + size - 1,
	};

	return 0;
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	unsigned long long size = range_len(&dev_dax->range);

	return sprintf(buf, "%llu\n", size);
}

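/*
 * Worked example (values illustrative, memremap_compat_align() is
 * arch-defined): with a 2MiB region align and a 16MiB compat align,
 * alloc_is_aligned() accepts a 32MiB allocation but rejects a 2MiB
 * one, since the effective granularity is max(align, compat_align).
 */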
static bool alloc_is_aligned(struct dax_region *dax_region,
		resource_size_t size)
{
	/*
	 * The minimum mapping granularity for a device instance is a
	 * single subsection, unless the arch says otherwise.
	 */
	return IS_ALIGNED(size, max_t(unsigned long, dax_region->align,
				memremap_compat_align()));
}

static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
{
	struct dax_region *dax_region = dev_dax->region;
	struct range *range = &dev_dax->range;
	struct resource *res, *adjust = NULL;
	struct device *dev = &dev_dax->dev;

	for_each_dax_region_resource(dax_region, res)
		if (strcmp(res->name, dev_name(dev)) == 0
				&& res->start == range->start) {
			adjust = res;
			break;
		}

	if (dev_WARN_ONCE(dev, !adjust, "failed to find matching resource\n"))
		return -ENXIO;
	return adjust_dev_dax_range(dev_dax, adjust, size);
}

static ssize_t dev_dax_resize(struct dax_region *dax_region,
		struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
	resource_size_t dev_size = range_len(&dev_dax->range);
	struct resource *region_res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	const char *name = dev_name(dev);
	struct resource *res, *first;

	if (dev->driver)
		return -EBUSY;
	if (size == dev_size)
		return 0;
	if (size > dev_size && size - dev_size > avail)
		return -ENOSPC;
	if (size < dev_size)
		return dev_dax_shrink(dev_dax, size);

	to_alloc = size - dev_size;
	if (dev_WARN_ONCE(dev, !alloc_is_aligned(dax_region, to_alloc),
			"resize of %pa misaligned\n", &to_alloc))
		return -ENXIO;

	/*
	 * Expand the device into the unused portion of the region. This
	 * may involve adjusting the end of an existing resource, or
	 * allocating a new resource.
	 */
	first = region_res->child;
	if (!first)
		return alloc_dev_dax_range(dev_dax, dax_region->res.start,
				to_alloc);
	for (res = first; to_alloc && res; res = res->sibling) {
		struct resource *next = res->sibling;
		resource_size_t free;

		/* space at the beginning of the region */
		free = 0;
		if (res == first && res->start > dax_region->res.start)
			free = res->start - dax_region->res.start;
		if (free >= to_alloc && dev_size == 0)
			return alloc_dev_dax_range(dev_dax,
					dax_region->res.start, to_alloc);

		free = 0;
		/* space between allocations */
		if (next && next->start > res->end + 1)
			free = next->start - res->end - 1;

		/* space at the end of the region */
		if (free < to_alloc && !next && res->end < region_res->end)
			free = region_res->end - res->end;

		if (free >= to_alloc && strcmp(name, res->name) == 0)
			return adjust_dev_dax_range(dev_dax, res,
					resource_size(res) + to_alloc);
		else if (free >= to_alloc && dev_size == 0)
			return alloc_dev_dax_range(dev_dax, res->end + 1,
					to_alloc);
	}
	return -ENOSPC;
}

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	ssize_t rc;
	unsigned long long val;
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	if (!alloc_is_aligned(dax_region, val)) {
		dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val);
		return -EINVAL;
	}

	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return -ENXIO;
	}
	device_lock(dev);
	rc = dev_dax_resize(dax_region, dev_dax, val);
	device_unlock(dev);
	device_unlock(dax_region->dev);

	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(size);

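/*
 * Example resize of an idle dynamic instance (illustrative):
 *
 *   # echo $((4 << 30)) > /sys/bus/dax/devices/dax0.1/size
 *
 * size_store() takes the region device lock before the instance lock,
 * serializing resize against region-level create/delete above.
 */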
static int dev_dax_target_node(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->target_node;
}

static ssize_t target_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
}
static DEVICE_ATTR_RO(target_node);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sprintf(buf, "%#llx\n", dev_dax->range.start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t numa_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
		return 0;
	if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
		return 0;
	if (a == &dev_attr_size.attr && is_static(dax_region))
		return 0444;
	return a->mode;
}

static struct attribute *dev_dax_attributes[] = {
	&dev_attr_modalias.attr,
	&dev_attr_size.attr,
	&dev_attr_target_node.attr,
	&dev_attr_resource.attr,
	&dev_attr_numa_node.attr,
	NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
	.attrs = dev_dax_attributes,
	.is_visible = dev_dax_visible,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dev_dax_attribute_group,
	NULL,
};

static void dev_dax_release(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	struct dax_device *dax_dev = dev_dax->dax_dev;

	put_dax(dax_dev);
	free_dev_dax_id(dev_dax);
	dax_region_put(dax_region);
	kfree(dev_dax->pgmap);
	kfree(dev_dax);
}

static const struct device_type dev_dax_type = {
	.release = dev_dax_release,
	.groups = dax_attribute_groups,
};

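/*
 * devm_create_dev_dax() is the single constructor for device-dax
 * instances: static region drivers call it with a fixed @data->id and
 * size, while create_store() above passes size == 0 and id == -1 to
 * mint a dynamic seed device.
 */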
struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
{
	struct dax_region *dax_region = data->dax_region;
	struct device *parent = dax_region->dev;
	struct dax_device *dax_dev;
	struct dev_dax *dev_dax;
	struct inode *inode;
	struct device *dev;
	int rc;

	dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
	if (!dev_dax)
		return ERR_PTR(-ENOMEM);

	if (is_static(dax_region)) {
		if (dev_WARN_ONCE(parent, data->id < 0,
				"dynamic id specified to static region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		dev_dax->id = data->id;
	} else {
		if (dev_WARN_ONCE(parent, data->id >= 0,
				"static id specified to dynamic region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		rc = ida_alloc(&dax_region->ida, GFP_KERNEL);
		if (rc < 0)
			goto err_id;
		dev_dax->id = rc;
	}

	dev_dax->region = dax_region;
	dev = &dev_dax->dev;
	device_initialize(dev);
	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);

	rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
	if (rc)
		goto err_range;

	if (data->pgmap) {
		dev_WARN_ONCE(parent, !is_static(dax_region),
			"custom dev_pagemap requires a static dax_region\n");

		dev_dax->pgmap = kmemdup(data->pgmap,
				sizeof(struct dev_pagemap), GFP_KERNEL);
		if (!dev_dax->pgmap) {
			rc = -ENOMEM;
			goto err_pgmap;
		}
	}

	/*
	 * No 'host' or dax_operations since there is no access to this
	 * device outside of mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		goto err_alloc_dax;
	}

	/* a device_dax instance is dead while the driver is not attached */
	kill_dax(dax_dev);

	/* from here on we're committed to teardown via dev_dax_release() */
	dev_dax->dax_dev = dax_dev;
	dev_dax->target_node = dax_region->target_node;
	kref_get(&dax_region->kref);

	inode = dax_inode(dax_dev);
	dev->devt = inode->i_rdev;
	if (data->subsys == DEV_DAX_BUS)
		dev->bus = &dax_bus_type;
	else
		dev->class = dax_class;
	dev->parent = parent;
	dev->type = &dev_dax_type;

	rc = device_add(dev);
	if (rc) {
		kill_dev_dax(dev_dax);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
	if (rc)
		return ERR_PTR(rc);

	return dev_dax;

err_alloc_dax:
	kfree(dev_dax->pgmap);
err_pgmap:
	free_dev_dax_range(dev_dax);
err_range:
	free_dev_dax_id(dev_dax);
err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);

static int match_always_count;

int __dax_driver_register(struct dax_device_driver *dax_drv,
		struct module *module, const char *mod_name)
{
	struct device_driver *drv = &dax_drv->drv;
	int rc = 0;

	INIT_LIST_HEAD(&dax_drv->ids);
	drv->owner = module;
	drv->name = mod_name;
	drv->mod_name = mod_name;
	drv->bus = &dax_bus_type;

	/* there can only be one default driver */
	mutex_lock(&dax_bus_lock);
	match_always_count += dax_drv->match_always;
	if (match_always_count > 1) {
		match_always_count--;
		WARN_ON(1);
		rc = -EINVAL;
	}
	mutex_unlock(&dax_bus_lock);
	if (rc)
		return rc;
	return driver_register(drv);
}
EXPORT_SYMBOL_GPL(__dax_driver_register);

void dax_driver_unregister(struct dax_device_driver *dax_drv)
{
	struct device_driver *drv = &dax_drv->drv;
	struct dax_id *dax_id, *_id;

	mutex_lock(&dax_bus_lock);
	match_always_count -= dax_drv->match_always;
	list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) {
		list_del(&dax_id->list);
		kfree(dax_id);
	}
	mutex_unlock(&dax_bus_lock);
	driver_unregister(drv);
}
EXPORT_SYMBOL_GPL(dax_driver_unregister);

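/*
 * CONFIG_DEV_DAX_PMEM_COMPAT retains the legacy /sys/class/dax
 * hierarchy alongside /sys/bus/dax for older userspace; dax_class is
 * only created in that configuration (class_destroy() tolerates the
 * NULL class otherwise).
 */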
int __init dax_bus_init(void)
{
	int rc;

	if (IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)) {
		dax_class = class_create(THIS_MODULE, "dax");
		if (IS_ERR(dax_class))
			return PTR_ERR(dax_class);
	}

	rc = bus_register(&dax_bus_type);
	if (rc)
		class_destroy(dax_class);
	return rc;
}

void __exit dax_bus_exit(void)
{
	bus_unregister(&dax_bus_type);
	class_destroy(dax_class);
}