// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/io.h>
#include "dax-private.h"
#include "bus.h"

static DEFINE_MUTEX(dax_bus_lock);

#define DAX_NAME_LEN 30
struct dax_id {
	struct list_head list;
	char dev_name[DAX_NAME_LEN];
};

static int dax_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0);
}

static struct dax_device_driver *to_dax_drv(struct device_driver *drv)
{
	return container_of(drv, struct dax_device_driver, drv);
}

static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
		const char *dev_name)
{
	struct dax_id *dax_id;

	lockdep_assert_held(&dax_bus_lock);

	list_for_each_entry(dax_id, &dax_drv->ids, list)
		if (sysfs_streq(dax_id->dev_name, dev_name))
			return dax_id;
	return NULL;
}

static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
{
	int match;

	mutex_lock(&dax_bus_lock);
	match = !!__dax_match_id(dax_drv, dev_name(dev));
	mutex_unlock(&dax_bus_lock);

	return match;
}

static int dax_match_type(struct dax_device_driver *dax_drv, struct device *dev)
{
	enum dax_driver_type type = DAXDRV_DEVICE_TYPE;
	struct dev_dax *dev_dax = to_dev_dax(dev);

	if (dev_dax->region->res.flags & IORESOURCE_DAX_KMEM)
		type = DAXDRV_KMEM_TYPE;

	if (dax_drv->type == type)
		return 1;

	/* default to device mode if dax_kmem is disabled */
	if (dax_drv->type == DAXDRV_DEVICE_TYPE &&
	    !IS_ENABLED(CONFIG_DEV_DAX_KMEM))
		return 1;

	return 0;
}

enum id_action {
	ID_REMOVE,
	ID_ADD,
};

static ssize_t do_id_store(struct device_driver *drv, const char *buf,
		size_t count, enum id_action action)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);
	unsigned int region_id, id;
	char devname[DAX_NAME_LEN];
	struct dax_id *dax_id;
	ssize_t rc = count;
	int fields;

	fields = sscanf(buf, "dax%d.%d", &region_id, &id);
	if (fields != 2)
		return -EINVAL;
	sprintf(devname, "dax%d.%d", region_id, id);
	if (!sysfs_streq(buf, devname))
		return -EINVAL;

	mutex_lock(&dax_bus_lock);
	dax_id = __dax_match_id(dax_drv, buf);
	if (!dax_id) {
		if (action == ID_ADD) {
			dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL);
			if (dax_id) {
				strscpy(dax_id->dev_name, buf, DAX_NAME_LEN);
				list_add(&dax_id->list, &dax_drv->ids);
			} else
				rc = -ENOMEM;
		}
	} else if (action == ID_REMOVE) {
		list_del(&dax_id->list);
		kfree(dax_id);
	}
	mutex_unlock(&dax_bus_lock);

	if (rc < 0)
		return rc;
	if (action == ID_ADD)
		rc = driver_attach(drv);
	if (rc)
		return rc;
	return count;
}

static ssize_t new_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_ADD);
}
static DRIVER_ATTR_WO(new_id);

static ssize_t remove_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_REMOVE);
}
static DRIVER_ATTR_WO(remove_id);
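
/*
 * Illustrative usage from userspace (not part of this file; paths assume a
 * typical build where device.c and kmem.c provide the "device_dax" and
 * "kmem" drivers): a specific instance can be steered to a driver by id,
 * e.g. something like:
 *
 *	# echo dax0.0 > /sys/bus/dax/drivers/kmem/new_id
 *	# echo dax0.0 > /sys/bus/dax/drivers/kmem/remove_id
 *
 * Writing new_id also triggers driver_attach() (see do_id_store() above),
 * so the device is immediately re-probed against this driver.
 */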

static struct attribute *dax_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dax_drv);

static int dax_bus_match(struct device *dev, struct device_driver *drv);

/*
 * Static dax regions are regions created by an external subsystem (e.g.
 * nvdimm) where a single range is assigned. Their boundaries are set by the
 * external subsystem and are usually limited to one physical memory range.
 * For example, for PMEM it is usually defined by NVDIMM Namespace boundaries
 * (i.e. a single contiguous range).
 *
 * On dynamic dax regions, the assigned region can be partitioned by the dax
 * core into multiple subdivisions. A subdivision is represented by one
 * /dev/daxN.M device composed of one or more potentially discontiguous ranges.
 *
 * When allocating a dax region, drivers must set whether it's static
 * (IORESOURCE_DAX_STATIC). On static dax devices, the @pgmap is pre-assigned
 * to the dax core when calling devm_create_dev_dax(), whereas in dynamic dax
 * devices it is NULL but afterwards allocated by the dax core on device
 * ->probe(). Care is needed to make sure that dynamic dax devices are torn
 * down with a cleared @pgmap field (see kill_dev_dax()).
 */
static bool is_static(struct dax_region *dax_region)
{
	return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
}

bool static_dev_dax(struct dev_dax *dev_dax)
{
	return is_static(dev_dax->region);
}
EXPORT_SYMBOL_GPL(static_dev_dax);

static u64 dev_dax_size(struct dev_dax *dev_dax)
{
	u64 size = 0;
	int i;

	device_lock_assert(&dev_dax->dev);

	for (i = 0; i < dev_dax->nr_range; i++)
		size += range_len(&dev_dax->ranges[i].range);

	return size;
}

static int dax_bus_probe(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	int rc;

	if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0)
		return -ENXIO;

	rc = dax_drv->probe(dev_dax);

	if (rc || is_static(dax_region))
		return rc;

	/*
	 * Track new seed creation only after successful probe of the
	 * previous seed.
	 */
	if (dax_region->seed == dev)
		dax_region->seed = NULL;

	return 0;
}

static void dax_bus_remove(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);

	if (dax_drv->remove)
		dax_drv->remove(dev_dax);
}

static struct bus_type dax_bus_type = {
	.name = "dax",
	.uevent = dax_bus_uevent,
	.match = dax_bus_match,
	.probe = dax_bus_probe,
	.remove = dax_bus_remove,
	.drv_groups = dax_drv_groups,
};

static int dax_bus_match(struct device *dev, struct device_driver *drv)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);

	if (dax_match_id(dax_drv, dev))
		return 1;
	return dax_match_type(dax_drv, dev);
}

/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%llu\n", (unsigned long long)
			resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t region_align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", dax_region->align);
}
static struct device_attribute dev_attr_region_align =
		__ATTR(align, 0400, region_align_show, NULL);

#define for_each_dax_region_resource(dax_region, res) \
	for (res = (dax_region)->res.child; res; res = res->sibling)

static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
{
	resource_size_t size = resource_size(&dax_region->res);
	struct resource *res;

	device_lock_assert(dax_region->dev);

	for_each_dax_region_resource(dax_region, res)
		size -= resource_size(res);
	return size;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long size;

	device_lock(dev);
	size = dax_region_avail_size(dax_region);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *seed;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	device_lock(dev);
	seed = dax_region->seed;
	rc = sprintf(buf, "%s\n", seed ? dev_name(seed) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(seed);

static ssize_t create_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *youngest;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	device_lock(dev);
	youngest = dax_region->youngest;
	rc = sprintf(buf, "%s\n", youngest ? dev_name(youngest) : "");
	device_unlock(dev);

	return rc;
}

static ssize_t create_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long avail;
	ssize_t rc;
	int val;

	if (is_static(dax_region))
		return -EINVAL;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	avail = dax_region_avail_size(dax_region);
	if (avail == 0)
		rc = -ENOSPC;
	else {
		struct dev_dax_data data = {
			.dax_region = dax_region,
			.size = 0,
			.id = -1,
			.memmap_on_memory = false,
		};
		struct dev_dax *dev_dax = devm_create_dev_dax(&data);

		if (IS_ERR(dev_dax))
			rc = PTR_ERR(dev_dax);
		else {
			/*
			 * In support of crafting multiple new devices
			 * simultaneously, multiple seeds can be created,
			 * but only the first one that has not been
			 * successfully bound is tracked as the region
			 * seed.
			 */
			if (!dax_region->seed)
				dax_region->seed = &dev_dax->dev;
			dax_region->youngest = &dev_dax->dev;
			rc = len;
		}
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(create);
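
/*
 * Illustrative usage (not part of this file): on a dynamic dax region the
 * "create" attribute lives in the "dax_region" sysfs group of the region's
 * parent device. Writing "1" allocates a new 0-sized seed device that can
 * then be sized via its "size" attribute, e.g. roughly:
 *
 *	# echo 1 > $region_parent/dax_region/create
 *	# cat $region_parent/dax_region/seed
 *	dax0.1
 *
 * where $region_parent is a placeholder for the parent device's sysfs path.
 */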

void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);

	/*
	 * Dynamic dax regions have the pgmap allocated via devm_kzalloc()
	 * and thus freed by devm. Clear the pgmap to not have stale pgmap
	 * ranges on probe() from previous reconfigurations of region devices.
	 */
	if (!static_dev_dax(dev_dax))
		dev_dax->pgmap = NULL;
}
EXPORT_SYMBOL_GPL(kill_dev_dax);

static void trim_dev_dax_range(struct dev_dax *dev_dax)
{
	int i = dev_dax->nr_range - 1;
	struct range *range = &dev_dax->ranges[i].range;
	struct dax_region *dax_region = dev_dax->region;

	device_lock_assert(dax_region->dev);
	dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
		(unsigned long long)range->start,
		(unsigned long long)range->end);

	__release_region(&dax_region->res, range->start, range_len(range));
	if (--dev_dax->nr_range == 0) {
		kfree(dev_dax->ranges);
		dev_dax->ranges = NULL;
	}
}

static void free_dev_dax_ranges(struct dev_dax *dev_dax)
{
	while (dev_dax->nr_range)
		trim_dev_dax_range(dev_dax);
}

static void unregister_dev_dax(void *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	dev_dbg(dev, "%s\n", __func__);

	kill_dev_dax(dev_dax);
	device_del(dev);
	free_dev_dax_ranges(dev_dax);
	put_device(dev);
}

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

static void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}

/* a return value >= 0 indicates this invocation invalidated the id */
static int __free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	int rc = dev_dax->id;

	device_lock_assert(dev);

	if (!dev_dax->dyn_id || dev_dax->id < 0)
		return -1;
	dax_region = dev_dax->region;
	ida_free(&dax_region->ida, dev_dax->id);
	dax_region_put(dax_region);
	dev_dax->id = -1;
	return rc;
}

static int free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int rc;

	device_lock(dev);
	rc = __free_dev_dax_id(dev_dax);
	device_unlock(dev);
	return rc;
}

static int alloc_dev_dax_id(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;
	int id;

	id = ida_alloc(&dax_region->ida, GFP_KERNEL);
	if (id < 0)
		return id;
	kref_get(&dax_region->kref);
	dev_dax->dyn_id = true;
	dev_dax->id = id;
	return id;
}

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct dev_dax *dev_dax;
	struct device *victim;
	bool do_del = false;
	int rc;

	if (is_static(dax_region))
		return -EINVAL;

	victim = device_find_child_by_name(dax_region->dev, buf);
	if (!victim)
		return -ENXIO;

	device_lock(dev);
	device_lock(victim);
	dev_dax = to_dev_dax(victim);
	if (victim->driver || dev_dax_size(dev_dax))
		rc = -EBUSY;
	else {
		/*
		 * Invalidate the device so it does not become active
		 * again, but always preserve device-id-0 so that
		 * /sys/bus/dax/ is guaranteed to be populated while any
		 * dax_region is registered.
		 */
		if (dev_dax->id > 0) {
			do_del = __free_dev_dax_id(dev_dax) >= 0;
			rc = len;
			if (dax_region->seed == victim)
				dax_region->seed = NULL;
			if (dax_region->youngest == victim)
				dax_region->youngest = NULL;
		} else
			rc = -EBUSY;
	}
	device_unlock(victim);

	/* won the race to invalidate the device, clean it up */
	if (do_del)
		devm_release_action(dev, unregister_dev_dax, victim);
	device_unlock(dev);
	put_device(victim);

	return rc;
}
static DEVICE_ATTR_WO(delete);

static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dax_region *dax_region = dev_get_drvdata(dev);

	if (is_static(dax_region))
		if (a == &dev_attr_available_size.attr
				|| a == &dev_attr_create.attr
				|| a == &dev_attr_seed.attr
				|| a == &dev_attr_delete.attr)
			return 0;
	return a->mode;
}

static struct attribute *dax_region_attributes[] = {
	&dev_attr_available_size.attr,
	&dev_attr_region_size.attr,
	&dev_attr_region_align.attr,
	&dev_attr_create.attr,
	&dev_attr_seed.attr,
	&dev_attr_delete.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
	.is_visible = dax_region_visible,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};

static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}

struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct range *range, int target_node, unsigned int align,
		unsigned long flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(range->start, align)
			|| !IS_ALIGNED(range_len(range), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->target_node = target_node;
	ida_init(&dax_region->ida);
	dax_region->res = (struct resource) {
		.start = range->start,
		.end = range->end,
		.flags = IORESOURCE_MEM | flags,
	};

	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static void dax_mapping_release(struct device *dev)
{
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct device *parent = dev->parent;
	struct dev_dax *dev_dax = to_dev_dax(parent);

	ida_free(&dev_dax->ida, mapping->id);
	kfree(mapping);
	put_device(parent);
}

static void unregister_dax_mapping(void *data)
{
	struct device *dev = data;
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);
	struct dax_region *dax_region = dev_dax->region;

	dev_dbg(dev, "%s\n", __func__);

	device_lock_assert(dax_region->dev);

	dev_dax->ranges[mapping->range_id].mapping = NULL;
	mapping->range_id = -1;

	device_unregister(dev);
}

static struct dev_dax_range *get_dax_range(struct device *dev)
{
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);
	struct dax_region *dax_region = dev_dax->region;

	device_lock(dax_region->dev);
	if (mapping->range_id < 0) {
		device_unlock(dax_region->dev);
		return NULL;
	}

	return &dev_dax->ranges[mapping->range_id];
}

static void put_dax_range(struct dev_dax_range *dax_range)
{
	struct dax_mapping *mapping = dax_range->mapping;
	struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent);
	struct dax_region *dax_region = dev_dax->region;

	device_unlock(dax_region->dev);
}

static ssize_t start_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#llx\n", dax_range->range.start);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(start, 0400, start_show, NULL);

static ssize_t end_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#llx\n", dax_range->range.end);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(end, 0400, end_show, NULL);

static ssize_t pgoff_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#lx\n", dax_range->pgoff);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(page_offset, 0400, pgoff_show, NULL);

static struct attribute *dax_mapping_attributes[] = {
	&dev_attr_start.attr,
	&dev_attr_end.attr,
	&dev_attr_page_offset.attr,
	NULL,
};

static const struct attribute_group dax_mapping_attribute_group = {
	.attrs = dax_mapping_attributes,
};

static const struct attribute_group *dax_mapping_attribute_groups[] = {
	&dax_mapping_attribute_group,
	NULL,
};

static struct device_type dax_mapping_type = {
	.release = dax_mapping_release,
	.groups = dax_mapping_attribute_groups,
};

static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
{
	struct dax_region *dax_region = dev_dax->region;
	struct dax_mapping *mapping;
	struct device *dev;
	int rc;

	device_lock_assert(dax_region->dev);

	if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
				"region disabled\n"))
		return -ENXIO;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;
	mapping->range_id = range_id;
	mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
	if (mapping->id < 0) {
		kfree(mapping);
		return -ENOMEM;
	}
	dev_dax->ranges[range_id].mapping = mapping;
	dev = &mapping->dev;
	device_initialize(dev);
	dev->parent = &dev_dax->dev;
	get_device(dev->parent);
	dev->type = &dax_mapping_type;
	dev_set_name(dev, "mapping%d", mapping->id);
	rc = device_add(dev);
	if (rc) {
		put_device(dev);
		return rc;
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping,
			dev);
	if (rc)
		return rc;
	return 0;
}

static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
		resource_size_t size)
{
	struct dax_region *dax_region = dev_dax->region;
	struct resource *res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct dev_dax_range *ranges;
	unsigned long pgoff = 0;
	struct resource *alloc;
	int i, rc;

	device_lock_assert(dax_region->dev);

	/* handle the seed alloc special case */
	if (!size) {
		if (dev_WARN_ONCE(dev, dev_dax->nr_range,
					"0-size allocation must be first\n"))
			return -EBUSY;
		/* nr_range == 0 is elsewhere special cased as 0-size device */
		return 0;
	}

	alloc = __request_region(res, start, size, dev_name(dev), 0);
	if (!alloc)
		return -ENOMEM;

	ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
			* (dev_dax->nr_range + 1), GFP_KERNEL);
	if (!ranges) {
		__release_region(res, alloc->start, resource_size(alloc));
		return -ENOMEM;
	}

	for (i = 0; i < dev_dax->nr_range; i++)
		pgoff += PHYS_PFN(range_len(&ranges[i].range));
	dev_dax->ranges = ranges;
	ranges[dev_dax->nr_range++] = (struct dev_dax_range) {
		.pgoff = pgoff,
		.range = {
			.start = alloc->start,
			.end = alloc->end,
		},
	};

	dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
			&alloc->start, &alloc->end);
	/*
	 * A dev_dax instance must be registered before mapping device
	 * children can be added. Defer to devm_create_dev_dax() to add
	 * the initial mapping device.
	 */
	if (!device_is_registered(&dev_dax->dev))
		return 0;

	rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
	if (rc)
		trim_dev_dax_range(dev_dax);

	return rc;
}

static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
{
	int last_range = dev_dax->nr_range - 1;
	struct dev_dax_range *dax_range = &dev_dax->ranges[last_range];
	struct dax_region *dax_region = dev_dax->region;
	bool is_shrink = resource_size(res) > size;
	struct range *range = &dax_range->range;
	struct device *dev = &dev_dax->dev;
	int rc;

	device_lock_assert(dax_region->dev);

	if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
		return -EINVAL;

	rc = adjust_resource(res, range->start, size);
	if (rc)
		return rc;

	*range = (struct range) {
		.start = range->start,
		.end = range->start + size - 1,
	};

	dev_dbg(dev, "%s range[%d]: %#llx:%#llx\n", is_shrink ? "shrink" : "extend",
			last_range, (unsigned long long) range->start,
			(unsigned long long) range->end);

	return 0;
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	unsigned long long size;

	device_lock(dev);
	size = dev_dax_size(dev_dax);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", size);
}

static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
{
	/*
	 * The minimum mapping granularity for a device instance is a
	 * single subsection, unless the arch says otherwise.
	 */
	return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align()));
}

static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t to_shrink = dev_dax_size(dev_dax) - size;
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = dev_dax->nr_range - 1; i >= 0; i--) {
		struct range *range = &dev_dax->ranges[i].range;
		struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
		struct resource *adjust = NULL, *res;
		resource_size_t shrink;

		shrink = min_t(u64, to_shrink, range_len(range));
		if (shrink >= range_len(range)) {
			devm_release_action(dax_region->dev,
					unregister_dax_mapping, &mapping->dev);
			trim_dev_dax_range(dev_dax);
			to_shrink -= shrink;
			if (!to_shrink)
				break;
			continue;
		}

		for_each_dax_region_resource(dax_region, res)
			if (strcmp(res->name, dev_name(dev)) == 0
					&& res->start == range->start) {
				adjust = res;
				break;
			}

		if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1,
					"failed to find matching resource\n"))
			return -ENXIO;
		return adjust_dev_dax_range(dev_dax, adjust, range_len(range)
				- shrink);
	}
	return 0;
}

/*
 * Only allow adjustments that preserve the relative pgoff of existing
 * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
 */
static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res)
{
	struct dev_dax_range *last;
	int i;

	if (dev_dax->nr_range == 0)
		return false;
	if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0)
		return false;
	last = &dev_dax->ranges[dev_dax->nr_range - 1];
	if (last->range.start != res->start || last->range.end != res->end)
		return false;
	for (i = 0; i < dev_dax->nr_range - 1; i++) {
		struct dev_dax_range *dax_range = &dev_dax->ranges[i];

		if (dax_range->pgoff > last->pgoff)
			return false;
	}

	return true;
}

static ssize_t dev_dax_resize(struct dax_region *dax_region,
		struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
	resource_size_t dev_size = dev_dax_size(dev_dax);
	struct resource *region_res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct resource *res, *first;
	resource_size_t alloc = 0;
	int rc;

	if (dev->driver)
		return -EBUSY;
	if (size == dev_size)
		return 0;
	if (size > dev_size && size - dev_size > avail)
		return -ENOSPC;
	if (size < dev_size)
		return dev_dax_shrink(dev_dax, size);

	to_alloc = size - dev_size;
	if (dev_WARN_ONCE(dev, !alloc_is_aligned(dev_dax, to_alloc),
			"resize of %pa misaligned\n", &to_alloc))
		return -ENXIO;

	/*
	 * Expand the device into the unused portion of the region. This
	 * may involve adjusting the end of an existing resource, or
	 * allocating a new resource.
	 */
retry:
	first = region_res->child;
	if (!first)
		return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);

	rc = -ENOSPC;
	for (res = first; res; res = res->sibling) {
		struct resource *next = res->sibling;

		/* space at the beginning of the region */
		if (res == first && res->start > dax_region->res.start) {
			alloc = min(res->start - dax_region->res.start, to_alloc);
			rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc);
			break;
		}

		alloc = 0;
		/* space between allocations */
		if (next && next->start > res->end + 1)
			alloc = min(next->start - (res->end + 1), to_alloc);

		/* space at the end of the region */
		if (!alloc && !next && res->end < region_res->end)
			alloc = min(region_res->end - res->end, to_alloc);

		if (!alloc)
			continue;

		if (adjust_ok(dev_dax, res)) {
			rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc);
			break;
		}
		rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc);
		break;
	}
	if (rc)
		return rc;
	to_alloc -= alloc;
	if (to_alloc)
		goto retry;
	return 0;
}

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	ssize_t rc;
	unsigned long long val;
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	if (!alloc_is_aligned(dev_dax, val)) {
		dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val);
		return -EINVAL;
	}

	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return -ENXIO;
	}
	device_lock(dev);
	rc = dev_dax_resize(dax_region, dev_dax, val);
	device_unlock(dev);
	device_unlock(dax_region->dev);

	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(size);
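
/*
 * Illustrative resize flow (not part of this file): with the region driver
 * attached and the target dax device not bound to a driver, the device can
 * be grown or shrunk through its "size" attribute, e.g. something like:
 *
 *	# echo 0 > /sys/bus/dax/devices/dax0.1/size
 *	# echo 4294967296 > /sys/bus/dax/devices/dax0.1/size
 *
 * The value must satisfy alloc_is_aligned(), i.e. be a multiple of the
 * larger of the device align and memremap_compat_align().
 */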

static ssize_t range_parse(const char *opt, size_t len, struct range *range)
{
	unsigned long long addr = 0;
	char *start, *end, *str;
	ssize_t rc = -EINVAL;

	str = kstrdup(opt, GFP_KERNEL);
	if (!str)
		return rc;

	end = str;
	start = strsep(&end, "-");
	if (!start || !end)
		goto err;

	rc = kstrtoull(start, 16, &addr);
	if (rc)
		goto err;
	range->start = addr;

	rc = kstrtoull(end, 16, &addr);
	if (rc)
		goto err;
	range->end = addr;

err:
	kfree(str);
	return rc;
}

static ssize_t mapping_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	size_t to_alloc;
	struct range r;
	ssize_t rc;

	rc = range_parse(buf, len, &r);
	if (rc)
		return rc;

	rc = -ENXIO;
	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return rc;
	}
	device_lock(dev);

	to_alloc = range_len(&r);
	if (alloc_is_aligned(dev_dax, to_alloc))
		rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
	device_unlock(dev);
	device_unlock(dax_region->dev);

	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_WO(mapping);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sprintf(buf, "%d\n", dev_dax->align);
}

static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = 0; i < dev_dax->nr_range; i++) {
		size_t len = range_len(&dev_dax->ranges[i].range);

		if (!alloc_is_aligned(dev_dax, len)) {
			dev_dbg(dev, "%s: align %u invalid for range %d\n",
				__func__, dev_dax->align, i);
			return -EINVAL;
		}
	}

	return 0;
}

static ssize_t align_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	unsigned long val, align_save;
	ssize_t rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return -ENXIO;

	if (!dax_align_valid(val))
		return -EINVAL;

	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return -ENXIO;
	}

	device_lock(dev);
	if (dev->driver) {
		rc = -EBUSY;
		goto out_unlock;
	}

	align_save = dev_dax->align;
	dev_dax->align = val;
	rc = dev_dax_validate_align(dev_dax);
	if (rc)
		dev_dax->align = align_save;
out_unlock:
	device_unlock(dev);
	device_unlock(dax_region->dev);
	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(align);
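
/*
 * Illustrative only: "mapping" takes a "<start>-<end>" physical range in
 * hexadecimal (see range_parse() above), and "align" takes one of the
 * alignments accepted by dax_align_valid(), e.g.:
 *
 *	# echo 0x180000000-0x1bfffffff > /sys/bus/dax/devices/dax0.1/mapping
 *	# echo 2097152 > /sys/bus/dax/devices/dax0.1/align
 *
 * Both writes require the dax region driver to be attached; changing the
 * alignment additionally requires the dax device itself to be unbound.
 */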

static int dev_dax_target_node(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->target_node;
}

static ssize_t target_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
}
static DEVICE_ATTR_RO(target_node);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	unsigned long long start;

	if (dev_dax->nr_range < 1)
		start = dax_region->res.start;
	else
		start = dev_dax->ranges[0].range.start;

	return sprintf(buf, "%#llx\n", start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t numa_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
		return 0;
	if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
		return 0;
	if (a == &dev_attr_mapping.attr && is_static(dax_region))
		return 0;
	if ((a == &dev_attr_align.attr ||
	     a == &dev_attr_size.attr) && is_static(dax_region))
		return 0444;
	return a->mode;
}

static struct attribute *dev_dax_attributes[] = {
	&dev_attr_modalias.attr,
	&dev_attr_size.attr,
	&dev_attr_mapping.attr,
	&dev_attr_target_node.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_numa_node.attr,
	NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
	.attrs = dev_dax_attributes,
	.is_visible = dev_dax_visible,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dev_dax_attribute_group,
	NULL,
};

static void dev_dax_release(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;

	put_dax(dax_dev);
	free_dev_dax_id(dev_dax);
	kfree(dev_dax->pgmap);
	kfree(dev_dax);
}

static const struct device_type dev_dax_type = {
	.release = dev_dax_release,
	.groups = dax_attribute_groups,
};

struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
{
	struct dax_region *dax_region = data->dax_region;
	struct device *parent = dax_region->dev;
	struct dax_device *dax_dev;
	struct dev_dax *dev_dax;
	struct inode *inode;
	struct device *dev;
	int rc;

	dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
	if (!dev_dax)
		return ERR_PTR(-ENOMEM);

	dev_dax->region = dax_region;
	if (is_static(dax_region)) {
		if (dev_WARN_ONCE(parent, data->id < 0,
				"dynamic id specified to static region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		dev_dax->id = data->id;
	} else {
		if (dev_WARN_ONCE(parent, data->id >= 0,
				"static id specified to dynamic region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		rc = alloc_dev_dax_id(dev_dax);
		if (rc < 0)
			goto err_id;
	}

	dev = &dev_dax->dev;
	device_initialize(dev);
	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);

	rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
	if (rc)
		goto err_range;

	if (data->pgmap) {
		dev_WARN_ONCE(parent, !is_static(dax_region),
			"custom dev_pagemap requires a static dax_region\n");

		dev_dax->pgmap = kmemdup(data->pgmap,
				sizeof(struct dev_pagemap), GFP_KERNEL);
		if (!dev_dax->pgmap) {
			rc = -ENOMEM;
			goto err_pgmap;
		}
	}

	/*
	 * No dax_operations since there is no access to this device outside of
	 * mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		goto err_alloc_dax;
	}
	set_dax_synchronous(dax_dev);
	set_dax_nocache(dax_dev);
	set_dax_nomc(dax_dev);

	/* a device_dax instance is dead while the driver is not attached */
	kill_dax(dax_dev);

	dev_dax->dax_dev = dax_dev;
	dev_dax->target_node = dax_region->target_node;
	dev_dax->align = dax_region->align;
	ida_init(&dev_dax->ida);

	dev_dax->memmap_on_memory = data->memmap_on_memory;

	inode = dax_inode(dax_dev);
	dev->devt = inode->i_rdev;
	dev->bus = &dax_bus_type;
	dev->parent = parent;
	dev->type = &dev_dax_type;

	rc = device_add(dev);
	if (rc) {
		kill_dev_dax(dev_dax);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
	if (rc)
		return ERR_PTR(rc);

	/* register mapping device for the initial allocation range */
	if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
		rc = devm_register_dax_mapping(dev_dax, 0);
		if (rc)
			return ERR_PTR(rc);
	}

	return dev_dax;

err_alloc_dax:
	kfree(dev_dax->pgmap);
err_pgmap:
	free_dev_dax_ranges(dev_dax);
err_range:
	free_dev_dax_id(dev_dax);
err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);
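
/*
 * Sketch of how a region driver might use alloc_dax_region() and
 * devm_create_dev_dax() for a static region (names below are illustrative
 * placeholders; see the dax_pmem and dax_hmem drivers for the real callers):
 *
 *	struct dax_region *dax_region;
 *	struct dev_dax_data data;
 *
 *	dax_region = alloc_dax_region(dev, region_id, &range, target_node,
 *			align, IORESOURCE_DAX_STATIC);
 *	if (!dax_region)
 *		return -ENOMEM;
 *
 *	data = (struct dev_dax_data) {
 *		.dax_region = dax_region,
 *		.id = id,
 *		.pgmap = &pgmap,
 *		.size = range_len(&range),
 *		.memmap_on_memory = false,
 *	};
 *
 *	return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data));
 */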

int __dax_driver_register(struct dax_device_driver *dax_drv,
		struct module *module, const char *mod_name)
{
	struct device_driver *drv = &dax_drv->drv;

	/*
	 * dax_bus_probe() calls dax_drv->probe() unconditionally.
	 * So better be safe than sorry and ensure it is provided.
	 */
	if (!dax_drv->probe)
		return -EINVAL;

	INIT_LIST_HEAD(&dax_drv->ids);
	drv->owner = module;
	drv->name = mod_name;
	drv->mod_name = mod_name;
	drv->bus = &dax_bus_type;

	return driver_register(drv);
}
EXPORT_SYMBOL_GPL(__dax_driver_register);

void dax_driver_unregister(struct dax_device_driver *dax_drv)
{
	struct device_driver *drv = &dax_drv->drv;
	struct dax_id *dax_id, *_id;

	mutex_lock(&dax_bus_lock);
	list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) {
		list_del(&dax_id->list);
		kfree(dax_id);
	}
	mutex_unlock(&dax_bus_lock);
	driver_unregister(drv);
}
EXPORT_SYMBOL_GPL(dax_driver_unregister);

int __init dax_bus_init(void)
{
	return bus_register(&dax_bus_type);
}

void __exit dax_bus_exit(void)
{
	bus_unregister(&dax_bus_type);
}
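
/*
 * Sketch of registering a driver against this bus (illustrative only; the
 * real consumers are the device_dax and kmem drivers):
 *
 *	static struct dax_device_driver example_dax_driver = {
 *		.type = DAXDRV_DEVICE_TYPE,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *	module_dax_driver(example_dax_driver);
 *
 * module_dax_driver() expands to dax_driver_register() /
 * dax_driver_unregister(), which in turn reach __dax_driver_register() above
 * with THIS_MODULE and KBUILD_MODNAME.
 */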