// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/io.h>
#include "dax-private.h"
#include "bus.h"

static DEFINE_MUTEX(dax_bus_lock);

#define DAX_NAME_LEN 30
struct dax_id {
	struct list_head list;
	char dev_name[DAX_NAME_LEN];
};

static int dax_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0);
}

static struct dax_device_driver *to_dax_drv(struct device_driver *drv)
{
	return container_of(drv, struct dax_device_driver, drv);
}

static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
		const char *dev_name)
{
	struct dax_id *dax_id;

	lockdep_assert_held(&dax_bus_lock);

	list_for_each_entry(dax_id, &dax_drv->ids, list)
		if (sysfs_streq(dax_id->dev_name, dev_name))
			return dax_id;
	return NULL;
}

static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
{
	int match;

	mutex_lock(&dax_bus_lock);
	match = !!__dax_match_id(dax_drv, dev_name(dev));
	mutex_unlock(&dax_bus_lock);

	return match;
}

static int dax_match_type(struct dax_device_driver *dax_drv, struct device *dev)
{
	enum dax_driver_type type = DAXDRV_DEVICE_TYPE;
	struct dev_dax *dev_dax = to_dev_dax(dev);

	if (dev_dax->region->res.flags & IORESOURCE_DAX_KMEM)
		type = DAXDRV_KMEM_TYPE;

	if (dax_drv->type == type)
		return 1;

	/* default to device mode if dax_kmem is disabled */
	if (dax_drv->type == DAXDRV_DEVICE_TYPE &&
	    !IS_ENABLED(CONFIG_DEV_DAX_KMEM))
		return 1;

	return 0;
}

enum id_action {
	ID_REMOVE,
	ID_ADD,
};

static ssize_t do_id_store(struct device_driver *drv, const char *buf,
		size_t count, enum id_action action)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);
	unsigned int region_id, id;
	char devname[DAX_NAME_LEN];
	struct dax_id *dax_id;
	ssize_t rc = count;
	int fields;

	fields = sscanf(buf, "dax%d.%d", &region_id, &id);
	if (fields != 2)
		return -EINVAL;
	sprintf(devname, "dax%d.%d", region_id, id);
	if (!sysfs_streq(buf, devname))
		return -EINVAL;

	mutex_lock(&dax_bus_lock);
	dax_id = __dax_match_id(dax_drv, buf);
	if (!dax_id) {
		if (action == ID_ADD) {
			dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL);
			if (dax_id) {
				strscpy(dax_id->dev_name, buf, DAX_NAME_LEN);
				list_add(&dax_id->list, &dax_drv->ids);
			} else
				rc = -ENOMEM;
		}
	} else if (action == ID_REMOVE) {
		list_del(&dax_id->list);
		kfree(dax_id);
	}
	mutex_unlock(&dax_bus_lock);

	if (rc < 0)
		return rc;
	if (action == ID_ADD)
		rc = driver_attach(drv);
	if (rc)
		return rc;
	return count;
}

static ssize_t new_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_ADD);
}
static DRIVER_ATTR_WO(new_id);

static ssize_t remove_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_REMOVE);
}
static DRIVER_ATTR_WO(remove_id);
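
/*
 * Typical usage from userspace (assuming an instance named dax0.0): a
 * device is handed to an alternate dax driver, e.g. kmem, with:
 *
 *	echo dax0.0 > /sys/bus/dax/drivers/device_dax/unbind
 *	echo dax0.0 > /sys/bus/dax/drivers/kmem/new_id
 *
 * The written name must match the "dax%d.%d" format enforced by
 * do_id_store().
 */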

static struct attribute *dax_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dax_drv);

static int dax_bus_match(struct device *dev, struct device_driver *drv);

/*
 * Static dax regions are regions created by an external subsystem, such as
 * nvdimm, where a single range is assigned. Their boundaries are set by the
 * external subsystem and are usually limited to one physical memory range.
 * For example, for PMEM the range is usually defined by NVDIMM namespace
 * boundaries (i.e. a single contiguous range).
 *
 * In dynamic dax regions, the assigned region can be partitioned by the dax
 * core into multiple subdivisions. A subdivision is represented by one
 * /dev/daxN.M device composed of one or more potentially discontiguous
 * ranges.
 *
 * When allocating a dax region, drivers must set whether it's static
 * (IORESOURCE_DAX_STATIC). On static dax devices, the @pgmap is pre-assigned
 * to the dax core when calling devm_create_dev_dax(), whereas in dynamic dax
 * devices it is NULL but afterwards allocated by the dax core on device
 * ->probe(). Care is needed to make sure that dynamic dax devices are torn
 * down with a cleared @pgmap field (see kill_dev_dax()).
 */
static bool is_static(struct dax_region *dax_region)
{
	return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
}

bool static_dev_dax(struct dev_dax *dev_dax)
{
	return is_static(dev_dax->region);
}
EXPORT_SYMBOL_GPL(static_dev_dax);

static u64 dev_dax_size(struct dev_dax *dev_dax)
{
	u64 size = 0;
	int i;

	device_lock_assert(&dev_dax->dev);

	for (i = 0; i < dev_dax->nr_range; i++)
		size += range_len(&dev_dax->ranges[i].range);

	return size;
}
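
/*
 * A dev_dax instance with no allocated ranges, or one whose id has been
 * invalidated (see delete_store()), is not a candidate for driver binding.
 */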
static int dax_bus_probe(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	int rc;

	if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0)
		return -ENXIO;

	rc = dax_drv->probe(dev_dax);

	if (rc || is_static(dax_region))
		return rc;

	/*
	 * Track new seed creation only after successful probe of the
	 * previous seed.
	 */
	if (dax_region->seed == dev)
		dax_region->seed = NULL;

	return 0;
}

static void dax_bus_remove(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);

	if (dax_drv->remove)
		dax_drv->remove(dev_dax);
}

static struct bus_type dax_bus_type = {
	.name = "dax",
	.uevent = dax_bus_uevent,
	.match = dax_bus_match,
	.probe = dax_bus_probe,
	.remove = dax_bus_remove,
	.drv_groups = dax_drv_groups,
};

static int dax_bus_match(struct device *dev, struct device_driver *drv)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);

	if (dax_match_id(dax_drv, dev))
		return 1;
	return dax_match_type(dax_drv, dev);
}

/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%llu\n", (unsigned long long)
			resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t region_align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", dax_region->align);
}
static struct device_attribute dev_attr_region_align =
		__ATTR(align, 0400, region_align_show, NULL);

#define for_each_dax_region_resource(dax_region, res) \
	for (res = (dax_region)->res.child; res; res = res->sibling)

static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
{
	resource_size_t size = resource_size(&dax_region->res);
	struct resource *res;

	device_lock_assert(dax_region->dev);

	for_each_dax_region_resource(dax_region, res)
		size -= resource_size(res);
	return size;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long size;

	device_lock(dev);
	size = dax_region_avail_size(dax_region);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *seed;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	device_lock(dev);
	seed = dax_region->seed;
	rc = sprintf(buf, "%s\n", seed ? dev_name(seed) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(seed);
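
/*
 * Writing "1" to the region's "create" attribute instantiates a new,
 * zero-sized device. Reading "create" reports the most recently created
 * ("youngest") device, while "seed" reports the first instance that has not
 * yet been successfully bound.
 */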
static ssize_t create_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *youngest;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	device_lock(dev);
	youngest = dax_region->youngest;
	rc = sprintf(buf, "%s\n", youngest ? dev_name(youngest) : "");
	device_unlock(dev);

	return rc;
}

static ssize_t create_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long avail;
	ssize_t rc;
	int val;

	if (is_static(dax_region))
		return -EINVAL;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	avail = dax_region_avail_size(dax_region);
	if (avail == 0)
		rc = -ENOSPC;
	else {
		struct dev_dax_data data = {
			.dax_region = dax_region,
			.size = 0,
			.id = -1,
		};
		struct dev_dax *dev_dax = devm_create_dev_dax(&data);

		if (IS_ERR(dev_dax))
			rc = PTR_ERR(dev_dax);
		else {
			/*
			 * In support of crafting multiple new devices
			 * simultaneously, multiple seeds can be created,
			 * but only the first one that has not been
			 * successfully bound is tracked as the region
			 * seed.
			 */
			if (!dax_region->seed)
				dax_region->seed = &dev_dax->dev;
			dax_region->youngest = &dev_dax->dev;
			rc = len;
		}
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(create);

void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);

	/*
	 * Dynamic dax regions have the pgmap allocated via devm_kzalloc()
	 * and thus freed by devm. Clear the pgmap to not have stale pgmap
	 * ranges on probe() from previous reconfigurations of region devices.
	 */
	if (!static_dev_dax(dev_dax))
		dev_dax->pgmap = NULL;
}
EXPORT_SYMBOL_GPL(kill_dev_dax);
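
/*
 * Release the last range in dev_dax->ranges[] back to the region resource;
 * the ranges array itself is freed once the final range has been trimmed.
 */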
static void trim_dev_dax_range(struct dev_dax *dev_dax)
{
	int i = dev_dax->nr_range - 1;
	struct range *range = &dev_dax->ranges[i].range;
	struct dax_region *dax_region = dev_dax->region;

	device_lock_assert(dax_region->dev);
	dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
		(unsigned long long)range->start,
		(unsigned long long)range->end);

	__release_region(&dax_region->res, range->start, range_len(range));
	if (--dev_dax->nr_range == 0) {
		kfree(dev_dax->ranges);
		dev_dax->ranges = NULL;
	}
}

static void free_dev_dax_ranges(struct dev_dax *dev_dax)
{
	while (dev_dax->nr_range)
		trim_dev_dax_range(dev_dax);
}

static void unregister_dev_dax(void *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	dev_dbg(dev, "%s\n", __func__);

	kill_dev_dax(dev_dax);
	device_del(dev);
	free_dev_dax_ranges(dev_dax);
	put_device(dev);
}

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

static void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}

/* a return value >= 0 indicates this invocation invalidated the id */
static int __free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	int rc = dev_dax->id;

	device_lock_assert(dev);

	if (!dev_dax->dyn_id || dev_dax->id < 0)
		return -1;
	dax_region = dev_dax->region;
	ida_free(&dax_region->ida, dev_dax->id);
	dax_region_put(dax_region);
	dev_dax->id = -1;
	return rc;
}

static int free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int rc;

	device_lock(dev);
	rc = __free_dev_dax_id(dev_dax);
	device_unlock(dev);
	return rc;
}

static int alloc_dev_dax_id(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;
	int id;

	id = ida_alloc(&dax_region->ida, GFP_KERNEL);
	if (id < 0)
		return id;
	kref_get(&dax_region->kref);
	dev_dax->dyn_id = true;
	dev_dax->id = id;
	return id;
}

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct dev_dax *dev_dax;
	struct device *victim;
	bool do_del = false;
	int rc;

	if (is_static(dax_region))
		return -EINVAL;

	victim = device_find_child_by_name(dax_region->dev, buf);
	if (!victim)
		return -ENXIO;

	device_lock(dev);
	device_lock(victim);
	dev_dax = to_dev_dax(victim);
	if (victim->driver || dev_dax_size(dev_dax))
		rc = -EBUSY;
	else {
		/*
		 * Invalidate the device so it does not become active
		 * again, but always preserve device-id-0 so that
		 * /sys/bus/dax/ is guaranteed to be populated while any
		 * dax_region is registered.
		 */
		if (dev_dax->id > 0) {
			do_del = __free_dev_dax_id(dev_dax) >= 0;
			rc = len;
			if (dax_region->seed == victim)
				dax_region->seed = NULL;
			if (dax_region->youngest == victim)
				dax_region->youngest = NULL;
		} else
			rc = -EBUSY;
	}
	device_unlock(victim);

	/* won the race to invalidate the device, clean it up */
	if (do_del)
		devm_release_action(dev, unregister_dev_dax, victim);
	device_unlock(dev);
	put_device(victim);

	return rc;
}
static DEVICE_ATTR_WO(delete);

static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dax_region *dax_region = dev_get_drvdata(dev);

	if (is_static(dax_region))
		if (a == &dev_attr_available_size.attr
				|| a == &dev_attr_create.attr
				|| a == &dev_attr_seed.attr
				|| a == &dev_attr_delete.attr)
			return 0;
	return a->mode;
}

static struct attribute *dax_region_attributes[] = {
	&dev_attr_available_size.attr,
	&dev_attr_region_size.attr,
	&dev_attr_region_align.attr,
	&dev_attr_create.attr,
	&dev_attr_seed.attr,
	&dev_attr_delete.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
	.is_visible = dax_region_visible,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};

static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}
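
/**
 * alloc_dax_region - instantiate a new dax region under @parent
 * @parent: device providing the physical address range
 * @region_id: id for the region (forms the "daxN" prefix of child devices)
 * @range: physical address span managed by the region
 * @target_node: NUMA node the region's memory is associated with
 * @align: required alignment of the range and default device alignment
 * @flags: IORESOURCE_DAX_STATIC and/or IORESOURCE_DAX_KMEM as appropriate
 *
 * Returns the new region, or NULL on constraint violations or allocation
 * failure. Teardown is handled by a devm action registered on @parent.
 */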
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct range *range, int target_node, unsigned int align,
		unsigned long flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(range->start, align)
			|| !IS_ALIGNED(range_len(range), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->target_node = target_node;
	ida_init(&dax_region->ida);
	dax_region->res = (struct resource) {
		.start = range->start,
		.end = range->end,
		.flags = IORESOURCE_MEM | flags,
	};

	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static void dax_mapping_release(struct device *dev)
{
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct device *parent = dev->parent;
	struct dev_dax *dev_dax = to_dev_dax(parent);

	ida_free(&dev_dax->ida, mapping->id);
	kfree(mapping);
	put_device(parent);
}

static void unregister_dax_mapping(void *data)
{
	struct device *dev = data;
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);
	struct dax_region *dax_region = dev_dax->region;

	dev_dbg(dev, "%s\n", __func__);

	device_lock_assert(dax_region->dev);

	dev_dax->ranges[mapping->range_id].mapping = NULL;
	mapping->range_id = -1;

	device_unregister(dev);
}

static struct dev_dax_range *get_dax_range(struct device *dev)
{
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);
	struct dax_region *dax_region = dev_dax->region;

	device_lock(dax_region->dev);
	if (mapping->range_id < 0) {
		device_unlock(dax_region->dev);
		return NULL;
	}

	return &dev_dax->ranges[mapping->range_id];
}

static void put_dax_range(struct dev_dax_range *dax_range)
{
	struct dax_mapping *mapping = dax_range->mapping;
	struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent);
	struct dax_region *dax_region = dev_dax->region;

	device_unlock(dax_region->dev);
}
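
/*
 * The start/end/page_offset mapping attributes are read under the region
 * device lock (via get_dax_range()/put_dax_range()) so that a concurrent
 * resize cannot invalidate mapping->range_id mid-read.
 */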
static ssize_t start_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#llx\n", dax_range->range.start);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(start, 0400, start_show, NULL);

static ssize_t end_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#llx\n", dax_range->range.end);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(end, 0400, end_show, NULL);

static ssize_t pgoff_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#lx\n", dax_range->pgoff);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(page_offset, 0400, pgoff_show, NULL);

static struct attribute *dax_mapping_attributes[] = {
	&dev_attr_start.attr,
	&dev_attr_end.attr,
	&dev_attr_page_offset.attr,
	NULL,
};

static const struct attribute_group dax_mapping_attribute_group = {
	.attrs = dax_mapping_attributes,
};

static const struct attribute_group *dax_mapping_attribute_groups[] = {
	&dax_mapping_attribute_group,
	NULL,
};

static struct device_type dax_mapping_type = {
	.release = dax_mapping_release,
	.groups = dax_mapping_attribute_groups,
};

static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
{
	struct dax_region *dax_region = dev_dax->region;
	struct dax_mapping *mapping;
	struct device *dev;
	int rc;

	device_lock_assert(dax_region->dev);

	if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
				"region disabled\n"))
		return -ENXIO;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;
	mapping->range_id = range_id;
	mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
	if (mapping->id < 0) {
		kfree(mapping);
		return -ENOMEM;
	}
	dev_dax->ranges[range_id].mapping = mapping;
	dev = &mapping->dev;
	device_initialize(dev);
	dev->parent = &dev_dax->dev;
	get_device(dev->parent);
	dev->type = &dax_mapping_type;
	dev_set_name(dev, "mapping%d", mapping->id);
	rc = device_add(dev);
	if (rc) {
		put_device(dev);
		return rc;
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping,
			dev);
	if (rc)
		return rc;
	return 0;
}
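
/*
 * Reserve @size bytes at @start from the region resource and append the
 * result to dev_dax->ranges[]. A zero @size is only valid as the initial
 * "seed" allocation for an otherwise empty device.
 */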
static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
		resource_size_t size)
{
	struct dax_region *dax_region = dev_dax->region;
	struct resource *res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct dev_dax_range *ranges;
	unsigned long pgoff = 0;
	struct resource *alloc;
	int i, rc;

	device_lock_assert(dax_region->dev);

	/* handle the seed alloc special case */
	if (!size) {
		if (dev_WARN_ONCE(dev, dev_dax->nr_range,
					"0-size allocation must be first\n"))
			return -EBUSY;
		/* nr_range == 0 is elsewhere special cased as 0-size device */
		return 0;
	}

	alloc = __request_region(res, start, size, dev_name(dev), 0);
	if (!alloc)
		return -ENOMEM;

	ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
			* (dev_dax->nr_range + 1), GFP_KERNEL);
	if (!ranges) {
		__release_region(res, alloc->start, resource_size(alloc));
		return -ENOMEM;
	}

	for (i = 0; i < dev_dax->nr_range; i++)
		pgoff += PHYS_PFN(range_len(&ranges[i].range));
	dev_dax->ranges = ranges;
	ranges[dev_dax->nr_range++] = (struct dev_dax_range) {
		.pgoff = pgoff,
		.range = {
			.start = alloc->start,
			.end = alloc->end,
		},
	};

	dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
			&alloc->start, &alloc->end);
	/*
	 * A dev_dax instance must be registered before mapping device
	 * children can be added. Defer to devm_create_dev_dax() to add
	 * the initial mapping device.
	 */
	if (!device_is_registered(&dev_dax->dev))
		return 0;

	rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
	if (rc)
		trim_dev_dax_range(dev_dax);

	return rc;
}

static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res,
		resource_size_t size)
{
	int last_range = dev_dax->nr_range - 1;
	struct dev_dax_range *dax_range = &dev_dax->ranges[last_range];
	struct dax_region *dax_region = dev_dax->region;
	bool is_shrink = resource_size(res) > size;
	struct range *range = &dax_range->range;
	struct device *dev = &dev_dax->dev;
	int rc;

	device_lock_assert(dax_region->dev);

	if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
		return -EINVAL;

	rc = adjust_resource(res, range->start, size);
	if (rc)
		return rc;

	*range = (struct range) {
		.start = range->start,
		.end = range->start + size - 1,
	};

	dev_dbg(dev, "%s range[%d]: %#llx:%#llx\n", is_shrink ? "shrink" : "extend",
			last_range, (unsigned long long) range->start,
			(unsigned long long) range->end);

	return 0;
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	unsigned long long size;

	device_lock(dev);
	size = dev_dax_size(dev_dax);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", size);
}

static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
{
	/*
	 * The minimum mapping granularity for a device instance is a
	 * single subsection, unless the arch says otherwise.
	 */
	return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align,
				memremap_compat_align()));
}

static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t to_shrink = dev_dax_size(dev_dax) - size;
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = dev_dax->nr_range - 1; i >= 0; i--) {
		struct range *range = &dev_dax->ranges[i].range;
		struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
		struct resource *adjust = NULL, *res;
		resource_size_t shrink;

		shrink = min_t(u64, to_shrink, range_len(range));
		if (shrink >= range_len(range)) {
			devm_release_action(dax_region->dev,
					unregister_dax_mapping, &mapping->dev);
			trim_dev_dax_range(dev_dax);
			to_shrink -= shrink;
			if (!to_shrink)
				break;
			continue;
		}

		for_each_dax_region_resource(dax_region, res)
			if (strcmp(res->name, dev_name(dev)) == 0
					&& res->start == range->start) {
				adjust = res;
				break;
			}

		if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1,
					"failed to find matching resource\n"))
			return -ENXIO;
		return adjust_dev_dax_range(dev_dax, adjust, range_len(range)
				- shrink);
	}
	return 0;
}

/*
 * Only allow adjustments that preserve the relative pgoff of existing
 * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
 */
static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res)
{
	struct dev_dax_range *last;
	int i;

	if (dev_dax->nr_range == 0)
		return false;
	if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0)
		return false;
	last = &dev_dax->ranges[dev_dax->nr_range - 1];
	if (last->range.start != res->start || last->range.end != res->end)
		return false;
	for (i = 0; i < dev_dax->nr_range - 1; i++) {
		struct dev_dax_range *dax_range = &dev_dax->ranges[i];

		if (dax_range->pgoff > last->pgoff)
			return false;
	}

	return true;
}
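
/*
 * Grow or shrink @dev_dax to @size: shrinking is delegated to
 * dev_dax_shrink(); growth first tries to extend the last allocation in
 * place (see adjust_ok()) and otherwise claims free space from the region,
 * iterating until the request is satisfied or the region is exhausted.
 */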
static ssize_t dev_dax_resize(struct dax_region *dax_region,
		struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
	resource_size_t dev_size = dev_dax_size(dev_dax);
	struct resource *region_res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct resource *res, *first;
	resource_size_t alloc = 0;
	int rc;

	if (dev->driver)
		return -EBUSY;
	if (size == dev_size)
		return 0;
	if (size > dev_size && size - dev_size > avail)
		return -ENOSPC;
	if (size < dev_size)
		return dev_dax_shrink(dev_dax, size);

	to_alloc = size - dev_size;
	if (dev_WARN_ONCE(dev, !alloc_is_aligned(dev_dax, to_alloc),
			"resize of %pa misaligned\n", &to_alloc))
		return -ENXIO;

	/*
	 * Expand the device into the unused portion of the region. This
	 * may involve adjusting the end of an existing resource, or
	 * allocating a new resource.
	 */
retry:
	first = region_res->child;
	if (!first)
		return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);

	rc = -ENOSPC;
	for (res = first; res; res = res->sibling) {
		struct resource *next = res->sibling;

		/* space at the beginning of the region */
		if (res == first && res->start > dax_region->res.start) {
			alloc = min(res->start - dax_region->res.start, to_alloc);
			rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc);
			break;
		}

		alloc = 0;
		/* space between allocations */
		if (next && next->start > res->end + 1)
			alloc = min(next->start - (res->end + 1), to_alloc);

		/* space at the end of the region */
		if (!alloc && !next && res->end < region_res->end)
			alloc = min(region_res->end - res->end, to_alloc);

		if (!alloc)
			continue;

		if (adjust_ok(dev_dax, res)) {
			rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc);
			break;
		}
		rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc);
		break;
	}
	if (rc)
		return rc;
	to_alloc -= alloc;
	if (to_alloc)
		goto retry;
	return 0;
}

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	ssize_t rc;
	unsigned long long val;
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	if (!alloc_is_aligned(dev_dax, val)) {
		dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val);
		return -EINVAL;
	}

	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return -ENXIO;
	}
	device_lock(dev);
	rc = dev_dax_resize(dax_region, dev_dax, val);
	device_unlock(dev);
	device_unlock(dax_region->dev);

	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(size);
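
/*
 * Parse a "<start>-<end>" string written to the mapping attribute; both
 * values are hexadecimal physical addresses and the range is inclusive.
 */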
static ssize_t range_parse(const char *opt, size_t len, struct range *range)
{
	unsigned long long addr = 0;
	char *start, *end, *str;
	ssize_t rc = -EINVAL;

	str = kstrdup(opt, GFP_KERNEL);
	if (!str)
		return rc;

	end = str;
	start = strsep(&end, "-");
	if (!start || !end)
		goto err;

	rc = kstrtoull(start, 16, &addr);
	if (rc)
		goto err;
	range->start = addr;

	rc = kstrtoull(end, 16, &addr);
	if (rc)
		goto err;
	range->end = addr;

err:
	kfree(str);
	return rc;
}

static ssize_t mapping_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	size_t to_alloc;
	struct range r;
	ssize_t rc;

	rc = range_parse(buf, len, &r);
	if (rc)
		return rc;

	rc = -ENXIO;
	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return rc;
	}
	device_lock(dev);

	to_alloc = range_len(&r);
	if (alloc_is_aligned(dev_dax, to_alloc))
		rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
	device_unlock(dev);
	device_unlock(dax_region->dev);

	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_WO(mapping);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sprintf(buf, "%d\n", dev_dax->align);
}

static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = 0; i < dev_dax->nr_range; i++) {
		size_t len = range_len(&dev_dax->ranges[i].range);

		if (!alloc_is_aligned(dev_dax, len)) {
			dev_dbg(dev, "%s: align %u invalid for range %d\n",
				__func__, dev_dax->align, i);
			return -EINVAL;
		}
	}

	return 0;
}
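
/*
 * The alignment can only be changed while the device is unbound, and only
 * to a value that keeps every existing allocation aligned; otherwise the
 * previous alignment is restored.
 */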
static ssize_t align_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	unsigned long val, align_save;
	ssize_t rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return -ENXIO;

	if (!dax_align_valid(val))
		return -EINVAL;

	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return -ENXIO;
	}

	device_lock(dev);
	if (dev->driver) {
		rc = -EBUSY;
		goto out_unlock;
	}

	align_save = dev_dax->align;
	dev_dax->align = val;
	rc = dev_dax_validate_align(dev_dax);
	if (rc)
		dev_dax->align = align_save;
out_unlock:
	device_unlock(dev);
	device_unlock(dax_region->dev);
	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(align);

static int dev_dax_target_node(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->target_node;
}

static ssize_t target_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
}
static DEVICE_ATTR_RO(target_node);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	unsigned long long start;

	if (dev_dax->nr_range < 1)
		start = dax_region->res.start;
	else
		start = dev_dax->ranges[0].range.start;

	return sprintf(buf, "%#llx\n", start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t numa_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
		return 0;
	if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
		return 0;
	if (a == &dev_attr_mapping.attr && is_static(dax_region))
		return 0;
	if ((a == &dev_attr_align.attr ||
	     a == &dev_attr_size.attr) && is_static(dax_region))
		return 0444;
	return a->mode;
}

static struct attribute *dev_dax_attributes[] = {
	&dev_attr_modalias.attr,
	&dev_attr_size.attr,
	&dev_attr_mapping.attr,
	&dev_attr_target_node.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_numa_node.attr,
	NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
	.attrs = dev_dax_attributes,
	.is_visible = dev_dax_visible,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dev_dax_attribute_group,
	NULL,
};

static void dev_dax_release(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;

	put_dax(dax_dev);
	free_dev_dax_id(dev_dax);
	kfree(dev_dax->pgmap);
	kfree(dev_dax);
}

static const struct device_type dev_dax_type = {
	.release = dev_dax_release,
	.groups = dax_attribute_groups,
};
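
/**
 * devm_create_dev_dax - create a new dev_dax instance in a dax_region
 * @data: region, id, initial size and optional pgmap for the new instance
 *
 * On success the device is registered on the dax bus and scheduled for
 * automatic teardown (unregister_dev_dax()) when the region's parent
 * device is unbound.
 */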
struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
{
	struct dax_region *dax_region = data->dax_region;
	struct device *parent = dax_region->dev;
	struct dax_device *dax_dev;
	struct dev_dax *dev_dax;
	struct inode *inode;
	struct device *dev;
	int rc;

	dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
	if (!dev_dax)
		return ERR_PTR(-ENOMEM);

	dev_dax->region = dax_region;
	if (is_static(dax_region)) {
		if (dev_WARN_ONCE(parent, data->id < 0,
				"dynamic id specified to static region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		dev_dax->id = data->id;
	} else {
		if (dev_WARN_ONCE(parent, data->id >= 0,
				"static id specified to dynamic region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		rc = alloc_dev_dax_id(dev_dax);
		if (rc < 0)
			goto err_id;
	}

	dev = &dev_dax->dev;
	device_initialize(dev);
	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);

	rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
	if (rc)
		goto err_range;

	if (data->pgmap) {
		dev_WARN_ONCE(parent, !is_static(dax_region),
			"custom dev_pagemap requires a static dax_region\n");

		dev_dax->pgmap = kmemdup(data->pgmap,
				sizeof(struct dev_pagemap), GFP_KERNEL);
		if (!dev_dax->pgmap) {
			rc = -ENOMEM;
			goto err_pgmap;
		}
	}

	/*
	 * No dax_operations since there is no access to this device outside of
	 * mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		goto err_alloc_dax;
	}
	set_dax_synchronous(dax_dev);
	set_dax_nocache(dax_dev);
	set_dax_nomc(dax_dev);

	/* a device_dax instance is dead while the driver is not attached */
	kill_dax(dax_dev);

	dev_dax->dax_dev = dax_dev;
	dev_dax->target_node = dax_region->target_node;
	dev_dax->align = dax_region->align;
	ida_init(&dev_dax->ida);

	inode = dax_inode(dax_dev);
	dev->devt = inode->i_rdev;
	dev->bus = &dax_bus_type;
	dev->parent = parent;
	dev->type = &dev_dax_type;

	rc = device_add(dev);
	if (rc) {
		kill_dev_dax(dev_dax);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
	if (rc)
		return ERR_PTR(rc);

	/* register mapping device for the initial allocation range */
	if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
		rc = devm_register_dax_mapping(dev_dax, 0);
		if (rc)
			return ERR_PTR(rc);
	}

	return dev_dax;

err_alloc_dax:
	kfree(dev_dax->pgmap);
err_pgmap:
	free_dev_dax_ranges(dev_dax);
err_range:
	free_dev_dax_id(dev_dax);
err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);
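
/*
 * Drivers normally register via the dax_driver_register() wrapper (see
 * bus.h), which supplies @module and @mod_name on the caller's behalf.
 */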
int __dax_driver_register(struct dax_device_driver *dax_drv,
		struct module *module, const char *mod_name)
{
	struct device_driver *drv = &dax_drv->drv;

	/*
	 * dax_bus_probe() calls dax_drv->probe() unconditionally.
	 * So better be safe than sorry and ensure it is provided.
	 */
	if (!dax_drv->probe)
		return -EINVAL;

	INIT_LIST_HEAD(&dax_drv->ids);
	drv->owner = module;
	drv->name = mod_name;
	drv->mod_name = mod_name;
	drv->bus = &dax_bus_type;

	return driver_register(drv);
}
EXPORT_SYMBOL_GPL(__dax_driver_register);

void dax_driver_unregister(struct dax_device_driver *dax_drv)
{
	struct device_driver *drv = &dax_drv->drv;
	struct dax_id *dax_id, *_id;

	mutex_lock(&dax_bus_lock);
	list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) {
		list_del(&dax_id->list);
		kfree(dax_id);
	}
	mutex_unlock(&dax_bus_lock);
	driver_unregister(drv);
}
EXPORT_SYMBOL_GPL(dax_driver_unregister);

int __init dax_bus_init(void)
{
	return bus_register(&dax_bus_type);
}

void __exit dax_bus_exit(void)
{
	bus_unregister(&dax_bus_type);
}