// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);

struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;
	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
				&offset, size, dev_name(dev));
		goto err_request_region;
	}

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	guard(nvdimm_bus)(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	scoped_guard(nvdimm_bus, dev) {
		nvdimm_map = find_nvdimm_map(dev, offset);
		if (!nvdimm_map)
			nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
		else
			kref_get(&nvdimm_map->kref);
	}

	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
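
/*
 * Usage sketch (illustration only, not a caller in this file): a
 * provider that wants a shared range mapped for the lifetime of @dev
 * might do something like the following, where 'res' is a
 * hypothetical struct resource describing the range:
 *
 *	void *addr = devm_nvdimm_memremap(dev, res->start,
 *			resource_size(res), MEMREMAP_WB);
 *	if (!addr)
 *		return -ENOMEM;
 *
 * Passing @flags == 0 falls back to ioremap() for register-style
 * (uncached) mappings.  Repeat callers for the same @offset share one
 * refcounted mapping, and the reference is dropped automatically by
 * the devm action when @dev is unbound.
 */
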
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of the raw sysfs buffer
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
		size_t len)
{
	uuid_t uuid;
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = uuid_parse(buf, &uuid);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(&uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}
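
/*
 * Usage sketch (illustration only): a sysfs 'uuid' store handler for a
 * namespace-like device can delegate the parse-and-replace to
 * nd_uuid_store() while honoring the locking contract above.  The
 * 'example_dev' container, its 'uuid' member, and to_example_dev()
 * are hypothetical; real callers typically take the bus lock as well.
 *
 *	static ssize_t uuid_store(struct device *dev,
 *			struct device_attribute *attr, const char *buf,
 *			size_t len)
 *	{
 *		struct example_dev *edev = to_example_dev(dev);
 *		int rc;
 *
 *		device_lock(dev);
 *		rc = nd_uuid_store(dev, &edev->uuid, buf, len);
 *		device_unlock(dev);
 *
 *		return rc ? rc : len;
 *	}
 */
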
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_size == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_size = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}
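
/*
 * Usage sketch (illustration only): a driver exposing a 'sector_size'
 * attribute can back both show and store with the helpers above.  The
 * zero-terminated 'example_lbasize_supported' table, 'example_dev',
 * and to_example_dev() are hypothetical.
 *
 *	static const unsigned long example_lbasize_supported[] = {
 *		512, 4096, 0 };
 *
 *	static ssize_t sector_size_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct example_dev *edev = to_example_dev(dev);
 *
 *		return nd_size_select_show(edev->lbasize,
 *				example_lbasize_supported, buf);
 *	}
 *
 * nd_size_select_show() brackets the currently selected value in the
 * output (e.g. "[512] 4096"), and nd_size_select_store() only accepts
 * a value present in the zero-terminated table, while the device is
 * disabled.
 */
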
static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};

static ssize_t capability_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	cap = nd_desc->fw_ops->capability(nd_desc);

	switch (cap) {
	case NVDIMM_FWA_CAP_QUIESCE:
		return sprintf(buf, "quiesce\n");
	case NVDIMM_FWA_CAP_LIVE:
		return sprintf(buf, "live\n");
	default:
		return -EOPNOTSUPP;
	}
}

static DEVICE_ATTR_RO(capability);

static ssize_t activate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;
	enum nvdimm_fwa_state state;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	cap = nd_desc->fw_ops->capability(nd_desc);
	state = nd_desc->fw_ops->activate_state(nd_desc);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return -EOPNOTSUPP;

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	case NVDIMM_FWA_ARM_OVERFLOW:
		return sprintf(buf, "overflow\n");
	default:
		return -ENXIO;
	}
}

static int exec_firmware_activate(void *data)
{
	struct nvdimm_bus_descriptor *nd_desc = data;

	return nd_desc->fw_ops->activate(nd_desc);
}

static ssize_t activate_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_state state;
	bool quiesce;
	ssize_t rc;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "live"))
		quiesce = false;
	else if (sysfs_streq(buf, "quiesce"))
		quiesce = true;
	else
		return -EINVAL;

	state = nd_desc->fw_ops->activate_state(nd_desc);

	switch (state) {
	case NVDIMM_FWA_BUSY:
		rc = -EBUSY;
		break;
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		if (quiesce)
			rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
		else
			rc = nd_desc->fw_ops->activate(nd_desc);
		break;
	case NVDIMM_FWA_IDLE:
	default:
		rc = -ENXIO;
	}

	if (rc == 0)
		rc = len;
	return rc;
}

static DEVICE_ATTR_ADMIN_RW(activate);
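
/*
 * Usage sketch (illustration only): when the provider implements
 * fw_ops, the attributes above are exposed in the 'firmware' group of
 * the bus device, e.g. under a path like
 * /sys/bus/nd/devices/ndbus0/firmware/ (the bus instance name is
 * hypothetical).  A privileged user can then query and trigger
 * activation of staged DIMM firmware:
 *
 *	# cat .../firmware/capability
 *	quiesce
 *	# echo quiesce > .../firmware/activate
 *
 * Writing "quiesce" routes the activation through
 * hibernate_quiet_exec() so the rest of the system is frozen across
 * the firmware switch, while "live" invokes the provider's
 * ->activate() directly.  The whole group is hidden when the reported
 * capability is below NVDIMM_FWA_CAP_QUIESCE (see
 * nvdimm_bus_firmware_visible() below).
 */
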
static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	/*
	 * Both 'activate' and 'capability' disappear when no ops
	 * detected, or a negative capability is indicated.
	 */
	if (!nd_desc->fw_ops)
		return 0;

	cap = nd_desc->fw_ops->capability(nd_desc);
	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}
static struct attribute *nvdimm_bus_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_capability.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_bus_firmware_attributes,
	.is_visible = nvdimm_bus_firmware_visible,
};

const struct attribute_group *nvdimm_bus_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&nvdimm_bus_firmware_attribute_group,
	NULL,
};

int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	return badrange_add(&nvdimm_bus->badrange, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;

	nd_label_init();

	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nvdimm_devs_exit();
}

MODULE_DESCRIPTION("NVDIMM (Non-Volatile Memory Device) core");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);