// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return;
        mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return;
        mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return false;
        return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);

struct nvdimm_map {
        struct nvdimm_bus *nvdimm_bus;
        struct list_head list;
        resource_size_t offset;
        unsigned long flags;
        size_t size;
        union {
                void *mem;
                void __iomem *iomem;
        };
        struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
                resource_size_t offset)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct nvdimm_map *nvdimm_map;

        list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
                if (nvdimm_map->offset == offset)
                        return nvdimm_map;
        return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
                resource_size_t offset, size_t size, unsigned long flags)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct nvdimm_map *nvdimm_map;

        nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
        if (!nvdimm_map)
                return NULL;

        INIT_LIST_HEAD(&nvdimm_map->list);
        nvdimm_map->nvdimm_bus = nvdimm_bus;
        nvdimm_map->offset = offset;
        nvdimm_map->flags = flags;
        nvdimm_map->size = size;
        kref_init(&nvdimm_map->kref);

        if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
                dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
                                &offset, size, dev_name(dev));
                goto err_request_region;
        }

        if (flags)
                nvdimm_map->mem = memremap(offset, size, flags);
        else
                nvdimm_map->iomem = ioremap(offset, size);

        if (!nvdimm_map->mem)
                goto err_map;

        dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
                        __func__);
        list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

        return nvdimm_map;

 err_map:
        release_mem_region(offset, size);
 err_request_region:
        kfree(nvdimm_map);
        return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
        struct nvdimm_bus *nvdimm_bus;
        struct nvdimm_map *nvdimm_map;

        nvdimm_map = container_of(kref, struct nvdimm_map, kref);
        nvdimm_bus = nvdimm_map->nvdimm_bus;

        dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
        list_del(&nvdimm_map->list);
        if (nvdimm_map->flags)
                memunmap(nvdimm_map->mem);
        else
                iounmap(nvdimm_map->iomem);
        release_mem_region(nvdimm_map->offset, nvdimm_map->size);
        kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
        struct nvdimm_map *nvdimm_map = data;
        struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

        nvdimm_bus_lock(&nvdimm_bus->dev);
        kref_put(&nvdimm_map->kref, nvdimm_map_release);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags)
{
        struct nvdimm_map *nvdimm_map;

        nvdimm_bus_lock(dev);
        nvdimm_map = find_nvdimm_map(dev, offset);
        if (!nvdimm_map)
                nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
        else
                kref_get(&nvdimm_map->kref);
        nvdimm_bus_unlock(dev);

        if (!nvdimm_map)
                return NULL;

        if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
                return NULL;

        return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
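/*
 * Illustrative sketch of a caller (hypothetical provider code, not a
 * function defined in this file): a driver for a device on the nvdimm bus
 * can map a control resource that several regions share.  Repeated calls
 * for the same @offset return the existing mapping with an additional
 * reference, and each reference is dropped automatically when the
 * requesting device is unbound.  A zero @flags value falls back to
 * ioremap() of the range.
 *
 *	static void *example_map_ctl(struct device *dev, struct resource *res)
 *	{
 *		return devm_nvdimm_memremap(dev, res->start,
 *				resource_size(res), MEMREMAP_WB);
 *	}
 */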
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
        u32 *buf = addr;
        u32 lo32 = 0;
        u64 hi32 = 0;
        int i;

        for (i = 0; i < len / sizeof(u32); i++) {
                lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
                hi32 += lo32;
        }

        return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
        /* struct nvdimm_bus definition is private to libnvdimm */
        return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
        /* struct nvdimm_bus definition is private to libnvdimm */
        return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of the sysfs buffer
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
                size_t len)
{
        uuid_t uuid;
        int rc;

        if (dev->driver)
                return -EBUSY;

        rc = uuid_parse(buf, &uuid);
        if (rc)
                return rc;

        kfree(*uuid_out);
        *uuid_out = kmemdup(&uuid, sizeof(uuid), GFP_KERNEL);
        if (!(*uuid_out))
                return -ENOMEM;

        return 0;
}
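/*
 * Illustrative sketch of a sysfs ->store() caller (simplified and
 * hypothetical; struct nd_btt and to_nd_btt() come from "nd.h"): per the
 * LOCKING note above, the device_lock() must be held and the device must
 * be disabled for the uuid to be replaced.
 *
 *	static ssize_t uuid_store(struct device *dev,
 *			struct device_attribute *attr, const char *buf, size_t len)
 *	{
 *		struct nd_btt *nd_btt = to_nd_btt(dev);
 *		ssize_t rc;
 *
 *		device_lock(dev);
 *		rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
 *		device_unlock(dev);
 *
 *		return rc ? rc : len;
 *	}
 */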
ssize_t nd_size_select_show(unsigned long current_size,
                const unsigned long *supported, char *buf)
{
        ssize_t len = 0;
        int i;

        for (i = 0; supported[i]; i++)
                if (current_size == supported[i])
                        len += sprintf(buf + len, "[%ld] ", supported[i]);
                else
                        len += sprintf(buf + len, "%ld ", supported[i]);
        len += sprintf(buf + len, "\n");
        return len;
}

ssize_t nd_size_select_store(struct device *dev, const char *buf,
                unsigned long *current_size, const unsigned long *supported)
{
        unsigned long lbasize;
        int rc, i;

        if (dev->driver)
                return -EBUSY;

        rc = kstrtoul(buf, 0, &lbasize);
        if (rc)
                return rc;

        for (i = 0; supported[i]; i++)
                if (lbasize == supported[i])
                        break;

        if (supported[i]) {
                *current_size = lbasize;
                return 0;
        } else {
                return -EINVAL;
        }
}

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int cmd, len = 0;
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        struct device *parent = nvdimm_bus->dev.parent;

        if (nd_desc->provider_name)
                return nd_desc->provider_name;
        else if (parent)
                return dev_name(parent);
        else
                return "unknown";
}

static ssize_t provider_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

        return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
        device_lock(dev);
        device_unlock(dev);
        return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
        device_lock(dev);
        device_unlock(dev);
        device_for_each_child(dev, NULL, flush_namespaces);
        return 0;
}

static ssize_t wait_probe_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        int rc;

        if (nd_desc->flush_probe) {
                rc = nd_desc->flush_probe(nd_desc);
                if (rc)
                        return rc;
        }
        nd_synchronize();
        device_for_each_child(dev, NULL, flush_regions_dimms);
        return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
        &dev_attr_commands.attr,
        &dev_attr_wait_probe.attr,
        &dev_attr_provider.attr,
        NULL,
};

static const struct attribute_group nvdimm_bus_attribute_group = {
        .attrs = nvdimm_bus_attributes,
};

static ssize_t capability_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        enum nvdimm_fwa_capability cap;

        if (!nd_desc->fw_ops)
                return -EOPNOTSUPP;

        cap = nd_desc->fw_ops->capability(nd_desc);

        switch (cap) {
        case NVDIMM_FWA_CAP_QUIESCE:
                return sprintf(buf, "quiesce\n");
        case NVDIMM_FWA_CAP_LIVE:
                return sprintf(buf, "live\n");
        default:
                return -EOPNOTSUPP;
        }
}

static DEVICE_ATTR_RO(capability);
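/*
 * Illustrative userspace view (the "ndbusX" device name and the
 * /sys/bus/nd/devices path follow the usual libnvdimm sysfs layout; the
 * actual instance number is platform specific):
 *
 *	# cat /sys/bus/nd/devices/ndbus0/firmware/capability
 *	quiesce
 *
 * "quiesce" means activation requires devices to be idled first, while
 * "live" means the platform can activate firmware without quiescing I/O.
 */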
static ssize_t activate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        enum nvdimm_fwa_capability cap;
        enum nvdimm_fwa_state state;

        if (!nd_desc->fw_ops)
                return -EOPNOTSUPP;

        cap = nd_desc->fw_ops->capability(nd_desc);
        state = nd_desc->fw_ops->activate_state(nd_desc);

        if (cap < NVDIMM_FWA_CAP_QUIESCE)
                return -EOPNOTSUPP;

        switch (state) {
        case NVDIMM_FWA_IDLE:
                return sprintf(buf, "idle\n");
        case NVDIMM_FWA_BUSY:
                return sprintf(buf, "busy\n");
        case NVDIMM_FWA_ARMED:
                return sprintf(buf, "armed\n");
        case NVDIMM_FWA_ARM_OVERFLOW:
                return sprintf(buf, "overflow\n");
        default:
                return -ENXIO;
        }
}

static int exec_firmware_activate(void *data)
{
        struct nvdimm_bus_descriptor *nd_desc = data;

        return nd_desc->fw_ops->activate(nd_desc);
}

static ssize_t activate_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        enum nvdimm_fwa_state state;
        bool quiesce;
        ssize_t rc;

        if (!nd_desc->fw_ops)
                return -EOPNOTSUPP;

        if (sysfs_streq(buf, "live"))
                quiesce = false;
        else if (sysfs_streq(buf, "quiesce"))
                quiesce = true;
        else
                return -EINVAL;

        state = nd_desc->fw_ops->activate_state(nd_desc);

        switch (state) {
        case NVDIMM_FWA_BUSY:
                rc = -EBUSY;
                break;
        case NVDIMM_FWA_ARMED:
        case NVDIMM_FWA_ARM_OVERFLOW:
                if (quiesce)
                        rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
                else
                        rc = nd_desc->fw_ops->activate(nd_desc);
                break;
        case NVDIMM_FWA_IDLE:
        default:
                rc = -ENXIO;
        }

        if (rc == 0)
                rc = len;
        return rc;
}

static DEVICE_ATTR_ADMIN_RW(activate);
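/*
 * Illustrative userspace flow (hypothetical ndbusX path, matching the
 * layout sketched above): once firmware has been staged and the state
 * reads back as "armed", writing "quiesce" performs the activation inside
 * hibernate_quiet_exec() so devices are frozen around the update, while
 * "live" activates without quiescing I/O:
 *
 *	# cat /sys/bus/nd/devices/ndbus0/firmware/activate
 *	armed
 *	# echo quiesce > /sys/bus/nd/devices/ndbus0/firmware/activate
 */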
static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        enum nvdimm_fwa_capability cap;

        /*
         * Both 'activate' and 'capability' disappear when no ops
         * detected, or a negative capability is indicated.
         */
        if (!nd_desc->fw_ops)
                return 0;

        cap = nd_desc->fw_ops->capability(nd_desc);
        if (cap < NVDIMM_FWA_CAP_QUIESCE)
                return 0;

        return a->mode;
}
static struct attribute *nvdimm_bus_firmware_attributes[] = {
        &dev_attr_activate.attr,
        &dev_attr_capability.attr,
        NULL,
};

static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
        .name = "firmware",
        .attrs = nvdimm_bus_firmware_attributes,
        .is_visible = nvdimm_bus_firmware_visible,
};

const struct attribute_group *nvdimm_bus_attribute_groups[] = {
        &nvdimm_bus_attribute_group,
        &nvdimm_bus_firmware_attribute_group,
        NULL,
};

int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
        return badrange_add(&nvdimm_bus->badrange, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);

static __init int libnvdimm_init(void)
{
        int rc;

        rc = nvdimm_bus_init();
        if (rc)
                return rc;
        rc = nvdimm_init();
        if (rc)
                goto err_dimm;
        rc = nd_region_init();
        if (rc)
                goto err_region;

        nd_label_init();

        return 0;
 err_region:
        nvdimm_exit();
 err_dimm:
        nvdimm_bus_exit();
        return rc;
}

static __exit void libnvdimm_exit(void)
{
        WARN_ON(!list_empty(&nvdimm_bus_list));
        nd_region_exit();
        nvdimm_exit();
        nvdimm_bus_exit();
        nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);