Lines matching refs: nvdimm_bus (the hits below appear to come from drivers/nvdimm/bus.c in the Linux kernel; each line is prefixed with its source line number)
58 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
60 return nvdimm_bus->nd_desc->module;
65 static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
67 nvdimm_bus_lock(&nvdimm_bus->dev);
68 nvdimm_bus->probe_active++;
69 nvdimm_bus_unlock(&nvdimm_bus->dev);
72 static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
74 nvdimm_bus_lock(&nvdimm_bus->dev);
75 if (--nvdimm_bus->probe_active == 0)
76 wake_up(&nvdimm_bus->wait);
77 nvdimm_bus_unlock(&nvdimm_bus->dev);
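The two helpers above bracket driver ->probe() with a counter: probe paths bump probe_active under the bus lock, and the last decrement wakes nvdimm_bus->wait. The waiter side shows up further down in this listing (the hits around lines 937-945 and the wait_nvdimm_bus_probe_idle() call at line 1020). A minimal sketch of that waiter, assuming only the probe_active and wait members visible in these hits plus the usual kernel headers; the function name is illustrative, and the in-tree waiter additionally drops and re-takes the device and bus locks around the sleep, which these hits do not show:

static void wait_probe_idle_sketch(struct nvdimm_bus *nvdimm_bus)
{
        /* sleep until every in-flight ->probe() has run probe_end() */
        wait_event(nvdimm_bus->wait, nvdimm_bus->probe_active == 0);
}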
84 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
90 dev_dbg(&nvdimm_bus->dev, "START: %s.probe(%s)\n",
93 nvdimm_bus_probe_start(nvdimm_bus);
98 nvdimm_bus_probe_end(nvdimm_bus);
100 dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
112 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
117 dev_dbg(&nvdimm_bus->dev, "%s.remove(%s)\n", dev->driver->name,
124 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
132 dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n",
153 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
155 if (!nvdimm_bus)
195 static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
203 device_for_each_child(&nvdimm_bus->dev, &ctx,
207 static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
211 badrange_forget(&nvdimm_bus->badrange, phys, cleared);
214 nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
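For a cleared poison span, nvdimm_account_cleared_poison() does two things: it drops the range from the bus-level badrange list (badrange_forget(), line 211) and then walks every child of the bus so each region can clear the matching span from its badblocks, which is what the device_for_each_child() call at line 203 is for. A sketch of that child walk, assuming the usual kernel headers; the context struct and callback names here are illustrative, and the real callback also translates the bus-relative range into the region and clears the overlapping sectors:

struct clear_badblocks_context {
        resource_size_t phys, cleared;
};

static int clear_badblocks_region_sketch(struct device *dev, void *data)
{
        struct clear_badblocks_context *ctx = data;

        /* only region devices carry badblocks; stand-in for the clearing */
        dev_dbg(dev, "would clear %#llx+%#llx here\n",
                        (unsigned long long)ctx->phys,
                        (unsigned long long)ctx->cleared);
        return 0;
}

static void clear_badblocks_regions_sketch(struct nvdimm_bus *nvdimm_bus,
                phys_addr_t phys, u64 cleared)
{
        struct clear_badblocks_context ctx = {
                .phys = phys,
                .cleared = cleared,
        };

        device_for_each_child(&nvdimm_bus->dev, &ctx,
                        clear_badblocks_region_sketch);
}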
220 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
228 if (!nvdimm_bus)
231 nd_desc = nvdimm_bus->nd_desc;
269 nvdimm_account_cleared_poison(nvdimm_bus, phys, clear_err.cleared);
288 struct nvdimm_bus *nvdimm_bus;
290 nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
291 ida_free(&nd_ida, nvdimm_bus->id);
292 kfree(nvdimm_bus);
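These hits are the bus device's release callback: when the last reference to &nvdimm_bus->dev goes away, the ida-allocated bus id is returned and the structure freed. The release is reached through the device_type assigned at line 357; a compact sketch of that wiring, with names suffixed _sketch to mark them as illustrative (the in-tree type may carry further members, such as attribute groups, that do not appear in these hits):

static void nvdimm_bus_release_sketch(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus =
                container_of(dev, struct nvdimm_bus, dev);

        ida_free(&nd_ida, nvdimm_bus->id);
        kfree(nvdimm_bus);
}

static const struct device_type nvdimm_bus_dev_type_sketch = {
        .release = nvdimm_bus_release_sketch,
};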
305 struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
318 struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
320 struct nvdimm_bus *nvdimm_bus;
322 nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
324 return nvdimm_bus;
328 struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm)
336 struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
339 struct nvdimm_bus *nvdimm_bus;
342 nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
343 if (!nvdimm_bus)
345 INIT_LIST_HEAD(&nvdimm_bus->list);
346 INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
347 init_waitqueue_head(&nvdimm_bus->wait);
348 nvdimm_bus->id = ida_alloc(&nd_ida, GFP_KERNEL);
349 if (nvdimm_bus->id < 0) {
350 kfree(nvdimm_bus);
353 mutex_init(&nvdimm_bus->reconfig_mutex);
354 badrange_init(&nvdimm_bus->badrange);
355 nvdimm_bus->nd_desc = nd_desc;
356 nvdimm_bus->dev.parent = parent;
357 nvdimm_bus->dev.type = &nvdimm_bus_dev_type;
358 nvdimm_bus->dev.groups = nd_desc->attr_groups;
359 nvdimm_bus->dev.bus = &nvdimm_bus_type;
360 nvdimm_bus->dev.of_node = nd_desc->of_node;
361 device_initialize(&nvdimm_bus->dev);
362 lockdep_set_class(&nvdimm_bus->dev.mutex, &nvdimm_bus_key);
363 device_set_pm_not_required(&nvdimm_bus->dev);
364 rc = dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
368 rc = device_add(&nvdimm_bus->dev);
370 dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
374 return nvdimm_bus;
376 put_device(&nvdimm_bus->dev);
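One point worth calling out in the registration sequence above: once device_initialize() (line 361) has run, the allocation is owned by the device reference count, so later failures unwind with put_device() (line 376), which ends up in the release callback around lines 288-292 and frees both the ida id and the structure. Only the pre-initialize failure, the ida_alloc() check at line 349, uses a bare kfree(). An abbreviated sketch of that shape, with most of the field setup elided and the function name illustrative:

struct nvdimm_bus *bus_register_sketch(struct device *parent,
                struct nvdimm_bus_descriptor *nd_desc)
{
        struct nvdimm_bus *nvdimm_bus;
        int rc;

        nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
        if (!nvdimm_bus)
                return NULL;
        nvdimm_bus->id = ida_alloc(&nd_ida, GFP_KERNEL);
        if (nvdimm_bus->id < 0) {
                kfree(nvdimm_bus);      /* no device reference taken yet */
                return NULL;
        }
        nvdimm_bus->nd_desc = nd_desc;
        nvdimm_bus->dev.parent = parent;
        /* type, groups, bus, of_node set up as in lines 357-360 */
        device_initialize(&nvdimm_bus->dev);

        rc = dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
        if (rc)
                goto err;
        rc = device_add(&nvdimm_bus->dev);
        if (rc) {
                dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
                goto err;
        }
        return nvdimm_bus;
 err:
        /* release callback frees the id and the allocation */
        put_device(&nvdimm_bus->dev);
        return NULL;
}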
381 void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
383 if (!nvdimm_bus)
385 device_unregister(&nvdimm_bus->dev);
421 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
424 list_del_init(&nvdimm_bus->list);
427 wait_event(nvdimm_bus->wait,
428 atomic_read(&nvdimm_bus->ioctl_active) == 0);
431 device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
433 spin_lock(&nvdimm_bus->badrange.lock);
434 free_badrange_list(&nvdimm_bus->badrange.list);
435 spin_unlock(&nvdimm_bus->badrange.lock);
437 nvdimm_bus_destroy_ndctl(nvdimm_bus);
442 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
445 rc = nvdimm_bus_create_ndctl(nvdimm_bus);
450 list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
454 dev_set_drvdata(dev, nvdimm_bus->nd_desc);
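These hits are the bus driver's probe: the ndctl character device is created first, the bus is then added to the global nvdimm_bus_list, and finally the descriptor is stashed in drvdata so bus provider attributes can look up their context. The remove path at lines 421-437 above undoes this in reverse. A sketch, assuming the list is protected by a bus-list mutex whose name does not appear in these hits:

static int nd_bus_probe_sketch(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        int rc;

        rc = nvdimm_bus_create_ndctl(nvdimm_bus);
        if (rc)
                return rc;

        mutex_lock(&nvdimm_bus_list_mutex);     /* assumed lock name */
        list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
        mutex_unlock(&nvdimm_bus_list_mutex);

        /* let bus provider attributes find their local context */
        dev_set_drvdata(dev, nvdimm_bus->nd_desc);

        return 0;
}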
736 int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
738 dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
749 dev->parent = &nvdimm_bus->dev;
752 rc = dev_set_name(dev, "ndctl%d", nvdimm_bus->id);
758 dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %d\n",
759 nvdimm_bus->id, rc);
769 void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
771 device_destroy(&nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
937 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
940 if (nvdimm_bus->probe_active == 0)
944 wait_event(nvdimm_bus->wait,
945 nvdimm_bus->probe_active == 0);
998 static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
1001 struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
1013 return device_for_each_child(&nvdimm_bus->dev, data,
1020 wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
1026 static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
1029 struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
1032 struct device *dev = &nvdimm_bus->dev;
1182 rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
1193 nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
1232 struct nvdimm_bus *nvdimm_bus, *found = NULL;
1239 list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
1243 dev = device_find_child(&nvdimm_bus->dev,
1248 found = nvdimm_bus;
1249 } else if (nvdimm_bus->id == id) {
1250 found = nvdimm_bus;
1254 atomic_inc(&nvdimm_bus->ioctl_active);
1263 nvdimm_bus = found;
1264 rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
1268 if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
1269 wake_up(&nvdimm_bus->wait);
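The final group is the ioctl entry point's lifetime handling: while a command is dispatched, the bus is pinned with the atomic ioctl_active count, and the unregister path (lines 427-428 above) waits for that count to drain before tearing anything down. In the in-tree flow the increment happens while the bus is still findable on the global list, so unregister cannot miss an in-flight caller. A compact sketch of the two sides, using only the fields visible in these hits; the parameter types after nvdimm are assumed, since line 1026 truncates the __nd_ioctl() signature:

/* ioctl side: pin the bus across the command dispatch */
static long nd_ioctl_pinned_sketch(struct nvdimm_bus *nvdimm_bus,
                struct nvdimm *nvdimm, int ro, unsigned int cmd,
                unsigned long arg)
{
        int rc;

        atomic_inc(&nvdimm_bus->ioctl_active);
        rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
        if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
                wake_up(&nvdimm_bus->wait);
        return rc;
}

/* teardown side: drain in-flight ioctls before unregistering children */
static void nd_ioctl_drain_sketch(struct nvdimm_bus *nvdimm_bus)
{
        wait_event(nvdimm_bus->wait,
                        atomic_read(&nvdimm_bus->ioctl_active) == 0);
}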