xref: /linux/drivers/nvdimm/dimm_devs.c (revision ba9dac987319d4f3969691dcf366ef19c9ed8281)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4  */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/moduleparam.h>
7 #include <linux/vmalloc.h>
8 #include <linux/device.h>
9 #include <linux/ndctl.h>
10 #include <linux/slab.h>
11 #include <linux/io.h>
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include "nd-core.h"
15 #include "label.h"
16 #include "pmem.h"
17 #include "nd.h"
18 
19 static DEFINE_IDA(dimm_ida);
20 
21 /*
22  * Retrieve bus and dimm handle and return whether the bus supports
23  * get_config_data commands.
24  */
25 int nvdimm_check_config_data(struct device *dev)
26 {
27 	struct nvdimm *nvdimm = to_nvdimm(dev);
28 
29 	if (!nvdimm->cmd_mask ||
30 	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
31 		if (test_bit(NDD_LABELING, &nvdimm->flags))
32 			return -ENXIO;
33 		else
34 			return -ENOTTY;
35 	}
36 
37 	return 0;
38 }
39 
40 static int validate_dimm(struct nvdimm_drvdata *ndd)
41 {
42 	int rc;
43 
44 	if (!ndd)
45 		return -EINVAL;
46 
47 	rc = nvdimm_check_config_data(ndd->dev);
48 	if (rc)
49 		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
50 				__builtin_return_address(0), __func__, rc);
51 	return rc;
52 }
53 
54 /**
55  * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
56  * @ndd: dimm to initialize
57  *
58  * Returns: %0 if the area is valid or was successfully initialized, -errno on error
59  */
60 int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
61 {
62 	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
63 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
64 	struct nvdimm_bus_descriptor *nd_desc;
65 	int rc = validate_dimm(ndd);
66 	int cmd_rc = 0;
67 
68 	if (rc)
69 		return rc;
70 
71 	if (cmd->config_size)
72 		return 0; /* already valid */
73 
74 	memset(cmd, 0, sizeof(*cmd));
75 	nd_desc = nvdimm_bus->nd_desc;
76 	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
77 			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
78 	if (rc < 0)
79 		return rc;
80 	return cmd_rc;
81 }
82 
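/**
 * nvdimm_get_config_data - read data from the dimm's label storage area
 * @ndd: dimm to read from
 * @buf: destination buffer for the config data
 * @offset: offset into the label storage area to start reading
 * @len: number of bytes to read
 *
 * Issues ND_CMD_GET_CONFIG_DATA in chunks no larger than the dimm's
 * reported max transfer size.
 *
 * Returns: %0 on success, -errno on error
 */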
83 int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
84 			   size_t offset, size_t len)
85 {
86 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
87 	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
88 	int rc = validate_dimm(ndd), cmd_rc = 0;
89 	struct nd_cmd_get_config_data_hdr *cmd;
90 	size_t max_cmd_size, buf_offset;
91 
92 	if (rc)
93 		return rc;
94 
95 	if (offset + len > ndd->nsarea.config_size)
96 		return -ENXIO;
97 
98 	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
99 	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
100 	if (!cmd)
101 		return -ENOMEM;
102 
103 	for (buf_offset = 0; len;
104 	     len -= cmd->in_length, buf_offset += cmd->in_length) {
105 		size_t cmd_size;
106 
107 		cmd->in_offset = offset + buf_offset;
108 		cmd->in_length = min(max_cmd_size, len);
109 
110 		cmd_size = sizeof(*cmd) + cmd->in_length;
111 
112 		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
113 				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
114 		if (rc < 0)
115 			break;
116 		if (cmd_rc < 0) {
117 			rc = cmd_rc;
118 			break;
119 		}
120 
121 		/* out_buf should be valid, copy it into our output buffer */
122 		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
123 	}
124 	kvfree(cmd);
125 
126 	return rc;
127 }
128 
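/**
 * nvdimm_set_config_data - write data to the dimm's label storage area
 * @ndd: dimm to write to
 * @offset: offset into the label storage area to start writing
 * @buf: source buffer for the config data
 * @len: number of bytes to write
 *
 * Issues ND_CMD_SET_CONFIG_DATA in chunks no larger than the dimm's
 * reported max transfer size.
 *
 * Returns: %0 on success, -errno on error
 */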
129 int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
130 		void *buf, size_t len)
131 {
132 	size_t max_cmd_size, buf_offset;
133 	struct nd_cmd_set_config_hdr *cmd;
134 	int rc = validate_dimm(ndd), cmd_rc = 0;
135 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
136 	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
137 
138 	if (rc)
139 		return rc;
140 
141 	if (offset + len > ndd->nsarea.config_size)
142 		return -ENXIO;
143 
144 	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
145 	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
146 	if (!cmd)
147 		return -ENOMEM;
148 
149 	for (buf_offset = 0; len; len -= cmd->in_length,
150 			buf_offset += cmd->in_length) {
151 		size_t cmd_size;
152 
153 		cmd->in_offset = offset + buf_offset;
154 		cmd->in_length = min(max_cmd_size, len);
155 		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);
156 
157 		/* status is output in the last 4 bytes of the command buffer */
158 		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
159 
160 		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
161 				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
162 		if (rc < 0)
163 			break;
164 		if (cmd_rc < 0) {
165 			rc = cmd_rc;
166 			break;
167 		}
168 	}
169 	kvfree(cmd);
170 
171 	return rc;
172 }
173 
174 void nvdimm_set_labeling(struct device *dev)
175 {
176 	struct nvdimm *nvdimm = to_nvdimm(dev);
177 
178 	set_bit(NDD_LABELING, &nvdimm->flags);
179 }
180 
181 void nvdimm_set_locked(struct device *dev)
182 {
183 	struct nvdimm *nvdimm = to_nvdimm(dev);
184 
185 	set_bit(NDD_LOCKED, &nvdimm->flags);
186 }
187 
188 void nvdimm_clear_locked(struct device *dev)
189 {
190 	struct nvdimm *nvdimm = to_nvdimm(dev);
191 
192 	clear_bit(NDD_LOCKED, &nvdimm->flags);
193 }
194 
195 static void nvdimm_release(struct device *dev)
196 {
197 	struct nvdimm *nvdimm = to_nvdimm(dev);
198 
199 	ida_free(&dimm_ida, nvdimm->id);
200 	kfree(nvdimm);
201 }
202 
203 struct nvdimm *to_nvdimm(struct device *dev)
204 {
205 	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);
206 
207 	WARN_ON(!is_nvdimm(dev));
208 	return nvdimm;
209 }
210 EXPORT_SYMBOL_GPL(to_nvdimm);
211 
212 struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
213 {
214 	struct nvdimm *nvdimm = nd_mapping->nvdimm;
215 
216 	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));
217 
218 	return dev_get_drvdata(&nvdimm->dev);
219 }
220 EXPORT_SYMBOL(to_ndd);
221 
222 void nvdimm_drvdata_release(struct kref *kref)
223 {
224 	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
225 	struct device *dev = ndd->dev;
226 	struct resource *res, *_r;
227 
228 	dev_dbg(dev, "trace\n");
229 	scoped_guard(nvdimm_bus, dev) {
230 		for_each_dpa_resource_safe(ndd, res, _r)
231 			nvdimm_free_dpa(ndd, res);
232 	}
233 
234 	kvfree(ndd->data);
235 	kfree(ndd);
236 	put_device(dev);
237 }
238 
239 void get_ndd(struct nvdimm_drvdata *ndd)
240 {
241 	kref_get(&ndd->kref);
242 }
243 
244 void put_ndd(struct nvdimm_drvdata *ndd)
245 {
246 	if (ndd)
247 		kref_put(&ndd->kref, nvdimm_drvdata_release);
248 }
249 
250 const char *nvdimm_name(struct nvdimm *nvdimm)
251 {
252 	return dev_name(&nvdimm->dev);
253 }
254 EXPORT_SYMBOL_GPL(nvdimm_name);
255 
256 struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
257 {
258 	return &nvdimm->dev.kobj;
259 }
260 EXPORT_SYMBOL_GPL(nvdimm_kobj);
261 
262 unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
263 {
264 	return nvdimm->cmd_mask;
265 }
266 EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);
267 
268 void *nvdimm_provider_data(struct nvdimm *nvdimm)
269 {
270 	if (nvdimm)
271 		return nvdimm->provider_data;
272 	return NULL;
273 }
274 EXPORT_SYMBOL_GPL(nvdimm_provider_data);
275 
276 static ssize_t commands_show(struct device *dev,
277 		struct device_attribute *attr, char *buf)
278 {
279 	struct nvdimm *nvdimm = to_nvdimm(dev);
280 	int cmd, len = 0;
281 
282 	if (!nvdimm->cmd_mask)
283 		return sprintf(buf, "\n");
284 
285 	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
286 		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
287 	len += sprintf(buf + len, "\n");
288 	return len;
289 }
290 static DEVICE_ATTR_RO(commands);
291 
292 static ssize_t flags_show(struct device *dev,
293 		struct device_attribute *attr, char *buf)
294 {
295 	struct nvdimm *nvdimm = to_nvdimm(dev);
296 
297 	return sprintf(buf, "%s%s\n",
298 			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
299 			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
300 }
301 static DEVICE_ATTR_RO(flags);
302 
303 static ssize_t state_show(struct device *dev, struct device_attribute *attr,
304 		char *buf)
305 {
306 	struct nvdimm *nvdimm = to_nvdimm(dev);
307 
308 	/*
309 	 * The state may be in the process of changing; userspace should
310 	 * quiesce probing if it wants a static answer.
311 	 */
312 	nvdimm_bus_lock(dev);
313 	nvdimm_bus_unlock(dev);
314 	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
315 			? "active" : "idle");
316 }
317 static DEVICE_ATTR_RO(state);
318 
319 static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
320 {
321 	struct device *dev;
322 	u32 nfree;
323 
324 	if (!ndd)
325 		return -ENXIO;
326 
327 	dev = ndd->dev;
328 	guard(nvdimm_bus)(dev);
329 	nfree = nd_label_nfree(ndd);
330 	if (nfree - 1 > nfree) {
331 		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
332 		nfree = 0;
333 	} else
334 		nfree--;
335 	return sprintf(buf, "%d\n", nfree);
336 }
337 
338 static ssize_t available_slots_show(struct device *dev,
339 				    struct device_attribute *attr, char *buf)
340 {
341 	ssize_t rc;
342 
343 	device_lock(dev);
344 	rc = __available_slots_show(dev_get_drvdata(dev), buf);
345 	device_unlock(dev);
346 
347 	return rc;
348 }
349 static DEVICE_ATTR_RO(available_slots);
350 
351 static ssize_t security_show(struct device *dev,
352 			     struct device_attribute *attr, char *buf)
353 {
354 	struct nvdimm *nvdimm = to_nvdimm(dev);
355 
356 	/*
357 	 * For the test version we need to poll the "hardware" in order
358 	 * to get the updated status for unlock testing.
359 	 */
360 	if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
361 		nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
362 
363 	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
364 		return sprintf(buf, "overwrite\n");
365 	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
366 		return sprintf(buf, "disabled\n");
367 	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
368 		return sprintf(buf, "unlocked\n");
369 	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
370 		return sprintf(buf, "locked\n");
371 	return -ENOTTY;
372 }
373 
374 static ssize_t frozen_show(struct device *dev,
375 		struct device_attribute *attr, char *buf)
376 {
377 	struct nvdimm *nvdimm = to_nvdimm(dev);
378 
379 	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
380 				&nvdimm->sec.flags));
381 }
382 static DEVICE_ATTR_RO(frozen);
383 
384 static ssize_t security_store(struct device *dev,
385 		struct device_attribute *attr, const char *buf, size_t len)
386 
387 {
388 	/*
389 	 * Require all userspace triggered security management to be
390 	 * done while probing is idle and the DIMM is not in active use
391 	 * in any region.
392 	 */
393 	guard(device)(dev);
394 	guard(nvdimm_bus)(dev);
395 	wait_nvdimm_bus_probe_idle(dev);
396 	return nvdimm_security_store(dev, buf, len);
397 }
398 static DEVICE_ATTR_RW(security);
399 
400 static struct attribute *nvdimm_attributes[] = {
401 	&dev_attr_state.attr,
402 	&dev_attr_flags.attr,
403 	&dev_attr_commands.attr,
404 	&dev_attr_available_slots.attr,
405 	&dev_attr_security.attr,
406 	&dev_attr_frozen.attr,
407 	NULL,
408 };
409 
410 static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
411 {
412 	struct device *dev = container_of(kobj, typeof(*dev), kobj);
413 	struct nvdimm *nvdimm = to_nvdimm(dev);
414 
415 	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
416 		return a->mode;
417 	if (!nvdimm->sec.flags)
418 		return 0;
419 
420 	if (a == &dev_attr_security.attr) {
421 		/* Are there any state mutation ops (make writable)? */
422 		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
423 				|| nvdimm->sec.ops->change_key
424 				|| nvdimm->sec.ops->erase
425 				|| nvdimm->sec.ops->overwrite)
426 			return a->mode;
427 		return 0444;
428 	}
429 
430 	if (nvdimm->sec.ops->freeze)
431 		return a->mode;
432 	return 0;
433 }
434 
435 static const struct attribute_group nvdimm_attribute_group = {
436 	.attrs = nvdimm_attributes,
437 	.is_visible = nvdimm_visible,
438 };
439 
440 static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
441 {
442 	struct nvdimm *nvdimm = to_nvdimm(dev);
443 	enum nvdimm_fwa_result result;
444 
445 	if (!nvdimm->fw_ops)
446 		return -EOPNOTSUPP;
447 
448 	guard(nvdimm_bus)(dev);
449 	result = nvdimm->fw_ops->activate_result(nvdimm);
450 
451 	switch (result) {
452 	case NVDIMM_FWA_RESULT_NONE:
453 		return sprintf(buf, "none\n");
454 	case NVDIMM_FWA_RESULT_SUCCESS:
455 		return sprintf(buf, "success\n");
456 	case NVDIMM_FWA_RESULT_FAIL:
457 		return sprintf(buf, "fail\n");
458 	case NVDIMM_FWA_RESULT_NOTSTAGED:
459 		return sprintf(buf, "not_staged\n");
460 	case NVDIMM_FWA_RESULT_NEEDRESET:
461 		return sprintf(buf, "need_reset\n");
462 	default:
463 		return -ENXIO;
464 	}
465 }
466 static DEVICE_ATTR_ADMIN_RO(result);
467 
468 static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
469 {
470 	struct nvdimm *nvdimm = to_nvdimm(dev);
471 	enum nvdimm_fwa_state state;
472 
473 	if (!nvdimm->fw_ops)
474 		return -EOPNOTSUPP;
475 
476 	guard(nvdimm_bus)(dev);
477 	state = nvdimm->fw_ops->activate_state(nvdimm);
478 
479 	switch (state) {
480 	case NVDIMM_FWA_IDLE:
481 		return sprintf(buf, "idle\n");
482 	case NVDIMM_FWA_BUSY:
483 		return sprintf(buf, "busy\n");
484 	case NVDIMM_FWA_ARMED:
485 		return sprintf(buf, "armed\n");
486 	default:
487 		return -ENXIO;
488 	}
489 }
490 
491 static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
492 		const char *buf, size_t len)
493 {
494 	struct nvdimm *nvdimm = to_nvdimm(dev);
495 	enum nvdimm_fwa_trigger arg;
496 	int rc;
497 
498 	if (!nvdimm->fw_ops)
499 		return -EOPNOTSUPP;
500 
501 	if (sysfs_streq(buf, "arm"))
502 		arg = NVDIMM_FWA_ARM;
503 	else if (sysfs_streq(buf, "disarm"))
504 		arg = NVDIMM_FWA_DISARM;
505 	else
506 		return -EINVAL;
507 
508 	guard(nvdimm_bus)(dev);
509 	rc = nvdimm->fw_ops->arm(nvdimm, arg);
510 
511 	if (rc < 0)
512 		return rc;
513 	return len;
514 }
515 static DEVICE_ATTR_ADMIN_RW(activate);
516 
517 static struct attribute *nvdimm_firmware_attributes[] = {
518 	&dev_attr_activate.attr,
519 	&dev_attr_result.attr,
520 	NULL,
521 };
522 
523 static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
524 {
525 	struct device *dev = container_of(kobj, typeof(*dev), kobj);
526 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
527 	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
528 	struct nvdimm *nvdimm = to_nvdimm(dev);
529 	enum nvdimm_fwa_capability cap;
530 
531 	if (!nd_desc->fw_ops)
532 		return 0;
533 	if (!nvdimm->fw_ops)
534 		return 0;
535 
536 	guard(nvdimm_bus)(dev);
537 	cap = nd_desc->fw_ops->capability(nd_desc);
538 
539 	if (cap < NVDIMM_FWA_CAP_QUIESCE)
540 		return 0;
541 
542 	return a->mode;
543 }
544 
545 static const struct attribute_group nvdimm_firmware_attribute_group = {
546 	.name = "firmware",
547 	.attrs = nvdimm_firmware_attributes,
548 	.is_visible = nvdimm_firmware_visible,
549 };
550 
551 static const struct attribute_group *nvdimm_attribute_groups[] = {
552 	&nd_device_attribute_group,
553 	&nvdimm_attribute_group,
554 	&nvdimm_firmware_attribute_group,
555 	NULL,
556 };
557 
558 static const struct device_type nvdimm_device_type = {
559 	.name = "nvdimm",
560 	.release = nvdimm_release,
561 	.groups = nvdimm_attribute_groups,
562 };
563 
564 bool is_nvdimm(const struct device *dev)
565 {
566 	return dev->type == &nvdimm_device_type;
567 }
568 
569 static struct lock_class_key nvdimm_key;
570 
571 struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
572 		void *provider_data, const struct attribute_group **groups,
573 		unsigned long flags, unsigned long cmd_mask, int num_flush,
574 		struct resource *flush_wpq, const char *dimm_id,
575 		const struct nvdimm_security_ops *sec_ops,
576 		const struct nvdimm_fw_ops *fw_ops)
577 {
578 	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
579 	struct device *dev;
580 
581 	if (!nvdimm)
582 		return NULL;
583 
584 	nvdimm->id = ida_alloc(&dimm_ida, GFP_KERNEL);
585 	if (nvdimm->id < 0) {
586 		kfree(nvdimm);
587 		return NULL;
588 	}
589 
590 	nvdimm->dimm_id = dimm_id;
591 	nvdimm->provider_data = provider_data;
592 	nvdimm->flags = flags;
593 	nvdimm->cmd_mask = cmd_mask;
594 	nvdimm->num_flush = num_flush;
595 	nvdimm->flush_wpq = flush_wpq;
596 	atomic_set(&nvdimm->busy, 0);
597 	dev = &nvdimm->dev;
598 	dev_set_name(dev, "nmem%d", nvdimm->id);
599 	dev->parent = &nvdimm_bus->dev;
600 	dev->type = &nvdimm_device_type;
601 	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
602 	dev->groups = groups;
603 	nvdimm->sec.ops = sec_ops;
604 	nvdimm->fw_ops = fw_ops;
605 	nvdimm->sec.overwrite_tmo = 0;
606 	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
607 	/*
608 	 * Security state must be initialized before device_add() for
609 	 * attribute visibility.
610 	 */
611 	/* get security state and extended (master) state */
612 	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
613 	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
614 	device_initialize(dev);
615 	lockdep_set_class(&dev->mutex, &nvdimm_key);
616 	if (test_bit(NDD_REGISTER_SYNC, &flags))
617 		nd_device_register_sync(dev);
618 	else
619 		nd_device_register(dev);
620 
621 	return nvdimm;
622 }
623 EXPORT_SYMBOL_GPL(__nvdimm_create);
624 
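/**
 * nvdimm_delete - unregister an nvdimm previously created by __nvdimm_create
 * @nvdimm: dimm to remove
 *
 * Marks the security state frozen, cancels any pending overwrite work,
 * and synchronously unregisters the device.
 */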
625 void nvdimm_delete(struct nvdimm *nvdimm)
626 {
627 	struct device *dev = &nvdimm->dev;
628 	bool dev_put = false;
629 
630 	/* We are shutting down. Make state frozen artificially. */
631 	scoped_guard(nvdimm_bus, dev) {
632 		set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
633 		dev_put = test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
634 	}
635 	cancel_delayed_work_sync(&nvdimm->dwork);
636 	if (dev_put)
637 		put_device(dev);
638 	nd_device_unregister(dev, ND_SYNC);
639 }
640 EXPORT_SYMBOL_GPL(nvdimm_delete);
641 
642 static void shutdown_security_notify(void *data)
643 {
644 	struct nvdimm *nvdimm = data;
645 
646 	sysfs_put(nvdimm->sec.overwrite_state);
647 }
648 
649 int nvdimm_security_setup_events(struct device *dev)
650 {
651 	struct nvdimm *nvdimm = to_nvdimm(dev);
652 
653 	if (!nvdimm->sec.flags || !nvdimm->sec.ops
654 			|| !nvdimm->sec.ops->overwrite)
655 		return 0;
656 	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
657 	if (!nvdimm->sec.overwrite_state)
658 		return -ENOMEM;
659 
660 	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
661 }
662 EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);
663 
664 int nvdimm_in_overwrite(struct nvdimm *nvdimm)
665 {
666 	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
667 }
668 EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);
669 
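/**
 * nvdimm_security_freeze - freeze the security state of a dimm
 * @nvdimm: dimm to freeze
 *
 * Requires the nvdimm bus lock to be held by the caller.
 *
 * Returns: %0 on success, -EOPNOTSUPP if the dimm does not implement a
 * freeze operation, -EIO if the security state is unknown, or -EBUSY if
 * an overwrite operation is in progress.
 */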
670 int nvdimm_security_freeze(struct nvdimm *nvdimm)
671 {
672 	int rc;
673 
674 	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));
675 
676 	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
677 		return -EOPNOTSUPP;
678 
679 	if (!nvdimm->sec.flags)
680 		return -EIO;
681 
682 	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
683 		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
684 		return -EBUSY;
685 	}
686 
687 	rc = nvdimm->sec.ops->freeze(nvdimm);
688 	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
689 
690 	return rc;
691 }
692 
693 static unsigned long dpa_align(struct nd_region *nd_region)
694 {
695 	struct device *dev = &nd_region->dev;
696 
697 	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
698 				"bus lock required for capacity provision\n"))
699 		return 0;
700 	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
701 				% nd_region->ndr_mappings,
702 				"invalid region align %#lx mappings: %d\n",
703 				nd_region->align, nd_region->ndr_mappings))
704 		return 0;
705 	return nd_region->align / nd_region->ndr_mappings;
706 }
707 
708 /**
709  * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
710  *			   contiguous unallocated dpa range.
711  * @nd_region: constrain available space check to this reference region
712  * @nd_mapping: container of dpa-resource-root + labels
713  *
714  * Returns: %0 if there is an alignment error, otherwise the max
715  *		unallocated dpa range
716  */
717 resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
718 					   struct nd_mapping *nd_mapping)
719 {
720 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
721 	struct nvdimm_bus *nvdimm_bus;
722 	resource_size_t max = 0;
723 	struct resource *res;
724 	unsigned long align;
725 
726 	/* if a dimm is disabled the available capacity is zero */
727 	if (!ndd)
728 		return 0;
729 
730 	align = dpa_align(nd_region);
731 	if (!align)
732 		return 0;
733 
734 	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
735 	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
736 		return 0;
737 	for_each_dpa_resource(ndd, res) {
738 		resource_size_t start, end;
739 
740 		if (strcmp(res->name, "pmem-reserve") != 0)
741 			continue;
742 		/* trim free space relative to current alignment setting */
743 		start = ALIGN(res->start, align);
744 		end = ALIGN_DOWN(res->end + 1, align) - 1;
745 		if (end < start)
746 			continue;
747 		if (end - start + 1 > max)
748 			max = end - start + 1;
749 	}
750 	release_free_pmem(nvdimm_bus, nd_mapping);
751 	return max;
752 }
753 
754 /**
755  * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
756  * @nd_mapping: container of dpa-resource-root + labels
757  * @nd_region: constrain available space check to this reference region
758  *
759  * Validate that a PMEM label, if present, aligns with the start of an
760  * interleave set.
761  *
762  * Returns: %0 if there is an alignment error, otherwise the unallocated dpa
763  */
764 resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
765 				      struct nd_mapping *nd_mapping)
766 {
767 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
768 	resource_size_t map_start, map_end, busy = 0;
769 	struct resource *res;
770 	unsigned long align;
771 
772 	if (!ndd)
773 		return 0;
774 
775 	align = dpa_align(nd_region);
776 	if (!align)
777 		return 0;
778 
779 	map_start = nd_mapping->start;
780 	map_end = map_start + nd_mapping->size - 1;
781 	for_each_dpa_resource(ndd, res) {
782 		resource_size_t start, end;
783 
784 		start = ALIGN_DOWN(res->start, align);
785 		end = ALIGN(res->end + 1, align) - 1;
786 		if (start >= map_start && start < map_end) {
787 			if (end > map_end) {
788 				nd_dbg_dpa(nd_region, ndd, res,
789 					   "misaligned to iset\n");
790 				return 0;
791 			}
792 			busy += end - start + 1;
793 		} else if (end >= map_start && end <= map_end) {
794 			busy += end - start + 1;
795 		} else if (map_start > start && map_start < end) {
796 			/* total eclipse of the mapping */
797 			busy += nd_mapping->size;
798 		}
799 	}
800 
801 	if (busy < nd_mapping->size)
802 		return ALIGN_DOWN(nd_mapping->size - busy, align);
803 	return 0;
804 }
805 
806 void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
807 {
808 	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
809 	kfree(res->name);
810 	__release_region(&ndd->dpa, res->start, resource_size(res));
811 }
812 
813 struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
814 		struct nd_label_id *label_id, resource_size_t start,
815 		resource_size_t n)
816 {
817 	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
818 	struct resource *res;
819 
820 	if (!name)
821 		return NULL;
822 
823 	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
824 	res = __request_region(&ndd->dpa, start, n, name, 0);
825 	if (!res)
826 		kfree(name);
827 	return res;
828 }
829 
830 /**
831  * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
832  * @ndd: container of dpa-resource-root + labels
833  * @label_id: dpa resource name of the form pmem-<human readable uuid>
834  *
835  * Returns: sum of the dpa allocated to the label_id
836  */
837 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
838 		struct nd_label_id *label_id)
839 {
840 	resource_size_t allocated = 0;
841 	struct resource *res;
842 
843 	for_each_dpa_resource(ndd, res)
844 		if (strcmp(res->name, label_id->id) == 0)
845 			allocated += resource_size(res);
846 
847 	return allocated;
848 }
849 
850 static int count_dimms(struct device *dev, void *c)
851 {
852 	int *count = c;
853 
854 	if (is_nvdimm(dev))
855 		(*count)++;
856 	return 0;
857 }
858 
859 int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
860 {
861 	int count = 0;
862 	/* Flush any possible dimm registration failures */
863 	nd_synchronize();
864 
865 	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
866 	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
867 	if (count != dimm_count)
868 		return -ENXIO;
869 	return 0;
870 }
871 EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);
872 
873 void __exit nvdimm_devs_exit(void)
874 {
875 	ida_destroy(&dimm_ida);
876 }
877