// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <nd-core.h>
#include <linux/printk.h>
#include <linux/seq_buf.h>
#include <linux/papr_scm.h>
#include <uapi/linux/papr_pdsm.h>

#include "../watermark.h"
#include "nfit_test.h"
#include "ndtest.h"

enum {
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,
	NUM_INSTANCES = 2,
	NUM_DCR = 4,
	NDTEST_MAX_MAPPING = 6,
};

#define NDTEST_SCM_DIMM_CMD_MASK	   \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	 (1ul << ND_CMD_GET_CONFIG_DATA) | \
	 (1ul << ND_CMD_SET_CONFIG_DATA) | \
	 (1ul << ND_CMD_CALL))

#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm)		\
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12)	\
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))

static DEFINE_SPINLOCK(ndtest_lock);
static struct ndtest_priv *instances[NUM_INSTANCES];

static const struct class ndtest_dimm_class = {
	.name = "nfit_test_dimm",
};

static struct gen_pool *ndtest_pool;

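/*
 * Static description of the emulated topology: dimm_group1 populates the
 * first test bus with five healthy DIMMs, while dimm_group2 gives the
 * second bus a single DIMM with every PAPR health flag raised so that the
 * unarmed/dirty-shutdown/save-failure paths can be exercised.
 */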
static struct ndtest_dimm dimm_group1[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
		.uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
		.uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
		.physical_id = 1,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
		.uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
		.physical_id = 2,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
		.uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
		.physical_id = 3,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
		.uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
		.physical_id = 4,
		.num_formats = 2,
	},
};

static struct ndtest_dimm dimm_group2[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
		.uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 1,
		.flags = PAPR_PMEM_UNARMED | PAPR_PMEM_EMPTY |
			 PAPR_PMEM_SAVE_FAILED | PAPR_PMEM_SHUTDOWN_DIRTY |
			 PAPR_PMEM_HEALTH_FATAL,
	},
};

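/*
 * Interleave layouts for the regions below: region0 stripes 16M from each
 * of two DIMMs, region1 stripes 16M from each of four DIMMs, and region6
 * maps a single DIMM in full.
 */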
static struct ndtest_mapping region0_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = 0,
		.size = SZ_16M,
	}
};

static struct ndtest_mapping region1_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 2,
		.position = 2,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 3,
		.position = 3,
		.start = SZ_16M,
		.size = SZ_16M,
	},
};

static struct ndtest_region bus0_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region0_mapping),
		.mapping = region0_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region1_mapping),
		.mapping = region1_mapping,
		.size = DIMM_SIZE * 2,
		.range_index = 2,
	},
};

static struct ndtest_mapping region6_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = DIMM_SIZE,
	},
};

static struct ndtest_region bus1_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_IO,
		.num_mappings = ARRAY_SIZE(region6_mapping),
		.mapping = region6_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
};

static struct ndtest_config bus_configs[NUM_INSTANCES] = {
	/* bus 1 */
	{
		.dimm_start = 0,
		.dimm_count = ARRAY_SIZE(dimm_group1),
		.dimms = dimm_group1,
		.regions = bus0_regions,
		.num_regions = ARRAY_SIZE(bus0_regions),
	},
	/* bus 2 */
	{
		.dimm_start = ARRAY_SIZE(dimm_group1),
		.dimm_count = ARRAY_SIZE(dimm_group2),
		.dimms = dimm_group2,
		.regions = bus1_regions,
		.num_regions = ARRAY_SIZE(bus1_regions),
	},
};

static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct ndtest_priv, pdev);
}

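/*
 * Emulated label-area accessors backing ND_CMD_GET_CONFIG_DATA and
 * ND_CMD_SET_CONFIG_DATA: they bounds-check the request against
 * LABEL_SIZE and copy to/from the DIMM's vmalloc'd label_area.
 */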
static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_get_config_data_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	hdr->status = 0;
	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);

	return buf_len - len;
}

static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_set_config_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);

	return buf_len - len;
}

static int ndtest_get_config_size(struct ndtest_dimm *dimm, unsigned int buf_len,
				  struct nd_cmd_get_config_size *size)
{
	size->status = 0;
	size->max_xfer = 8;
	size->config_size = dimm->config_size;

	return 0;
}

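/*
 * Bus ->ndctl() handler: dispatches the config-size/get/set commands to
 * the helpers above and then applies any error injection requested via
 * the fail_cmd/fail_cmd_code sysfs attributes.
 */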
static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
		      struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		      unsigned int buf_len, int *cmd_rc)
{
	struct ndtest_dimm *dimm;
	int _cmd_rc;

	if (!cmd_rc)
		cmd_rc = &_cmd_rc;

	*cmd_rc = 0;

	if (!nvdimm)
		return -EINVAL;

	dimm = nvdimm_provider_data(nvdimm);
	if (!dimm)
		return -EINVAL;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		*cmd_rc = ndtest_get_config_size(dimm, buf_len, buf);
		break;
	case ND_CMD_GET_CONFIG_DATA:
		*cmd_rc = ndtest_config_get(dimm, buf_len, buf);
		break;
	case ND_CMD_SET_CONFIG_DATA:
		*cmd_rc = ndtest_config_set(dimm, buf_len, buf);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Failures for a DIMM can be injected using fail_cmd and
	 * fail_cmd_code, see the device attributes below.
	 */
	if ((1 << cmd) & dimm->fail_cmd)
		return dimm->fail_cmd_code ? dimm->fail_cmd_code : -EIO;

	return 0;
}

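/*
 * Translate a fake "physical" address back to its test resource.
 * Installed via nfit_test_setup() so the nvdimm test harness can map
 * these addresses back to their vmalloc'd backing buffers.
 */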
static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++) {
		struct nfit_test_resource *n, *nfit_res = NULL;
		struct ndtest_priv *t = instances[i];

		if (!t)
			continue;
		spin_lock(&ndtest_lock);
		list_for_each_entry(n, &t->resources, list) {
			if (addr >= n->res.start && (addr < n->res.start
						     + resource_size(&n->res))) {
				nfit_res = n;
				break;
			} else if (addr >= (unsigned long) n->buf
				   && (addr < (unsigned long) n->buf
				       + resource_size(&n->res))) {
				nfit_res = n;
				break;
			}
		}
		spin_unlock(&ndtest_lock);
		if (nfit_res)
			return nfit_res;
	}

	pr_warn("Failed to get resource\n");

	return NULL;
}

static void ndtest_release_resource(void *data)
{
	struct nfit_test_resource *res = data;

	spin_lock(&ndtest_lock);
	list_del(&res->list);
	spin_unlock(&ndtest_lock);

	if (resource_size(&res->res) >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, res->res.start,
			      resource_size(&res->res));
	vfree(res->buf);
	kfree(res);
}

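/*
 * Allocate a vmalloc'd backing buffer plus a fake device address for it.
 * DIMM-sized (and larger) allocations get their address from the shared
 * gen_pool; smaller ones simply reuse the buffer's virtual address. The
 * resource is torn down by ndtest_release_resource() via a devm action.
 */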
static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
				   dma_addr_t *dma)
{
	dma_addr_t __dma = 0;
	void *buf;
	struct nfit_test_resource *res;
	struct genpool_data_align data = {
		.align = SZ_128M,
	};

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	buf = vmalloc(size);
	if (!buf)
		goto buf_err;

	if (size >= DIMM_SIZE)
		__dma = gen_pool_alloc_algo(ndtest_pool, size,
					    gen_pool_first_fit_align, &data);
	else
		__dma = (unsigned long) buf;

	if (!__dma)
		goto buf_err;

	INIT_LIST_HEAD(&res->list);
	res->dev = &p->pdev.dev;
	res->buf = buf;
	res->res.start = __dma;
	res->res.end = __dma + size - 1;
	res->res.name = "NFIT";
	spin_lock_init(&res->lock);
	INIT_LIST_HEAD(&res->requests);
	spin_lock(&ndtest_lock);
	list_add(&res->list, &p->resources);
	spin_unlock(&ndtest_lock);

	if (dma)
		*dma = __dma;

	if (!devm_add_action(&p->pdev.dev, ndtest_release_resource, res))
		return res->buf;

	/* devm registration failed, take the resource back off the list */
	spin_lock(&ndtest_lock);
	list_del(&res->list);
	spin_unlock(&ndtest_lock);
buf_err:
	if (__dma && size >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, __dma, size);
	vfree(buf);
	kfree(res);

	return NULL;
}

static ssize_t range_index_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct ndtest_region *region = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", region->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *ndtest_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group ndtest_region_attribute_group = {
	.name = "papr",
	.attrs = ndtest_region_attributes,
};

static const struct attribute_group *ndtest_region_attribute_groups[] = {
	&ndtest_region_attribute_group,
	NULL,
};

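/*
 * Build an nd_region_desc from a static ndtest_region description and
 * register it as a PMEM region. The interleave-set cookies are derived
 * from the UUID of the region's first DIMM.
 */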
static int ndtest_create_region(struct ndtest_priv *p,
				struct ndtest_region *region)
{
	struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
	struct nd_region_desc *ndr_desc, _ndr_desc;
	struct nd_interleave_set *nd_set;
	struct resource res;
	int i, ndimm = region->mapping[0].dimm;
	u64 uuid[2];

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&_ndr_desc, 0, sizeof(_ndr_desc));
	ndr_desc = &_ndr_desc;

	if (!ndtest_alloc_resource(p, region->size, &res.start))
		return -ENOMEM;

	res.end = res.start + region->size - 1;
	ndr_desc->mapping = mappings;
	ndr_desc->res = &res;
	ndr_desc->provider_data = region;
	ndr_desc->attr_groups = ndtest_region_attribute_groups;

	if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
		pr_err("failed to parse UUID\n");
		return -ENXIO;
	}

	nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	nd_set->cookie1 = cpu_to_le64(uuid[0]);
	nd_set->cookie2 = cpu_to_le64(uuid[1]);
	nd_set->altcookie = nd_set->cookie1;
	ndr_desc->nd_set = nd_set;

	for (i = 0; i < region->num_mappings; i++) {
		ndimm = region->mapping[i].dimm;
		mappings[i].start = region->mapping[i].start;
		mappings[i].size = region->mapping[i].size;
		mappings[i].position = region->mapping[i].position;
		mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
	}

	ndr_desc->num_mappings = region->num_mappings;
	region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);

	if (!region->region) {
		dev_err(&p->pdev.dev, "Error registering region %pR\n",
			ndr_desc->res);
		return -ENXIO;
	}

	return 0;
}

static int ndtest_init_regions(struct ndtest_priv *p)
{
	int i, ret = 0;

	for (i = 0; i < p->config->num_regions; i++) {
		ret = ndtest_create_region(p, &p->config->regions[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static void put_dimms(void *data)
{
	struct ndtest_priv *p = data;
	int i;

	for (i = 0; i < p->config->dimm_count; i++)
		if (p->config->dimms[i].dev) {
			device_unregister(p->config->dimms[i].dev);
			p->config->dimms[i].dev = NULL;
		}
}

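/*
 * Attributes for the nfit_test_dimm class devices (test_dimm%d): the DIMM
 * handle plus the fail_cmd/fail_cmd_code knobs used to inject command
 * failures from userspace.
 */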
static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->fail_cmd);
}

static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	long val;
	ssize_t rc;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd = val;

	return size;
}
static DEVICE_ATTR_RW(fail_cmd);

static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dimm->fail_cmd_code);
}

static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	long val;
	ssize_t rc;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd_code = val;
	return size;
}
static DEVICE_ATTR_RW(fail_cmd_code);

static struct attribute *dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_fail_cmd.attr,
	&dev_attr_fail_cmd_code.attr,
	NULL,
};

static struct attribute_group dimm_attribute_group = {
	.attrs = dimm_attributes,
};

static const struct attribute_group *dimm_attribute_groups[] = {
	&dimm_attribute_group,
	NULL,
};

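/*
 * Per-nvdimm "papr" sysfs group, modelled on the attributes papr_scm
 * exposes (phys_id, vendor, id, flags, ...); most values are canned
 * test data.
 */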
static ssize_t phys_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x1234567\n");
}
static DEVICE_ATTR_RO(vendor);

static ssize_t id_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%04x-%02x-%04x-%08x", 0xabcd,
		       0xa, 2016, ~(dimm->handle));
}
static DEVICE_ATTR_RO(id);

static ssize_t nvdimm_handle_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->handle);
}

static struct device_attribute dev_attr_nvdimm_show_handle = {
	.attr = { .name = "handle", .mode = 0444 },
	.show = nvdimm_handle_show,
};

static ssize_t subsystem_vendor_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%04x\n", 0);
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t dirty_shutdown_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(dirty_shutdown);

static ssize_t formats_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%d\n", dimm->num_formats);
}
static DEVICE_ATTR_RO(formats);

static ssize_t format_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	if (dimm->num_formats > 1)
		return sprintf(buf, "0x201\n");

	return sprintf(buf, "0x101\n");
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "0x301\n");
}
static DEVICE_ATTR_RO(format1);

static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	if (a == &dev_attr_format1.attr && dimm->num_formats <= 1)
		return 0;

	return a->mode;
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
	struct seq_buf s;
	u64 flags;

	flags = dimm->flags;

	seq_buf_init(&s, buf, PAGE_SIZE);
	if (flags & PAPR_PMEM_UNARMED_MASK)
		seq_buf_printf(&s, "not_armed ");

	if (flags & PAPR_PMEM_BAD_SHUTDOWN_MASK)
		seq_buf_printf(&s, "flush_fail ");

	if (flags & PAPR_PMEM_BAD_RESTORE_MASK)
		seq_buf_printf(&s, "restore_fail ");

	if (flags & PAPR_PMEM_SAVE_MASK)
		seq_buf_printf(&s, "save_fail ");

	if (flags & PAPR_PMEM_SMART_EVENT_MASK)
		seq_buf_printf(&s, "smart_notify ");

	if (seq_buf_used(&s))
		seq_buf_printf(&s, "\n");

	return seq_buf_used(&s);
}
static DEVICE_ATTR_RO(flags);

static struct attribute *ndtest_nvdimm_attributes[] = {
	&dev_attr_nvdimm_show_handle.attr,
	&dev_attr_vendor.attr,
	&dev_attr_id.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_dirty_shutdown.attr,
	&dev_attr_formats.attr,
	&dev_attr_format.attr,
	&dev_attr_format1.attr,
	&dev_attr_flags.attr,
	NULL,
};

static const struct attribute_group ndtest_nvdimm_attribute_group = {
	.name = "papr",
	.attrs = ndtest_nvdimm_attributes,
	.is_visible = ndtest_nvdimm_attr_visible,
};

static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
	&ndtest_nvdimm_attribute_group,
	NULL,
};

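/*
 * Register one DIMM: create the nvdimm on the test bus (propagating the
 * labeling/unarmed flags) and a companion test_dimm%d class device that
 * carries the error-injection attributes.
 */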
static int ndtest_dimm_register(struct ndtest_priv *priv,
				struct ndtest_dimm *dimm, int id)
{
	struct device *dev = &priv->pdev.dev;
	unsigned long dimm_flags = dimm->flags;

	if (dimm->num_formats > 1)
		set_bit(NDD_LABELING, &dimm_flags);

	if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
		set_bit(NDD_UNARMED, &dimm_flags);

	dimm->nvdimm = nvdimm_create(priv->bus, dimm,
				     ndtest_nvdimm_attribute_groups, dimm_flags,
				     NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!dimm->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
		return -ENXIO;
	}

	dimm->dev = device_create_with_groups(&ndtest_dimm_class,
					      &priv->pdev.dev,
					      0, dimm, dimm_attribute_groups,
					      "test_dimm%d", id);
	if (IS_ERR(dimm->dev)) {
		pr_err("Could not create dimm device attributes\n");
		return PTR_ERR(dimm->dev);
	}

	return 0;
}

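/*
 * Allocate the label area and backing resources for every DIMM described
 * by the bus config and register each one.
 */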
static int ndtest_nvdimm_init(struct ndtest_priv *p)
{
	struct ndtest_dimm *d;
	void *res;
	int i, id, rc;

	for (i = 0; i < p->config->dimm_count; i++) {
		d = &p->config->dimms[i];
		d->id = id = p->config->dimm_start + i;
		res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
		if (!res)
			return -ENOMEM;

		d->label_area = res;
		sprintf(d->label_area, "label%d", id);
		d->config_size = LABEL_SIZE;

		if (!ndtest_alloc_resource(p, d->size,
					   &p->dimm_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->label_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->dcr_dma[id]))
			return -ENOMEM;

		d->address = p->dimm_dma[id];

		rc = ndtest_dimm_register(p, d, id);
		if (rc)
			return rc;
	}

	return 0;
}

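/*
 * Minimal fake "of_node" group: exposing a compatible string of
 * "nvdimm_test" lets userspace tooling recognize these buses as test
 * devices.
 */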
static ssize_t compatible_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "nvdimm_test");
}
static DEVICE_ATTR_RO(compatible);

static struct attribute *of_node_attributes[] = {
	&dev_attr_compatible.attr,
	NULL
};

static const struct attribute_group of_node_attribute_group = {
	.name = "of_node",
	.attrs = of_node_attributes,
};

static const struct attribute_group *ndtest_attribute_groups[] = {
	&of_node_attribute_group,
	NULL,
};

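/*
 * Bind this platform device to its bus config (indexed by pdev->id) and
 * register the nvdimm bus with ndtest_ctl() as the ioctl handler.
 */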
static int ndtest_bus_register(struct ndtest_priv *p)
{
	p->config = &bus_configs[p->pdev.id];

	p->bus_desc.ndctl = ndtest_ctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.provider_name = NULL;
	p->bus_desc.attr_groups = ndtest_attribute_groups;

	p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
	if (!p->bus) {
		dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
		return -ENOMEM;
	}

	return 0;
}

static void ndtest_remove(struct platform_device *pdev)
{
	struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);

	nvdimm_bus_unregister(p->bus);
}

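/*
 * Probe: register the bus, allocate the per-DIMM address tables, then
 * instantiate the DIMMs and regions.
 */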
static int ndtest_probe(struct platform_device *pdev)
{
	struct ndtest_priv *p;
	int rc;

	p = to_ndtest_priv(&pdev->dev);
	if (ndtest_bus_register(p))
		return -ENOMEM;

	p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				  sizeof(dma_addr_t), GFP_KERNEL);
	p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				    sizeof(dma_addr_t), GFP_KERNEL);
	p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				   sizeof(dma_addr_t), GFP_KERNEL);
	if (!p->dcr_dma || !p->label_dma || !p->dimm_dma) {
		rc = -ENOMEM;
		goto err;
	}

	rc = ndtest_nvdimm_init(p);
	if (rc)
		goto err;

	rc = ndtest_init_regions(p);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
	if (rc)
		goto err;

	platform_set_drvdata(pdev, p);

	return 0;

err:
	pr_err("%s:%d Failed nvdimm init\n", __func__, __LINE__);
	return rc;
}

static const struct platform_device_id ndtest_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver ndtest_driver = {
	.probe = ndtest_probe,
	.remove_new = ndtest_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = ndtest_id,
};

static void ndtest_release(struct device *dev)
{
	struct ndtest_priv *p = to_ndtest_priv(dev);

	kfree(p);
}

static void cleanup_devices(void)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);

	nfit_test_teardown();

	if (ndtest_pool)
		gen_pool_destroy(ndtest_pool);

	class_unregister(&ndtest_dimm_class);
}

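/*
 * Module init: register the dimm class, carve a 4G window of fake device
 * addresses (starting at 4G) out of a gen_pool, and create one platform
 * device per emulated bus before registering the driver.
 */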
static __init int ndtest_init(void)
{
	int rc, i;

	pmem_test();
	libnvdimm_test();
	device_dax_test();
	dax_pmem_test();

	nfit_test_setup(ndtest_resource_lookup, NULL);

	rc = class_register(&ndtest_dimm_class);
	if (rc)
		goto err_register;

	ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
	if (!ndtest_pool) {
		rc = -ENOMEM;
		goto err_register;
	}

	if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
		rc = -ENOMEM;
		goto err_register;
	}

	/* Each instance can be taken as a bus, which can have multiple dimms */
	for (i = 0; i < NUM_INSTANCES; i++) {
		struct ndtest_priv *priv;
		struct platform_device *pdev;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv) {
			rc = -ENOMEM;
			goto err_register;
		}

		INIT_LIST_HEAD(&priv->resources);
		pdev = &priv->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = ndtest_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}
		get_device(&pdev->dev);

		instances[i] = priv;
	}

	rc = platform_driver_register(&ndtest_driver);
	if (rc)
		goto err_register;

	return 0;

err_register:
	pr_err("Error registering platform device\n");
	cleanup_devices();

	return rc;
}

static __exit void ndtest_exit(void)
{
	cleanup_devices();
	platform_driver_unregister(&ndtest_driver);
}

module_init(ndtest_init);
module_exit(ndtest_exit);
MODULE_DESCRIPTION("Test non-NFIT devices");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");