1 /*
2  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/libnvdimm.h>
17 #include <linux/vmalloc.h>
18 #include <linux/device.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/ndctl.h>
22 #include <linux/sizes.h>
23 #include <linux/list.h>
24 #include <linux/slab.h>
25 #include <nfit.h>
26 #include <nd.h>
27 #include "nfit_test.h"
28 
29 /*
30  * Generate an NFIT table to describe the following topology:
31  *
32  * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
33  *
34  *                     (a)                       (b)            DIMM   BLK-REGION
35  *           +----------+--------------+----------+---------+
36  * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
37  * | imc0 +--+- - - - - region0 - - - -+----------+         +
38  * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
39  *    |      +----------+--------------v----------v         v
40  * +--+---+                            |                    |
41  * | cpu0 |                                    region1
42  * +--+---+                            |                    |
43  *    |      +-------------------------^----------^         ^
44  * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
45  * | imc1 +--+-------------------------+----------+         +
46  * +------+  |                 blk5.0             |  pm1.0  |    3      region5
47  *           +-------------------------+----------+-+-------+
48  *
49  * +--+---+
50  * | cpu1 |
51  * +--+---+                   (Hotplug DIMM)
52  *    |      +----------------------------------------------+
53  * +--+---+  |                 blk6.0/pm7.0                 |    4      region6/7
54  * | imc0 +--+----------------------------------------------+
55  * +------+
56  *
57  *
58  * *) In this layout we have four dimms and two memory controllers in one
59  *    socket.  Each unique interface (BLK or PMEM) to DPA space
60  *    is identified by a region device with a dynamically assigned id.
61  *
62  * *) The first portions of dimm0 and dimm1 are interleaved as REGION0.
63  *    A single PMEM namespace "pm0.0" is created using half of the
64  *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
65  *    allocate from the bottom of a region.  The unallocated
66  *    portion of REGION0 aliases with REGION2 and REGION3.  That
67  *    unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
68  *    "blk3.0") starting at the base of each DIMM to offset (a) in those
69  *    DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
70  *    names that can be assigned to a namespace.
71  *
72  * *) In the last portion of dimm0 and dimm1 we have an interleaved
73  *    SPA range, REGION1, that spans those two dimms as well as dimm2
74  *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
75  *    "pm1.0"; the rest is reclaimed as 4 BLK namespaces (one for each
76  *    dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
77  *    "blk5.0".
78  *
79  * *) The portions of dimm2 and dimm3 that do not participate in the
80  *    REGION1 interleaved SPA range (i.e. the DPA space below offset
81  *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
82  *    Note that BLK namespaces need not be contiguous in DPA-space, and
83  *    can consume aliased capacity from multiple interleave sets.
84  *
85  * BUS1: Legacy NVDIMM (single contiguous range)
86  *
87  *  region2
88  * +---------------------+
89  * |---------------------|
90  * ||       pm2.0       ||
91  * |---------------------|
92  * +---------------------+
93  *
94  * *) An NFIT table may describe a simple system-physical-address range
95  *    with no BLK aliasing.  This type of region may optionally
96  *    reference an NVDIMM.
97  */
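/*
 * Concrete numbers for the layout above, derived from the constants
 * below: each DIMM is 32M (DIMM_SIZE).  spa0 is 32M interleaved 2-ways
 * across dimm0/dimm1, so each contributes DPA 0-16M.  spa1 is 64M
 * interleaved 4-ways across dimm0-dimm3, so each contributes DPA
 * 16M-32M.  The first 16M of dimm2 and dimm3 is therefore reachable
 * only through their block-data-windows.
 */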
98 enum {
99 	NUM_PM  = 3,
100 	NUM_DCR = 5,
101 	NUM_HINTS = 8,
102 	NUM_BDW = NUM_DCR,
103 	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
104 	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */
		+ 1 /* spa11 iset */,
105 	DIMM_SIZE = SZ_32M,
106 	LABEL_SIZE = SZ_128K,
107 	SPA_VCD_SIZE = SZ_4M,
108 	SPA0_SIZE = DIMM_SIZE,
109 	SPA1_SIZE = DIMM_SIZE*2,
110 	SPA2_SIZE = DIMM_SIZE,
111 	BDW_SIZE = 64 << 8,
112 	DCR_SIZE = 12,
113 	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
114 };
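/*
 * The counts above work out to NUM_SPA = 3 + 5 + 5 = 13 and
 * NUM_MEM = 5 + 5 + 2 + 4 + 1 = 17, matching the 13 SPA and 17
 * memory-map entries that nfit_test0_setup() emits once the hotplug
 * structures are included.
 */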
115 
116 struct nfit_test_dcr {
117 	__le64 bdw_addr;
118 	__le32 bdw_status;
119 	__u8 aperature[BDW_SIZE];
120 };
121 
122 #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
123 	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
124 	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
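/*
 * Example: handle[3] below encodes node 0, socket 0, imc 1, chan 0,
 * dimm 1:
 *
 *	NFIT_DIMM_HANDLE(0, 0, 1, 0, 1)
 *		== (0 << 16) | (0 << 12) | (1 << 8) | (0 << 4) | 1
 *		== 0x101
 *
 * handle[4], the hotplug DIMM on socket 1, works out to 0x1000.
 */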
125 
126 static u32 handle[NUM_DCR] = {
127 	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
128 	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
129 	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
130 	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
131 	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
132 };
133 
134 struct nfit_test {
135 	struct acpi_nfit_desc acpi_desc;
136 	struct platform_device pdev;
137 	struct list_head resources;
138 	void *nfit_buf;
139 	dma_addr_t nfit_dma;
140 	size_t nfit_size;
141 	int num_dcr;
142 	int num_pm;
143 	void **dimm;
144 	dma_addr_t *dimm_dma;
145 	void **flush;
146 	dma_addr_t *flush_dma;
147 	void **label;
148 	dma_addr_t *label_dma;
149 	void **spa_set;
150 	dma_addr_t *spa_set_dma;
151 	struct nfit_test_dcr **dcr;
152 	dma_addr_t *dcr_dma;
153 	int (*alloc)(struct nfit_test *t);
154 	void (*setup)(struct nfit_test *t);
155 	int setup_hotplug;
156 	struct ars_state {
157 		struct nd_cmd_ars_status *ars_status;
158 		unsigned long deadline;
159 		spinlock_t lock;
160 	} ars_state;
161 };
162 
163 static struct nfit_test *to_nfit_test(struct device *dev)
164 {
165 	struct platform_device *pdev = to_platform_device(dev);
166 
167 	return container_of(pdev, struct nfit_test, pdev);
168 }
169 
170 static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
171 		unsigned int buf_len)
172 {
173 	if (buf_len < sizeof(*nd_cmd))
174 		return -EINVAL;
175 
176 	nd_cmd->status = 0;
177 	nd_cmd->config_size = LABEL_SIZE;
178 	nd_cmd->max_xfer = SZ_4K;
179 
180 	return 0;
181 }
182 
183 static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
184 		*nd_cmd, unsigned int buf_len, void *label)
185 {
186 	unsigned int len, offset = nd_cmd->in_offset;
187 	int rc;
188 
189 	if (buf_len < sizeof(*nd_cmd))
190 		return -EINVAL;
191 	if (offset >= LABEL_SIZE)
192 		return -EINVAL;
193 	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
194 		return -EINVAL;
195 
196 	nd_cmd->status = 0;
197 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
198 	memcpy(nd_cmd->out_buf, label + offset, len);
199 	rc = buf_len - sizeof(*nd_cmd) - len;
200 
201 	return rc;
202 }
203 
204 static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
205 		unsigned int buf_len, void *label)
206 {
207 	unsigned int len, offset = nd_cmd->in_offset;
208 	u32 *status;
209 	int rc;
210 
211 	if (buf_len < sizeof(*nd_cmd))
212 		return -EINVAL;
213 	if (offset >= LABEL_SIZE)
214 		return -EINVAL;
215 	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
216 		return -EINVAL;
217 
218 	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
219 	*status = 0;
220 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
221 	memcpy(label + offset, nd_cmd->in_buf, len);
222 	rc = buf_len - sizeof(*nd_cmd) - (len + 4);
223 
224 	return rc;
225 }
226 
227 #define NFIT_TEST_ARS_RECORDS 4
228 #define NFIT_TEST_CLEAR_ERR_UNIT 256
229 
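/*
 * ARS capability response: room for NFIT_TEST_ARS_RECORDS records per
 * status query, a 256-byte clear-error unit, and the supported scrub
 * types (persistent and volatile) flagged in the upper 16 bits of
 * 'status'.
 */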
230 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
231 		unsigned int buf_len)
232 {
233 	if (buf_len < sizeof(*nd_cmd))
234 		return -EINVAL;
235 
236 	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
237 		+ NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
238 	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
239 	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
240 
241 	return 0;
242 }
243 
244 /*
245  * Initialize the ars_state to return an ars_result 1 second in the future with
246  * a 4K error range in the middle of the requested address range.
247  */
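/*
 * For example, the call in nfit_test0_setup() passes
 * (t->spa_set_dma[0], SPA0_SIZE), so a scrub of spa0 reports one
 * ND_ARS_PERSISTENT record of SZ_4K starting SPA0_SIZE / 2 into the
 * range.
 */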
248 static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len)
249 {
250 	struct nd_cmd_ars_status *ars_status;
251 	struct nd_ars_record *ars_record;
252 
253 	ars_state->deadline = jiffies + 1*HZ;
254 	ars_status = ars_state->ars_status;
255 	ars_status->status = 0;
256 	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
257 		+ sizeof(struct nd_ars_record);
258 	ars_status->address = addr;
259 	ars_status->length = len;
260 	ars_status->type = ND_ARS_PERSISTENT;
261 	ars_status->num_records = 1;
262 	ars_record = &ars_status->records[0];
263 	ars_record->handle = 0;
264 	ars_record->err_address = addr + len / 2;
265 	ars_record->length = SZ_4K;
266 }
267 
268 static int nfit_test_cmd_ars_start(struct ars_state *ars_state,
269 		struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
270 		int *cmd_rc)
271 {
272 	if (buf_len < sizeof(*ars_start))
273 		return -EINVAL;
274 
275 	spin_lock(&ars_state->lock);
276 	if (time_before(jiffies, ars_state->deadline)) {
277 		ars_start->status = NFIT_ARS_START_BUSY;
278 		*cmd_rc = -EBUSY;
279 	} else {
280 		ars_start->status = 0;
281 		ars_start->scrub_time = 1;
282 		post_ars_status(ars_state, ars_start->address,
283 				ars_start->length);
284 		*cmd_rc = 0;
285 	}
286 	spin_unlock(&ars_state->lock);
287 
288 	return 0;
289 }
290 
291 static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
292 		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
293 		int *cmd_rc)
294 {
295 	if (buf_len < ars_state->ars_status->out_length)
296 		return -EINVAL;
297 
298 	spin_lock(&ars_state->lock);
299 	if (time_before(jiffies, ars_state->deadline)) {
300 		memset(ars_status, 0, buf_len);
301 		ars_status->status = NFIT_ARS_STATUS_BUSY;
302 		ars_status->out_length = sizeof(*ars_status);
303 		*cmd_rc = -EBUSY;
304 	} else {
305 		memcpy(ars_status, ars_state->ars_status,
306 				ars_state->ars_status->out_length);
307 		*cmd_rc = 0;
308 	}
309 	spin_unlock(&ars_state->lock);
310 	return 0;
311 }
312 
313 static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err,
314 		unsigned int buf_len, int *cmd_rc)
315 {
316 	const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
317 	if (buf_len < sizeof(*clear_err))
318 		return -EINVAL;
319 
320 	if ((clear_err->address & mask) || (clear_err->length & mask))
321 		return -EINVAL;
322 
323 	/*
324 	 * Report 'all clear' success for all commands even though a new
325 	 * scrub will find errors again.  This is enough to have the
326 	 * error removed from the 'badblocks' tracking in the pmem
327 	 * driver.
328 	 */
329 	clear_err->status = 0;
330 	clear_err->cleared = clear_err->length;
331 	*cmd_rc = 0;
332 	return 0;
333 }
334 
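/*
 * Canned SMART payload returned for every DIMM: a non-critical health
 * state, media temperature of 23 C (the field is encoded in 1/16 C
 * units, hence 23 * 16), 75% spares remaining, 5% of rated life used,
 * and both the spare and temperature alarm bits set.
 */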
335 static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
336 {
337 	static const struct nd_smart_payload smart_data = {
338 		.flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
339 			| ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
340 			| ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
341 		.health = ND_SMART_NON_CRITICAL_HEALTH,
342 		.temperature = 23 * 16,
343 		.spares = 75,
344 		.alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
345 		.life_used = 5,
346 		.shutdown_state = 0,
347 		.vendor_size = 0,
348 	};
349 
350 	if (buf_len < sizeof(*smart))
351 		return -EINVAL;
352 	memcpy(smart->data, &smart_data, sizeof(smart_data));
353 	return 0;
354 }
355 
356 static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
357 		unsigned int buf_len)
358 {
359 	static const struct nd_smart_threshold_payload smart_t_data = {
360 		.alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
361 		.temperature = 40 * 16,
362 		.spares = 5,
363 	};
364 
365 	if (buf_len < sizeof(*smart_t))
366 		return -EINVAL;
367 	memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));
368 	return 0;
369 }
370 
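/*
 * Stand-in for acpi_nfit_ctl(): DIMM-scoped commands are routed by
 * device handle to the per-DIMM label area allocated in
 * nfit_test0_alloc(), bus-scoped commands operate on the shared
 * ars_state, and ND_CMD_CALL packages are unwrapped to their embedded
 * payload and sub-command before dispatch.
 */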
371 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
372 		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
373 		unsigned int buf_len, int *cmd_rc)
374 {
375 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
376 	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
377 	unsigned int func = cmd;
378 	int i, rc = 0, __cmd_rc;
379 
380 	if (!cmd_rc)
381 		cmd_rc = &__cmd_rc;
382 	*cmd_rc = 0;
383 
384 	if (nvdimm) {
385 		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
386 		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
387 
388 		if (!nfit_mem)
389 			return -ENOTTY;
390 
391 		if (cmd == ND_CMD_CALL) {
392 			struct nd_cmd_pkg *call_pkg = buf;
393 
394 			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
395 			buf = (void *) call_pkg->nd_payload;
396 			func = call_pkg->nd_command;
397 			if (call_pkg->nd_family != nfit_mem->family)
398 				return -ENOTTY;
399 		}
400 
401 		if (!test_bit(cmd, &cmd_mask)
402 				|| !test_bit(func, &nfit_mem->dsm_mask))
403 			return -ENOTTY;
404 
405 		/* lookup label space for the given dimm */
406 		for (i = 0; i < ARRAY_SIZE(handle); i++)
407 			if (__to_nfit_memdev(nfit_mem)->device_handle ==
408 					handle[i])
409 				break;
410 		if (i >= ARRAY_SIZE(handle))
411 			return -ENXIO;
412 
413 		switch (func) {
414 		case ND_CMD_GET_CONFIG_SIZE:
415 			rc = nfit_test_cmd_get_config_size(buf, buf_len);
416 			break;
417 		case ND_CMD_GET_CONFIG_DATA:
418 			rc = nfit_test_cmd_get_config_data(buf, buf_len,
419 				t->label[i]);
420 			break;
421 		case ND_CMD_SET_CONFIG_DATA:
422 			rc = nfit_test_cmd_set_config_data(buf, buf_len,
423 				t->label[i]);
424 			break;
425 		case ND_CMD_SMART:
426 			rc = nfit_test_cmd_smart(buf, buf_len);
427 			break;
428 		case ND_CMD_SMART_THRESHOLD:
429 			rc = nfit_test_cmd_smart_threshold(buf, buf_len);
430 			break;
431 		default:
432 			return -ENOTTY;
433 		}
434 	} else {
435 		struct ars_state *ars_state = &t->ars_state;
436 
437 		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
438 			return -ENOTTY;
439 
440 		switch (func) {
441 		case ND_CMD_ARS_CAP:
442 			rc = nfit_test_cmd_ars_cap(buf, buf_len);
443 			break;
444 		case ND_CMD_ARS_START:
445 			rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len,
446 					cmd_rc);
447 			break;
448 		case ND_CMD_ARS_STATUS:
449 			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
450 					cmd_rc);
451 			break;
452 		case ND_CMD_CLEAR_ERROR:
453 			rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc);
454 			break;
455 		default:
456 			return -ENOTTY;
457 		}
458 	}
459 
460 	return rc;
461 }
462 
463 static DEFINE_SPINLOCK(nfit_test_lock);
464 static struct nfit_test *instances[NUM_NFITS];
465 
466 static void release_nfit_res(void *data)
467 {
468 	struct nfit_test_resource *nfit_res = data;
469 	struct resource *res = nfit_res->res;
470 
471 	spin_lock(&nfit_test_lock);
472 	list_del(&nfit_res->list);
473 	spin_unlock(&nfit_test_lock);
474 
475 	vfree(nfit_res->buf);
476 	kfree(res);
477 	kfree(nfit_res);
478 }
479 
480 static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
481 		void *buf)
482 {
483 	struct device *dev = &t->pdev.dev;
484 	struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
485 	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
486 			GFP_KERNEL);
487 	int rc;
488 
489 	if (!res || !buf || !nfit_res)
490 		goto err;
491 	rc = devm_add_action(dev, release_nfit_res, nfit_res);
492 	if (rc)
493 		goto err;
494 	INIT_LIST_HEAD(&nfit_res->list);
495 	memset(buf, 0, size);
496 	nfit_res->dev = dev;
497 	nfit_res->buf = buf;
498 	nfit_res->res = res;
499 	res->start = *dma;
500 	res->end = *dma + size - 1;
501 	res->name = "NFIT";
502 	spin_lock(&nfit_test_lock);
503 	list_add(&nfit_res->list, &t->resources);
504 	spin_unlock(&nfit_test_lock);
505 
506 	return nfit_res->buf;
507  err:
508 	if (buf)
509 		vfree(buf);
510 	kfree(res);
511 	kfree(nfit_res);
512 	return NULL;
513 }
514 
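/*
 * test_alloc() hands out vmalloc() memory and advertises the buffer's
 * kernel virtual address as its "physical" address.  nfit_test_lookup()
 * below resolves an address against either the published resource
 * range or the vmalloc pointer, which is how the harness's wrapped
 * ioremap()/request_region() helpers (see the companion iomap shim)
 * are pointed at plain test memory instead of real NVDIMM apertures.
 */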
515 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
516 {
517 	void *buf = vmalloc(size);
518 
519 	*dma = (unsigned long) buf;
520 	return __test_alloc(t, size, dma, buf);
521 }
522 
523 static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
524 {
525 	int i;
526 
527 	for (i = 0; i < ARRAY_SIZE(instances); i++) {
528 		struct nfit_test_resource *n, *nfit_res = NULL;
529 		struct nfit_test *t = instances[i];
530 
531 		if (!t)
532 			continue;
533 		spin_lock(&nfit_test_lock);
534 		list_for_each_entry(n, &t->resources, list) {
535 			if (addr >= n->res->start && (addr < n->res->start
536 						+ resource_size(n->res))) {
537 				nfit_res = n;
538 				break;
539 			} else if (addr >= (unsigned long) n->buf
540 					&& (addr < (unsigned long) n->buf
541 						+ resource_size(n->res))) {
542 				nfit_res = n;
543 				break;
544 			}
545 		}
546 		spin_unlock(&nfit_test_lock);
547 		if (nfit_res)
548 			return nfit_res;
549 	}
550 
551 	return NULL;
552 }
553 
554 static int ars_state_init(struct device *dev, struct ars_state *ars_state)
555 {
556 	ars_state->ars_status = devm_kzalloc(dev,
557 			sizeof(struct nd_cmd_ars_status)
558 			+ sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS,
559 			GFP_KERNEL);
560 	if (!ars_state->ars_status)
561 		return -ENOMEM;
562 	spin_lock_init(&ars_state->lock);
563 	return 0;
564 }
565 
566 static int nfit_test0_alloc(struct nfit_test *t)
567 {
568 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
569 			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
570 			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
571 			+ offsetof(struct acpi_nfit_control_region,
572 					window_size) * NUM_DCR
573 			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
574 			+ (sizeof(struct acpi_nfit_flush_address)
575 					+ sizeof(u64) * NUM_HINTS) * NUM_DCR;
576 	int i;
577 
578 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
579 	if (!t->nfit_buf)
580 		return -ENOMEM;
581 	t->nfit_size = nfit_size;
582 
583 	t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
584 	if (!t->spa_set[0])
585 		return -ENOMEM;
586 
587 	t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
588 	if (!t->spa_set[1])
589 		return -ENOMEM;
590 
591 	t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
592 	if (!t->spa_set[2])
593 		return -ENOMEM;
594 
595 	for (i = 0; i < NUM_DCR; i++) {
596 		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
597 		if (!t->dimm[i])
598 			return -ENOMEM;
599 
600 		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
601 		if (!t->label[i])
602 			return -ENOMEM;
603 		sprintf(t->label[i], "label%d", i);
604 
605 		t->flush[i] = test_alloc(t, sizeof(u64) * NUM_HINTS,
606 				&t->flush_dma[i]);
607 		if (!t->flush[i])
608 			return -ENOMEM;
609 	}
610 
611 	for (i = 0; i < NUM_DCR; i++) {
612 		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
613 		if (!t->dcr[i])
614 			return -ENOMEM;
615 	}
616 
617 	return ars_state_init(&t->pdev.dev, &t->ars_state);
618 }
619 
620 static int nfit_test1_alloc(struct nfit_test *t)
621 {
622 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
623 		+ sizeof(struct acpi_nfit_memory_map)
624 		+ offsetof(struct acpi_nfit_control_region, window_size);
625 
626 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
627 	if (!t->nfit_buf)
628 		return -ENOMEM;
629 	t->nfit_size = nfit_size;
630 
631 	t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
632 	if (!t->spa_set[0])
633 		return -ENOMEM;
634 
635 	t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
636 	if (!t->spa_set[1])
637 		return -ENOMEM;
638 
639 	return ars_state_init(&t->pdev.dev, &t->ars_state);
640 }
641 
642 static void dcr_common_init(struct acpi_nfit_control_region *dcr)
643 {
644 	dcr->vendor_id = 0xabcd;
645 	dcr->device_id = 0;
646 	dcr->revision_id = 1;
647 	dcr->valid_fields = 1;
648 	dcr->manufacturing_location = 0xa;
649 	dcr->manufacturing_date = cpu_to_be16(2016);
650 }
651 
652 static void nfit_test0_setup(struct nfit_test *t)
653 {
654 	const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
655 		+ (sizeof(u64) * NUM_HINTS);
656 	struct acpi_nfit_desc *acpi_desc;
657 	struct acpi_nfit_memory_map *memdev;
658 	void *nfit_buf = t->nfit_buf;
659 	struct acpi_nfit_system_address *spa;
660 	struct acpi_nfit_control_region *dcr;
661 	struct acpi_nfit_data_region *bdw;
662 	struct acpi_nfit_flush_address *flush;
663 	unsigned int offset, i;
664 
665 	/*
666 	 * spa0 (interleave first half of dimm0 and dimm1, note storage
667 	 * does not actually alias the related block-data-window
668 	 * regions)
669 	 */
670 	spa = nfit_buf;
671 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
672 	spa->header.length = sizeof(*spa);
673 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
674 	spa->range_index = 0+1;
675 	spa->address = t->spa_set_dma[0];
676 	spa->length = SPA0_SIZE;
677 
678 	/*
679 	 * spa1 (interleave last half of the 4 DIMMS, note storage
680 	 * does not actually alias the related block-data-window
681 	 * regions)
682 	 */
683 	spa = nfit_buf + sizeof(*spa);
684 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
685 	spa->header.length = sizeof(*spa);
686 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
687 	spa->range_index = 1+1;
688 	spa->address = t->spa_set_dma[1];
689 	spa->length = SPA1_SIZE;
690 
691 	/* spa2 (dcr0) dimm0 */
692 	spa = nfit_buf + sizeof(*spa) * 2;
693 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
694 	spa->header.length = sizeof(*spa);
695 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
696 	spa->range_index = 2+1;
697 	spa->address = t->dcr_dma[0];
698 	spa->length = DCR_SIZE;
699 
700 	/* spa3 (dcr1) dimm1 */
701 	spa = nfit_buf + sizeof(*spa) * 3;
702 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
703 	spa->header.length = sizeof(*spa);
704 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
705 	spa->range_index = 3+1;
706 	spa->address = t->dcr_dma[1];
707 	spa->length = DCR_SIZE;
708 
709 	/* spa4 (dcr2) dimm2 */
710 	spa = nfit_buf + sizeof(*spa) * 4;
711 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
712 	spa->header.length = sizeof(*spa);
713 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
714 	spa->range_index = 4+1;
715 	spa->address = t->dcr_dma[2];
716 	spa->length = DCR_SIZE;
717 
718 	/* spa5 (dcr3) dimm3 */
719 	spa = nfit_buf + sizeof(*spa) * 5;
720 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
721 	spa->header.length = sizeof(*spa);
722 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
723 	spa->range_index = 5+1;
724 	spa->address = t->dcr_dma[3];
725 	spa->length = DCR_SIZE;
726 
727 	/* spa6 (bdw for dcr0) dimm0 */
728 	spa = nfit_buf + sizeof(*spa) * 6;
729 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
730 	spa->header.length = sizeof(*spa);
731 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
732 	spa->range_index = 6+1;
733 	spa->address = t->dimm_dma[0];
734 	spa->length = DIMM_SIZE;
735 
736 	/* spa7 (bdw for dcr1) dimm1 */
737 	spa = nfit_buf + sizeof(*spa) * 7;
738 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
739 	spa->header.length = sizeof(*spa);
740 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
741 	spa->range_index = 7+1;
742 	spa->address = t->dimm_dma[1];
743 	spa->length = DIMM_SIZE;
744 
745 	/* spa8 (bdw for dcr2) dimm2 */
746 	spa = nfit_buf + sizeof(*spa) * 8;
747 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
748 	spa->header.length = sizeof(*spa);
749 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
750 	spa->range_index = 8+1;
751 	spa->address = t->dimm_dma[2];
752 	spa->length = DIMM_SIZE;
753 
754 	/* spa9 (bdw for dcr3) dimm3 */
755 	spa = nfit_buf + sizeof(*spa) * 9;
756 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
757 	spa->header.length = sizeof(*spa);
758 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
759 	spa->range_index = 9+1;
760 	spa->address = t->dimm_dma[3];
761 	spa->length = DIMM_SIZE;
762 
763 	offset = sizeof(*spa) * 10;
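	/*
	 * Each memory-map entry below joins a DIMM (device_handle) to a
	 * SPA range (range_index) and a control region (region_index).
	 * The "N+1" values keep these cross-reference indices non-zero,
	 * since index 0 is reserved in the NFIT.
	 */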
764 	/* mem-region0 (spa0, dimm0) */
765 	memdev = nfit_buf + offset;
766 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
767 	memdev->header.length = sizeof(*memdev);
768 	memdev->device_handle = handle[0];
769 	memdev->physical_id = 0;
770 	memdev->region_id = 0;
771 	memdev->range_index = 0+1;
772 	memdev->region_index = 4+1;
773 	memdev->region_size = SPA0_SIZE/2;
774 	memdev->region_offset = t->spa_set_dma[0];
775 	memdev->address = 0;
776 	memdev->interleave_index = 0;
777 	memdev->interleave_ways = 2;
778 
779 	/* mem-region1 (spa0, dimm1) */
780 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
781 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
782 	memdev->header.length = sizeof(*memdev);
783 	memdev->device_handle = handle[1];
784 	memdev->physical_id = 1;
785 	memdev->region_id = 0;
786 	memdev->range_index = 0+1;
787 	memdev->region_index = 5+1;
788 	memdev->region_size = SPA0_SIZE/2;
789 	memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
790 	memdev->address = 0;
791 	memdev->interleave_index = 0;
792 	memdev->interleave_ways = 2;
793 
794 	/* mem-region2 (spa1, dimm0) */
795 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
796 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
797 	memdev->header.length = sizeof(*memdev);
798 	memdev->device_handle = handle[0];
799 	memdev->physical_id = 0;
800 	memdev->region_id = 1;
801 	memdev->range_index = 1+1;
802 	memdev->region_index = 4+1;
803 	memdev->region_size = SPA1_SIZE/4;
804 	memdev->region_offset = t->spa_set_dma[1];
805 	memdev->address = SPA0_SIZE/2;
806 	memdev->interleave_index = 0;
807 	memdev->interleave_ways = 4;
808 
809 	/* mem-region3 (spa1, dimm1) */
810 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
811 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
812 	memdev->header.length = sizeof(*memdev);
813 	memdev->device_handle = handle[1];
814 	memdev->physical_id = 1;
815 	memdev->region_id = 1;
816 	memdev->range_index = 1+1;
817 	memdev->region_index = 5+1;
818 	memdev->region_size = SPA1_SIZE/4;
819 	memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
820 	memdev->address = SPA0_SIZE/2;
821 	memdev->interleave_index = 0;
822 	memdev->interleave_ways = 4;
823 
824 	/* mem-region4 (spa1, dimm2) */
825 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
826 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
827 	memdev->header.length = sizeof(*memdev);
828 	memdev->device_handle = handle[2];
829 	memdev->physical_id = 2;
830 	memdev->region_id = 0;
831 	memdev->range_index = 1+1;
832 	memdev->region_index = 6+1;
833 	memdev->region_size = SPA1_SIZE/4;
834 	memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
835 	memdev->address = SPA0_SIZE/2;
836 	memdev->interleave_index = 0;
837 	memdev->interleave_ways = 4;
838 
839 	/* mem-region5 (spa1, dimm3) */
840 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
841 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
842 	memdev->header.length = sizeof(*memdev);
843 	memdev->device_handle = handle[3];
844 	memdev->physical_id = 3;
845 	memdev->region_id = 0;
846 	memdev->range_index = 1+1;
847 	memdev->region_index = 7+1;
848 	memdev->region_size = SPA1_SIZE/4;
849 	memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
850 	memdev->address = SPA0_SIZE/2;
851 	memdev->interleave_index = 0;
852 	memdev->interleave_ways = 4;
853 
854 	/* mem-region6 (spa/dcr0, dimm0) */
855 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
856 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
857 	memdev->header.length = sizeof(*memdev);
858 	memdev->device_handle = handle[0];
859 	memdev->physical_id = 0;
860 	memdev->region_id = 0;
861 	memdev->range_index = 2+1;
862 	memdev->region_index = 0+1;
863 	memdev->region_size = 0;
864 	memdev->region_offset = 0;
865 	memdev->address = 0;
866 	memdev->interleave_index = 0;
867 	memdev->interleave_ways = 1;
868 
869 	/* mem-region7 (spa/dcr1, dimm1) */
870 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
871 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
872 	memdev->header.length = sizeof(*memdev);
873 	memdev->device_handle = handle[1];
874 	memdev->physical_id = 1;
875 	memdev->region_id = 0;
876 	memdev->range_index = 3+1;
877 	memdev->region_index = 1+1;
878 	memdev->region_size = 0;
879 	memdev->region_offset = 0;
880 	memdev->address = 0;
881 	memdev->interleave_index = 0;
882 	memdev->interleave_ways = 1;
883 
884 	/* mem-region8 (spa/dcr2, dimm2) */
885 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
886 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
887 	memdev->header.length = sizeof(*memdev);
888 	memdev->device_handle = handle[2];
889 	memdev->physical_id = 2;
890 	memdev->region_id = 0;
891 	memdev->range_index = 4+1;
892 	memdev->region_index = 2+1;
893 	memdev->region_size = 0;
894 	memdev->region_offset = 0;
895 	memdev->address = 0;
896 	memdev->interleave_index = 0;
897 	memdev->interleave_ways = 1;
898 
899 	/* mem-region9 (spa/dcr3, dimm3) */
900 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
901 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
902 	memdev->header.length = sizeof(*memdev);
903 	memdev->device_handle = handle[3];
904 	memdev->physical_id = 3;
905 	memdev->region_id = 0;
906 	memdev->range_index = 5+1;
907 	memdev->region_index = 3+1;
908 	memdev->region_size = 0;
909 	memdev->region_offset = 0;
910 	memdev->address = 0;
911 	memdev->interleave_index = 0;
912 	memdev->interleave_ways = 1;
913 
914 	/* mem-region10 (spa/bdw0, dimm0) */
915 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
916 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
917 	memdev->header.length = sizeof(*memdev);
918 	memdev->device_handle = handle[0];
919 	memdev->physical_id = 0;
920 	memdev->region_id = 0;
921 	memdev->range_index = 6+1;
922 	memdev->region_index = 0+1;
923 	memdev->region_size = 0;
924 	memdev->region_offset = 0;
925 	memdev->address = 0;
926 	memdev->interleave_index = 0;
927 	memdev->interleave_ways = 1;
928 
929 	/* mem-region11 (spa/bdw1, dimm1) */
930 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
931 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
932 	memdev->header.length = sizeof(*memdev);
933 	memdev->device_handle = handle[1];
934 	memdev->physical_id = 1;
935 	memdev->region_id = 0;
936 	memdev->range_index = 7+1;
937 	memdev->region_index = 1+1;
938 	memdev->region_size = 0;
939 	memdev->region_offset = 0;
940 	memdev->address = 0;
941 	memdev->interleave_index = 0;
942 	memdev->interleave_ways = 1;
943 
944 	/* mem-region12 (spa/bdw2, dimm2) */
945 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
946 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
947 	memdev->header.length = sizeof(*memdev);
948 	memdev->device_handle = handle[2];
949 	memdev->physical_id = 2;
950 	memdev->region_id = 0;
951 	memdev->range_index = 8+1;
952 	memdev->region_index = 2+1;
953 	memdev->region_size = 0;
954 	memdev->region_offset = 0;
955 	memdev->address = 0;
956 	memdev->interleave_index = 0;
957 	memdev->interleave_ways = 1;
958 
959 	/* mem-region13 (spa/bdw3, dimm3) */
960 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
961 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
962 	memdev->header.length = sizeof(*memdev);
963 	memdev->device_handle = handle[3];
964 	memdev->physical_id = 3;
965 	memdev->region_id = 0;
966 	memdev->range_index = 9+1;
967 	memdev->region_index = 3+1;
968 	memdev->region_size = 0;
969 	memdev->region_offset = 0;
970 	memdev->address = 0;
971 	memdev->interleave_index = 0;
972 	memdev->interleave_ways = 1;
973 
974 	offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
975 	/* dcr-descriptor0: blk */
976 	dcr = nfit_buf + offset;
977 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
978 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
979 	dcr->region_index = 0+1;
980 	dcr_common_init(dcr);
981 	dcr->serial_number = ~handle[0];
982 	dcr->code = NFIT_FIC_BLK;
983 	dcr->windows = 1;
984 	dcr->window_size = DCR_SIZE;
985 	dcr->command_offset = 0;
986 	dcr->command_size = 8;
987 	dcr->status_offset = 8;
988 	dcr->status_size = 4;
989 
990 	/* dcr-descriptor1: blk */
991 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
992 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
993 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
994 	dcr->region_index = 1+1;
995 	dcr_common_init(dcr);
996 	dcr->serial_number = ~handle[1];
997 	dcr->code = NFIT_FIC_BLK;
998 	dcr->windows = 1;
999 	dcr->window_size = DCR_SIZE;
1000 	dcr->command_offset = 0;
1001 	dcr->command_size = 8;
1002 	dcr->status_offset = 8;
1003 	dcr->status_size = 4;
1004 
1005 	/* dcr-descriptor2: blk */
1006 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
1007 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1008 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1009 	dcr->region_index = 2+1;
1010 	dcr_common_init(dcr);
1011 	dcr->serial_number = ~handle[2];
1012 	dcr->code = NFIT_FIC_BLK;
1013 	dcr->windows = 1;
1014 	dcr->window_size = DCR_SIZE;
1015 	dcr->command_offset = 0;
1016 	dcr->command_size = 8;
1017 	dcr->status_offset = 8;
1018 	dcr->status_size = 4;
1019 
1020 	/* dcr-descriptor3: blk */
1021 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
1022 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1023 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1024 	dcr->region_index = 3+1;
1025 	dcr_common_init(dcr);
1026 	dcr->serial_number = ~handle[3];
1027 	dcr->code = NFIT_FIC_BLK;
1028 	dcr->windows = 1;
1029 	dcr->window_size = DCR_SIZE;
1030 	dcr->command_offset = 0;
1031 	dcr->command_size = 8;
1032 	dcr->status_offset = 8;
1033 	dcr->status_size = 4;
1034 
1035 	offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
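	/*
	 * Second flavor of control region: the full-sized descriptors
	 * above carry the BLK command/status window layout
	 * (NFIT_FIC_BLK), while the truncated descriptors below end at
	 * window_size and only advertise the DIMMs for PMEM
	 * (NFIT_FIC_BYTEN, no windows).
	 */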
1036 	/* dcr-descriptor0: pmem */
1037 	dcr = nfit_buf + offset;
1038 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1039 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1040 			window_size);
1041 	dcr->region_index = 4+1;
1042 	dcr_common_init(dcr);
1043 	dcr->serial_number = ~handle[0];
1044 	dcr->code = NFIT_FIC_BYTEN;
1045 	dcr->windows = 0;
1046 
1047 	/* dcr-descriptor1: pmem */
1048 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1049 			window_size);
1050 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1051 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1052 			window_size);
1053 	dcr->region_index = 5+1;
1054 	dcr_common_init(dcr);
1055 	dcr->serial_number = ~handle[1];
1056 	dcr->code = NFIT_FIC_BYTEN;
1057 	dcr->windows = 0;
1058 
1059 	/* dcr-descriptor2: pmem */
1060 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1061 			window_size) * 2;
1062 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1063 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1064 			window_size);
1065 	dcr->region_index = 6+1;
1066 	dcr_common_init(dcr);
1067 	dcr->serial_number = ~handle[2];
1068 	dcr->code = NFIT_FIC_BYTEN;
1069 	dcr->windows = 0;
1070 
1071 	/* dcr-descriptor3: pmem */
1072 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1073 			window_size) * 3;
1074 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1075 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1076 			window_size);
1077 	dcr->region_index = 7+1;
1078 	dcr_common_init(dcr);
1079 	dcr->serial_number = ~handle[3];
1080 	dcr->code = NFIT_FIC_BYTEN;
1081 	dcr->windows = 0;
1082 
1083 	offset = offset + offsetof(struct acpi_nfit_control_region,
1084 			window_size) * 4;
1085 	/* bdw0 (spa/dcr0, dimm0) */
1086 	bdw = nfit_buf + offset;
1087 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1088 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1089 	bdw->region_index = 0+1;
1090 	bdw->windows = 1;
1091 	bdw->offset = 0;
1092 	bdw->size = BDW_SIZE;
1093 	bdw->capacity = DIMM_SIZE;
1094 	bdw->start_address = 0;
1095 
1096 	/* bdw1 (spa/dcr1, dimm1) */
1097 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
1098 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1099 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1100 	bdw->region_index = 1+1;
1101 	bdw->windows = 1;
1102 	bdw->offset = 0;
1103 	bdw->size = BDW_SIZE;
1104 	bdw->capacity = DIMM_SIZE;
1105 	bdw->start_address = 0;
1106 
1107 	/* bdw2 (spa/dcr2, dimm2) */
1108 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
1109 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1110 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1111 	bdw->region_index = 2+1;
1112 	bdw->windows = 1;
1113 	bdw->offset = 0;
1114 	bdw->size = BDW_SIZE;
1115 	bdw->capacity = DIMM_SIZE;
1116 	bdw->start_address = 0;
1117 
1118 	/* bdw3 (spa/dcr3, dimm3) */
1119 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
1120 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1121 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1122 	bdw->region_index = 3+1;
1123 	bdw->windows = 1;
1124 	bdw->offset = 0;
1125 	bdw->size = BDW_SIZE;
1126 	bdw->capacity = DIMM_SIZE;
1127 	bdw->start_address = 0;
1128 
1129 	offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
1130 	/* flush0 (dimm0) */
1131 	flush = nfit_buf + offset;
1132 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1133 	flush->header.length = flush_hint_size;
1134 	flush->device_handle = handle[0];
1135 	flush->hint_count = NUM_HINTS;
1136 	for (i = 0; i < NUM_HINTS; i++)
1137 		flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
1138 
1139 	/* flush1 (dimm1) */
1140 	flush = nfit_buf + offset + flush_hint_size * 1;
1141 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1142 	flush->header.length = flush_hint_size;
1143 	flush->device_handle = handle[1];
1144 	flush->hint_count = NUM_HINTS;
1145 	for (i = 0; i < NUM_HINTS; i++)
1146 		flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
1147 
1148 	/* flush2 (dimm2) */
1149 	flush = nfit_buf + offset + flush_hint_size  * 2;
1150 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1151 	flush->header.length = flush_hint_size;
1152 	flush->device_handle = handle[2];
1153 	flush->hint_count = NUM_HINTS;
1154 	for (i = 0; i < NUM_HINTS; i++)
1155 		flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
1156 
1157 	/* flush3 (dimm3) */
1158 	flush = nfit_buf + offset + flush_hint_size * 3;
1159 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1160 	flush->header.length = flush_hint_size;
1161 	flush->device_handle = handle[3];
1162 	flush->hint_count = NUM_HINTS;
1163 	for (i = 0; i < NUM_HINTS; i++)
1164 		flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
1165 
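	/*
	 * Everything below is only written on the second pass through
	 * this function, after nfit_test_probe() has set ->setup_hotplug.
	 * On the first pass this tail of the buffer is still zeroed, so
	 * acpi_nfit_init() stops at the zero-length header; the second
	 * init call then discovers dimm4 and exercises the hotplug path.
	 */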
1166 	if (t->setup_hotplug) {
1167 		offset = offset + flush_hint_size * 4;
1168 		/* dcr-descriptor4: blk */
1169 		dcr = nfit_buf + offset;
1170 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1171 		dcr->header.length = sizeof(struct acpi_nfit_control_region);
1172 		dcr->region_index = 8+1;
1173 		dcr_common_init(dcr);
1174 		dcr->serial_number = ~handle[4];
1175 		dcr->code = NFIT_FIC_BLK;
1176 		dcr->windows = 1;
1177 		dcr->window_size = DCR_SIZE;
1178 		dcr->command_offset = 0;
1179 		dcr->command_size = 8;
1180 		dcr->status_offset = 8;
1181 		dcr->status_size = 4;
1182 
1183 		offset = offset + sizeof(struct acpi_nfit_control_region);
1184 		/* dcr-descriptor4: pmem */
1185 		dcr = nfit_buf + offset;
1186 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1187 		dcr->header.length = offsetof(struct acpi_nfit_control_region,
1188 				window_size);
1189 		dcr->region_index = 9+1;
1190 		dcr_common_init(dcr);
1191 		dcr->serial_number = ~handle[4];
1192 		dcr->code = NFIT_FIC_BYTEN;
1193 		dcr->windows = 0;
1194 
1195 		offset = offset + offsetof(struct acpi_nfit_control_region,
1196 				window_size);
1197 		/* bdw4 (spa/dcr4, dimm4) */
1198 		bdw = nfit_buf + offset;
1199 		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1200 		bdw->header.length = sizeof(struct acpi_nfit_data_region);
1201 		bdw->region_index = 8+1;
1202 		bdw->windows = 1;
1203 		bdw->offset = 0;
1204 		bdw->size = BDW_SIZE;
1205 		bdw->capacity = DIMM_SIZE;
1206 		bdw->start_address = 0;
1207 
1208 		offset = offset + sizeof(struct acpi_nfit_data_region);
1209 		/* spa10 (dcr4) dimm4 */
1210 		spa = nfit_buf + offset;
1211 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1212 		spa->header.length = sizeof(*spa);
1213 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1214 		spa->range_index = 10+1;
1215 		spa->address = t->dcr_dma[4];
1216 		spa->length = DCR_SIZE;
1217 
1218 		/*
1219 		 * spa11 (single-dimm interleave for hotplug, note storage
1220 		 * does not actually alias the related block-data-window
1221 		 * regions)
1222 		 */
1223 		spa = nfit_buf + offset + sizeof(*spa);
1224 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1225 		spa->header.length = sizeof(*spa);
1226 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1227 		spa->range_index = 11+1;
1228 		spa->address = t->spa_set_dma[2];
1229 		spa->length = SPA0_SIZE;
1230 
1231 		/* spa12 (bdw for dcr4) dimm4 */
1232 		spa = nfit_buf + offset + sizeof(*spa) * 2;
1233 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1234 		spa->header.length = sizeof(*spa);
1235 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1236 		spa->range_index = 12+1;
1237 		spa->address = t->dimm_dma[4];
1238 		spa->length = DIMM_SIZE;
1239 
1240 		offset = offset + sizeof(*spa) * 3;
1241 		/* mem-region14 (spa/dcr4, dimm4) */
1242 		memdev = nfit_buf + offset;
1243 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1244 		memdev->header.length = sizeof(*memdev);
1245 		memdev->device_handle = handle[4];
1246 		memdev->physical_id = 4;
1247 		memdev->region_id = 0;
1248 		memdev->range_index = 10+1;
1249 		memdev->region_index = 8+1;
1250 		memdev->region_size = 0;
1251 		memdev->region_offset = 0;
1252 		memdev->address = 0;
1253 		memdev->interleave_index = 0;
1254 		memdev->interleave_ways = 1;
1255 
1256 		/* mem-region15 (spa0, dimm4) */
1257 		memdev = nfit_buf + offset +
1258 				sizeof(struct acpi_nfit_memory_map);
1259 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1260 		memdev->header.length = sizeof(*memdev);
1261 		memdev->device_handle = handle[4];
1262 		memdev->physical_id = 4;
1263 		memdev->region_id = 0;
1264 		memdev->range_index = 11+1;
1265 		memdev->region_index = 9+1;
1266 		memdev->region_size = SPA0_SIZE;
1267 		memdev->region_offset = t->spa_set_dma[2];
1268 		memdev->address = 0;
1269 		memdev->interleave_index = 0;
1270 		memdev->interleave_ways = 1;
1271 
1272 		/* mem-region16 (spa/bdw4, dimm4) */
1273 		memdev = nfit_buf + offset +
1274 				sizeof(struct acpi_nfit_memory_map) * 2;
1275 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1276 		memdev->header.length = sizeof(*memdev);
1277 		memdev->device_handle = handle[4];
1278 		memdev->physical_id = 4;
1279 		memdev->region_id = 0;
1280 		memdev->range_index = 12+1;
1281 		memdev->region_index = 8+1;
1282 		memdev->region_size = 0;
1283 		memdev->region_offset = 0;
1284 		memdev->address = 0;
1285 		memdev->interleave_index = 0;
1286 		memdev->interleave_ways = 1;
1287 
1288 		offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
1289 		/* flush4 (dimm4) */
1290 		flush = nfit_buf + offset;
1291 		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1292 		flush->header.length = flush_hint_size;
1293 		flush->device_handle = handle[4];
1294 		flush->hint_count = NUM_HINTS;
1295 		for (i = 0; i < NUM_HINTS; i++)
1296 			flush->hint_address[i] = t->flush_dma[4]
1297 				+ i * sizeof(u64);
1298 	}
1299 
1300 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE);
1301 
1302 	acpi_desc = &t->acpi_desc;
1303 	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
1304 	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1305 	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1306 	set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
1307 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1308 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1309 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1310 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1311 	set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
1312 }
1313 
1314 static void nfit_test1_setup(struct nfit_test *t)
1315 {
1316 	size_t offset;
1317 	void *nfit_buf = t->nfit_buf;
1318 	struct acpi_nfit_memory_map *memdev;
1319 	struct acpi_nfit_control_region *dcr;
1320 	struct acpi_nfit_system_address *spa;
1321 	struct acpi_nfit_desc *acpi_desc;
1322 
1323 	offset = 0;
1324 	/* spa0 (flat range with no bdw aliasing) */
1325 	spa = nfit_buf + offset;
1326 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1327 	spa->header.length = sizeof(*spa);
1328 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1329 	spa->range_index = 0+1;
1330 	spa->address = t->spa_set_dma[0];
1331 	spa->length = SPA2_SIZE;
1332 
1333 	/* virtual cd region */
1334 	spa = nfit_buf + sizeof(*spa);
1335 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1336 	spa->header.length = sizeof(*spa);
1337 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
1338 	spa->range_index = 0;
1339 	spa->address = t->spa_set_dma[1];
1340 	spa->length = SPA_VCD_SIZE;
1341 
1342 	offset += sizeof(*spa) * 2;
1343 	/* mem-region0 (spa0, dimm0) */
1344 	memdev = nfit_buf + offset;
1345 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1346 	memdev->header.length = sizeof(*memdev);
1347 	memdev->device_handle = 0;
1348 	memdev->physical_id = 0;
1349 	memdev->region_id = 0;
1350 	memdev->range_index = 0+1;
1351 	memdev->region_index = 0+1;
1352 	memdev->region_size = SPA2_SIZE;
1353 	memdev->region_offset = 0;
1354 	memdev->address = 0;
1355 	memdev->interleave_index = 0;
1356 	memdev->interleave_ways = 1;
1357 	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
1358 		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
1359 		| ACPI_NFIT_MEM_NOT_ARMED;
1360 
1361 	offset += sizeof(*memdev);
1362 	/* dcr-descriptor0 */
1363 	dcr = nfit_buf + offset;
1364 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1365 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1366 			window_size);
1367 	dcr->region_index = 0+1;
1368 	dcr_common_init(dcr);
1369 	dcr->serial_number = ~0;
1370 	dcr->code = NFIT_FIC_BYTE;
1371 	dcr->windows = 0;
1372 
1373 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);
1374 
1375 	acpi_desc = &t->acpi_desc;
1376 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1377 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1378 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1379 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1380 }
1381 
1382 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
1383 		void *iobuf, u64 len, int rw)
1384 {
1385 	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
1386 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1387 	struct nd_region *nd_region = &ndbr->nd_region;
1388 	unsigned int lane;
1389 
1390 	lane = nd_region_acquire_lane(nd_region);
1391 	if (rw)
1392 		memcpy(mmio->addr.base + dpa, iobuf, len);
1393 	else {
1394 		memcpy(iobuf, mmio->addr.base + dpa, len);
1395 
1396 		/* give us some coverage of the mmio_flush_range() API */
1397 		mmio_flush_range(mmio->addr.base + dpa, len);
1398 	}
1399 	nd_region_release_lane(nd_region, lane);
1400 
1401 	return 0;
1402 }
1403 
1404 static int nfit_test_probe(struct platform_device *pdev)
1405 {
1406 	struct nvdimm_bus_descriptor *nd_desc;
1407 	struct acpi_nfit_desc *acpi_desc;
1408 	struct device *dev = &pdev->dev;
1409 	struct nfit_test *nfit_test;
1410 	int rc;
1411 
1412 	nfit_test = to_nfit_test(&pdev->dev);
1413 
1414 	/* common alloc */
1415 	if (nfit_test->num_dcr) {
1416 		int num = nfit_test->num_dcr;
1417 
1418 		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
1419 				GFP_KERNEL);
1420 		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1421 				GFP_KERNEL);
1422 		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
1423 				GFP_KERNEL);
1424 		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1425 				GFP_KERNEL);
1426 		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
1427 				GFP_KERNEL);
1428 		nfit_test->label_dma = devm_kcalloc(dev, num,
1429 				sizeof(dma_addr_t), GFP_KERNEL);
1430 		nfit_test->dcr = devm_kcalloc(dev, num,
1431 				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
1432 		nfit_test->dcr_dma = devm_kcalloc(dev, num,
1433 				sizeof(dma_addr_t), GFP_KERNEL);
1434 		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
1435 				&& nfit_test->label_dma && nfit_test->dcr
1436 				&& nfit_test->dcr_dma && nfit_test->flush
1437 				&& nfit_test->flush_dma)
1438 			/* pass */;
1439 		else
1440 			return -ENOMEM;
1441 	}
1442 
1443 	if (nfit_test->num_pm) {
1444 		int num = nfit_test->num_pm;
1445 
1446 		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
1447 				GFP_KERNEL);
1448 		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
1449 				sizeof(dma_addr_t), GFP_KERNEL);
1450 		if (nfit_test->spa_set && nfit_test->spa_set_dma)
1451 			/* pass */;
1452 		else
1453 			return -ENOMEM;
1454 	}
1455 
1456 	/* per-nfit specific alloc */
1457 	if (nfit_test->alloc(nfit_test))
1458 		return -ENOMEM;
1459 
1460 	nfit_test->setup(nfit_test);
1461 	acpi_desc = &nfit_test->acpi_desc;
1462 	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
1463 	acpi_desc->blk_do_io = nfit_test_blk_do_io;
1464 	nd_desc = &acpi_desc->nd_desc;
1465 	nd_desc->provider_name = NULL;
1466 	nd_desc->module = THIS_MODULE;
1467 	nd_desc->ndctl = nfit_test_ctl;
1468 
1469 	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
1470 			nfit_test->nfit_size);
1471 	if (rc)
1472 		return rc;
1473 
1474 	if (nfit_test->setup != nfit_test0_setup)
1475 		return 0;
1476 
1477 	nfit_test->setup_hotplug = 1;
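	/*
	 * For the nfit_test0 instance, run a second pass: append the
	 * hotplug DIMM structures and re-parse the (same-sized) buffer so
	 * the newly visible tables take the libnvdimm hotplug path.
	 */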
1478 	nfit_test->setup(nfit_test);
1479 
1480 	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
1481 			nfit_test->nfit_size);
1482 	if (rc)
1483 		return rc;
1484 
1485 	return 0;
1486 }
1487 
1488 static int nfit_test_remove(struct platform_device *pdev)
1489 {
1490 	return 0;
1491 }
1492 
1493 static void nfit_test_release(struct device *dev)
1494 {
1495 	struct nfit_test *nfit_test = to_nfit_test(dev);
1496 
1497 	kfree(nfit_test);
1498 }
1499 
1500 static const struct platform_device_id nfit_test_id[] = {
1501 	{ KBUILD_MODNAME },
1502 	{ },
1503 };
1504 
1505 static struct platform_driver nfit_test_driver = {
1506 	.probe = nfit_test_probe,
1507 	.remove = nfit_test_remove,
1508 	.driver = {
1509 		.name = KBUILD_MODNAME,
1510 	},
1511 	.id_table = nfit_test_id,
1512 };
1513 
1514 static __init int nfit_test_init(void)
1515 {
1516 	int rc, i;
1517 
1518 	nfit_test_setup(nfit_test_lookup);
1519 
1520 	for (i = 0; i < NUM_NFITS; i++) {
1521 		struct nfit_test *nfit_test;
1522 		struct platform_device *pdev;
1523 
1524 		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
1525 		if (!nfit_test) {
1526 			rc = -ENOMEM;
1527 			goto err_register;
1528 		}
1529 		INIT_LIST_HEAD(&nfit_test->resources);
1530 		switch (i) {
1531 		case 0:
1532 			nfit_test->num_pm = NUM_PM;
1533 			nfit_test->num_dcr = NUM_DCR;
1534 			nfit_test->alloc = nfit_test0_alloc;
1535 			nfit_test->setup = nfit_test0_setup;
1536 			break;
1537 		case 1:
1538 			nfit_test->num_pm = 1;
1539 			nfit_test->alloc = nfit_test1_alloc;
1540 			nfit_test->setup = nfit_test1_setup;
1541 			break;
1542 		default:
1543 			rc = -EINVAL;
1544 			goto err_register;
1545 		}
1546 		pdev = &nfit_test->pdev;
1547 		pdev->name = KBUILD_MODNAME;
1548 		pdev->id = i;
1549 		pdev->dev.release = nfit_test_release;
1550 		rc = platform_device_register(pdev);
1551 		if (rc) {
1552 			put_device(&pdev->dev);
1553 			goto err_register;
1554 		}
1555 
1556 		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1557 		if (rc)
1558 			goto err_register;
1559 
1560 		instances[i] = nfit_test;
1561 	}
1562 
1563 	rc = platform_driver_register(&nfit_test_driver);
1564 	if (rc)
1565 		goto err_register;
1566 	return 0;
1567 
1568  err_register:
1569 	for (i = 0; i < NUM_NFITS; i++)
1570 		if (instances[i])
1571 			platform_device_unregister(&instances[i]->pdev);
1572 	nfit_test_teardown();
1573 	return rc;
1574 }
1575 
1576 static __exit void nfit_test_exit(void)
1577 {
1578 	int i;
1579 
1580 	platform_driver_unregister(&nfit_test_driver);
1581 	for (i = 0; i < NUM_NFITS; i++)
1582 		platform_device_unregister(&instances[i]->pdev);
1583 	nfit_test_teardown();
1584 }
1585 
1586 module_init(nfit_test_init);
1587 module_exit(nfit_test_exit);
1588 MODULE_LICENSE("GPL v2");
1589 MODULE_AUTHOR("Intel Corporation");
1590