xref: /linux/tools/testing/nvdimm/test/nfit.c (revision 59024954a1e7e26b62680e1f2b5725249a6c09f7)
1 /*
2  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/workqueue.h>
17 #include <linux/libnvdimm.h>
18 #include <linux/vmalloc.h>
19 #include <linux/device.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/ndctl.h>
23 #include <linux/sizes.h>
24 #include <linux/list.h>
25 #include <linux/slab.h>
26 #include <nfit.h>
27 #include <nd.h>
28 #include "nfit_test.h"
29 
30 /*
31  * Generate an NFIT table to describe the following topology:
32  *
33  * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
34  *
35  *                     (a)                       (b)            DIMM   BLK-REGION
36  *           +----------+--------------+----------+---------+
37  * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
38  * | imc0 +--+- - - - - region0 - - - -+----------+         +
39  * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
40  *    |      +----------+--------------v----------v         v
41  * +--+---+                            |                    |
42  * | cpu0 |                                    region1
43  * +--+---+                            |                    |
44  *    |      +-------------------------^----------^         ^
45  * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
46  * | imc1 +--+-------------------------+----------+         +
47  * +------+  |                 blk5.0             |  pm1.0  |    3      region5
48  *           +-------------------------+----------+-+-------+
49  *
50  * +--+---+
51  * | cpu1 |
52  * +--+---+                   (Hotplug DIMM)
53  *    |      +----------------------------------------------+
54  * +--+---+  |                 blk6.0/pm7.0                 |    4      region6/7
55  * | imc0 +--+----------------------------------------------+
56  * +------+
57  *
58  *
59  * *) In this layout we have four dimms and two memory controllers in one
60  *    socket.  Each unique interface (BLK or PMEM) to DPA space
61  *    is identified by a region device with a dynamically assigned id.
62  *
63  * *) The first portion of dimm0 and dimm1 is interleaved as REGION0.
64  *    A single PMEM namespace "pm0.0" is created using half of the
65  *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
66  *    allocate from the bottom of a region.  The unallocated
67  *    portion of REGION0 aliases with REGION2 and REGION3.  That
68  *    unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
69  *    "blk3.0") starting at the base of each DIMM to offset (a) in those
70  *    DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
71  *    names that can be assigned to a namespace.
72  *
73  * *) In the last portion of dimm0 and dimm1 we have an interleaved
74  *    SPA range, REGION1, that spans those two dimms as well as dimm2
75  *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
76  *    "pm1.0"; the rest is reclaimed as 4 BLK namespaces (for each
77  *    dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
78  *    "blk5.0".
79  *
80  * *) The portions of dimm2 and dimm3 that do not participate in the
81  *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
82  *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
83  *    Note that BLK namespaces need not be contiguous in DPA-space, and
84  *    can consume aliased capacity from multiple interleave sets.
85  *
86  * BUS1: Legacy NVDIMM (single contiguous range)
87  *
88  *  region2
89  * +---------------------+
90  * |---------------------|
91  * ||       pm2.0       ||
92  * |---------------------|
93  * +---------------------+
94  *
95  * *) An NFIT table may describe a simple system-physical-address range
96  *    with no BLK aliasing.  This type of region may optionally
97  *    reference an NVDIMM.
98  */
99 enum {
100 	NUM_PM  = 3,
101 	NUM_DCR = 5,
102 	NUM_HINTS = 8,
103 	NUM_BDW = NUM_DCR,
104 	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
105 	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */ + 1 /* spa11 iset (hotplug) */,
106 	DIMM_SIZE = SZ_32M,
107 	LABEL_SIZE = SZ_128K,
108 	SPA_VCD_SIZE = SZ_4M,
109 	SPA0_SIZE = DIMM_SIZE,
110 	SPA1_SIZE = DIMM_SIZE*2,
111 	SPA2_SIZE = DIMM_SIZE,
112 	BDW_SIZE = 64 << 8,
113 	DCR_SIZE = 12,
114 	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
115 };
116 
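/*
 * Per-dimm control-region backing store: a block-data-window command
 * register, its status register, and the aperture buffer itself.
 */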
117 struct nfit_test_dcr {
118 	__le64 bdw_addr;
119 	__le32 bdw_status;
120 	__u8 aperature[BDW_SIZE];
121 };
122 
123 #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
124 	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
125 	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
126 
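/*
 * Example: handle[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1) encodes node 0,
 * socket 0, imc 1, channel 0, dimm 1 as (1 << 8) | (0 << 4) | 1 = 0x101.
 */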
127 static u32 handle[NUM_DCR] = {
128 	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
129 	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
130 	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
131 	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
132 	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
133 };
134 
135 struct nfit_test {
136 	struct acpi_nfit_desc acpi_desc;
137 	struct platform_device pdev;
138 	struct list_head resources;
139 	void *nfit_buf;
140 	dma_addr_t nfit_dma;
141 	size_t nfit_size;
142 	int num_dcr;
143 	int num_pm;
144 	void **dimm;
145 	dma_addr_t *dimm_dma;
146 	void **flush;
147 	dma_addr_t *flush_dma;
148 	void **label;
149 	dma_addr_t *label_dma;
150 	void **spa_set;
151 	dma_addr_t *spa_set_dma;
152 	struct nfit_test_dcr **dcr;
153 	dma_addr_t *dcr_dma;
154 	int (*alloc)(struct nfit_test *t);
155 	void (*setup)(struct nfit_test *t);
156 	int setup_hotplug;
157 	struct ars_state {
158 		struct nd_cmd_ars_status *ars_status;
159 		unsigned long deadline;
160 		spinlock_t lock;
161 	} ars_state;
162 };
163 
164 static struct nfit_test *to_nfit_test(struct device *dev)
165 {
166 	struct platform_device *pdev = to_platform_device(dev);
167 
168 	return container_of(pdev, struct nfit_test, pdev);
169 }
170 
171 static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
172 		unsigned int buf_len)
173 {
174 	if (buf_len < sizeof(*nd_cmd))
175 		return -EINVAL;
176 
177 	nd_cmd->status = 0;
178 	nd_cmd->config_size = LABEL_SIZE;
179 	nd_cmd->max_xfer = SZ_4K;
180 
181 	return 0;
182 }
183 
184 static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
185 		*nd_cmd, unsigned int buf_len, void *label)
186 {
187 	unsigned int len, offset = nd_cmd->in_offset;
188 	int rc;
189 
190 	if (buf_len < sizeof(*nd_cmd))
191 		return -EINVAL;
192 	if (offset >= LABEL_SIZE)
193 		return -EINVAL;
194 	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
195 		return -EINVAL;
196 
197 	nd_cmd->status = 0;
198 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
199 	memcpy(nd_cmd->out_buf, label + offset, len);
200 	rc = buf_len - sizeof(*nd_cmd) - len;
201 
202 	return rc;
203 }
204 
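/*
 * The SET_CONFIG_DATA envelope is: header, then ->in_length bytes of
 * label data, then a 4-byte status word, hence the "+ 4" in the length
 * check below.
 */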
205 static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
206 		unsigned int buf_len, void *label)
207 {
208 	unsigned int len, offset = nd_cmd->in_offset;
209 	u32 *status;
210 	int rc;
211 
212 	if (buf_len < sizeof(*nd_cmd))
213 		return -EINVAL;
214 	if (offset >= LABEL_SIZE)
215 		return -EINVAL;
216 	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
217 		return -EINVAL;
218 
219 	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
220 	*status = 0;
221 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
222 	memcpy(label + offset, nd_cmd->in_buf, len);
223 	rc = buf_len - sizeof(*nd_cmd) - (len + 4);
224 
225 	return rc;
226 }
227 
228 #define NFIT_TEST_ARS_RECORDS 4
229 #define NFIT_TEST_CLEAR_ERR_UNIT 256
230 
231 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
232 		unsigned int buf_len)
233 {
234 	if (buf_len < sizeof(*nd_cmd))
235 		return -EINVAL;
236 
237 	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
238 		+ NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
239 	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
240 	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
241 
242 	return 0;
243 }
244 
245 /*
246  * Initialize the ars_state to return an ars_result 1 second in the future with
247  * a 4K error range in the middle of the requested address range.
248  */
249 static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len)
250 {
251 	struct nd_cmd_ars_status *ars_status;
252 	struct nd_ars_record *ars_record;
253 
254 	ars_state->deadline = jiffies + 1*HZ;
255 	ars_status = ars_state->ars_status;
256 	ars_status->status = 0;
257 	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
258 		+ sizeof(struct nd_ars_record);
259 	ars_status->address = addr;
260 	ars_status->length = len;
261 	ars_status->type = ND_ARS_PERSISTENT;
262 	ars_status->num_records = 1;
263 	ars_record = &ars_status->records[0];
264 	ars_record->handle = 0;
265 	ars_record->err_address = addr + len / 2;
266 	ars_record->length = SZ_4K;
267 }
268 
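/*
 * Until the deadline posted by post_ars_status() expires, ARS start and
 * status requests report 'busy'; afterwards the canned single-record
 * scrub result above is returned.
 */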
269 static int nfit_test_cmd_ars_start(struct ars_state *ars_state,
270 		struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
271 		int *cmd_rc)
272 {
273 	if (buf_len < sizeof(*ars_start))
274 		return -EINVAL;
275 
276 	spin_lock(&ars_state->lock);
277 	if (time_before(jiffies, ars_state->deadline)) {
278 		ars_start->status = NFIT_ARS_START_BUSY;
279 		*cmd_rc = -EBUSY;
280 	} else {
281 		ars_start->status = 0;
282 		ars_start->scrub_time = 1;
283 		post_ars_status(ars_state, ars_start->address,
284 				ars_start->length);
285 		*cmd_rc = 0;
286 	}
287 	spin_unlock(&ars_state->lock);
288 
289 	return 0;
290 }
291 
292 static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
293 		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
294 		int *cmd_rc)
295 {
296 	if (buf_len < ars_state->ars_status->out_length)
297 		return -EINVAL;
298 
299 	spin_lock(&ars_state->lock);
300 	if (time_before(jiffies, ars_state->deadline)) {
301 		memset(ars_status, 0, buf_len);
302 		ars_status->status = NFIT_ARS_STATUS_BUSY;
303 		ars_status->out_length = sizeof(*ars_status);
304 		*cmd_rc = -EBUSY;
305 	} else {
306 		memcpy(ars_status, ars_state->ars_status,
307 				ars_state->ars_status->out_length);
308 		*cmd_rc = 0;
309 	}
310 	spin_unlock(&ars_state->lock);
311 	return 0;
312 }
313 
314 static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err,
315 		unsigned int buf_len, int *cmd_rc)
316 {
317 	const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
318 	if (buf_len < sizeof(*clear_err))
319 		return -EINVAL;
320 
321 	if ((clear_err->address & mask) || (clear_err->length & mask))
322 		return -EINVAL;
323 
324 	/*
325 	 * Report 'all clear' success for all commands even though a new
326 	 * scrub will find errors again.  This is enough to have the
327 	 * error removed from the 'badblocks' tracking in the pmem
328 	 * driver.
329 	 */
330 	clear_err->status = 0;
331 	clear_err->cleared = clear_err->length;
332 	*cmd_rc = 0;
333 	return 0;
334 }
335 
336 static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
337 {
338 	static const struct nd_smart_payload smart_data = {
339 		.flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
340 			| ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
341 			| ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
342 		.health = ND_SMART_NON_CRITICAL_HEALTH,
343 		.temperature = 23 * 16,
344 		.spares = 75,
345 		.alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
346 		.life_used = 5,
347 		.shutdown_state = 0,
348 		.vendor_size = 0,
349 	};
350 
351 	if (buf_len < sizeof(*smart))
352 		return -EINVAL;
353 	memcpy(smart->data, &smart_data, sizeof(smart_data));
354 	return 0;
355 }
356 
357 static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
358 		unsigned int buf_len)
359 {
360 	static const struct nd_smart_threshold_payload smart_t_data = {
361 		.alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
362 		.temperature = 40 * 16,
363 		.spares = 5,
364 	};
365 
366 	if (buf_len < sizeof(*smart_t))
367 		return -EINVAL;
368 	memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));
369 	return 0;
370 }
371 
372 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
373 		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
374 		unsigned int buf_len, int *cmd_rc)
375 {
376 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
377 	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
378 	unsigned int func = cmd;
379 	int i, rc = 0, __cmd_rc;
380 
381 	if (!cmd_rc)
382 		cmd_rc = &__cmd_rc;
383 	*cmd_rc = 0;
384 
385 	if (nvdimm) {
386 		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
387 		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
388 
389 		if (!nfit_mem)
390 			return -ENOTTY;
391 
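		/*
		 * ND_CMD_CALL carries a vendor passthrough package: dispatch
		 * on the embedded nd_command and treat nd_payload as the
		 * command buffer.
		 */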
392 		if (cmd == ND_CMD_CALL) {
393 			struct nd_cmd_pkg *call_pkg = buf;
394 
395 			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
396 			buf = (void *) call_pkg->nd_payload;
397 			func = call_pkg->nd_command;
398 			if (call_pkg->nd_family != nfit_mem->family)
399 				return -ENOTTY;
400 		}
401 
402 		if (!test_bit(cmd, &cmd_mask)
403 				|| !test_bit(func, &nfit_mem->dsm_mask))
404 			return -ENOTTY;
405 
406 		/* lookup label space for the given dimm */
407 		for (i = 0; i < ARRAY_SIZE(handle); i++)
408 			if (__to_nfit_memdev(nfit_mem)->device_handle ==
409 					handle[i])
410 				break;
411 		if (i >= ARRAY_SIZE(handle))
412 			return -ENXIO;
413 
414 		switch (func) {
415 		case ND_CMD_GET_CONFIG_SIZE:
416 			rc = nfit_test_cmd_get_config_size(buf, buf_len);
417 			break;
418 		case ND_CMD_GET_CONFIG_DATA:
419 			rc = nfit_test_cmd_get_config_data(buf, buf_len,
420 				t->label[i]);
421 			break;
422 		case ND_CMD_SET_CONFIG_DATA:
423 			rc = nfit_test_cmd_set_config_data(buf, buf_len,
424 				t->label[i]);
425 			break;
426 		case ND_CMD_SMART:
427 			rc = nfit_test_cmd_smart(buf, buf_len);
428 			break;
429 		case ND_CMD_SMART_THRESHOLD:
430 			rc = nfit_test_cmd_smart_threshold(buf, buf_len);
431 			break;
432 		default:
433 			return -ENOTTY;
434 		}
435 	} else {
436 		struct ars_state *ars_state = &t->ars_state;
437 
438 		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
439 			return -ENOTTY;
440 
441 		switch (func) {
442 		case ND_CMD_ARS_CAP:
443 			rc = nfit_test_cmd_ars_cap(buf, buf_len);
444 			break;
445 		case ND_CMD_ARS_START:
446 			rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len,
447 					cmd_rc);
448 			break;
449 		case ND_CMD_ARS_STATUS:
450 			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
451 					cmd_rc);
452 			break;
453 		case ND_CMD_CLEAR_ERROR:
454 			rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc);
455 			break;
456 		default:
457 			return -ENOTTY;
458 		}
459 	}
460 
461 	return rc;
462 }
463 
464 static DEFINE_SPINLOCK(nfit_test_lock);
465 static struct nfit_test *instances[NUM_NFITS];
466 
467 static void release_nfit_res(void *data)
468 {
469 	struct nfit_test_resource *nfit_res = data;
470 	struct resource *res = nfit_res->res;
471 
472 	spin_lock(&nfit_test_lock);
473 	list_del(&nfit_res->list);
474 	spin_unlock(&nfit_test_lock);
475 
476 	vfree(nfit_res->buf);
477 	kfree(res);
478 	kfree(nfit_res);
479 }
480 
481 static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
482 		void *buf)
483 {
484 	struct device *dev = &t->pdev.dev;
485 	struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
486 	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
487 			GFP_KERNEL);
488 	int rc;
489 
490 	if (!res || !buf || !nfit_res)
491 		goto err;
492 	rc = devm_add_action(dev, release_nfit_res, nfit_res);
493 	if (rc)
494 		goto err;
495 	INIT_LIST_HEAD(&nfit_res->list);
496 	memset(buf, 0, size);
497 	nfit_res->dev = dev;
498 	nfit_res->buf = buf;
499 	nfit_res->res = res;
500 	res->start = *dma;
501 	res->end = *dma + size - 1;
502 	res->name = "NFIT";
503 	spin_lock(&nfit_test_lock);
504 	list_add(&nfit_res->list, &t->resources);
505 	spin_unlock(&nfit_test_lock);
506 
507 	return nfit_res->buf;
508  err:
509 	if (buf)
510 		vfree(buf);
511 	kfree(res);
512 	kfree(nfit_res);
513 	return NULL;
514 }
515 
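/*
 * Back a simulated resource with vmalloc() memory and advertise the
 * kernel virtual address as its "physical" base, so lookups can resolve
 * either form back to the buffer.
 */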
516 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
517 {
518 	void *buf = vmalloc(size);
519 
520 	*dma = (unsigned long) buf;
521 	return __test_alloc(t, size, dma, buf);
522 }
523 
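/*
 * Find the nfit_test_resource, if any, that covers @addr, matching
 * either the advertised fake physical range or the backing vmalloc
 * address range.
 */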
524 static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
525 {
526 	int i;
527 
528 	for (i = 0; i < ARRAY_SIZE(instances); i++) {
529 		struct nfit_test_resource *n, *nfit_res = NULL;
530 		struct nfit_test *t = instances[i];
531 
532 		if (!t)
533 			continue;
534 		spin_lock(&nfit_test_lock);
535 		list_for_each_entry(n, &t->resources, list) {
536 			if (addr >= n->res->start && (addr < n->res->start
537 						+ resource_size(n->res))) {
538 				nfit_res = n;
539 				break;
540 			} else if (addr >= (unsigned long) n->buf
541 					&& (addr < (unsigned long) n->buf
542 						+ resource_size(n->res))) {
543 				nfit_res = n;
544 				break;
545 			}
546 		}
547 		spin_unlock(&nfit_test_lock);
548 		if (nfit_res)
549 			return nfit_res;
550 	}
551 
552 	return NULL;
553 }
554 
555 static int ars_state_init(struct device *dev, struct ars_state *ars_state)
556 {
557 	ars_state->ars_status = devm_kzalloc(dev,
558 			sizeof(struct nd_cmd_ars_status)
559 			+ sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS,
560 			GFP_KERNEL);
561 	if (!ars_state->ars_status)
562 		return -ENOMEM;
563 	spin_lock_init(&ars_state->lock);
564 	return 0;
565 }
566 
567 static int nfit_test0_alloc(struct nfit_test *t)
568 {
569 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
570 			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
571 			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
572 			+ offsetof(struct acpi_nfit_control_region,
573 					window_size) * NUM_DCR
574 			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
575 			+ (sizeof(struct acpi_nfit_flush_address)
576 					+ sizeof(u64) * NUM_HINTS) * NUM_DCR;
577 	int i;
578 
579 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
580 	if (!t->nfit_buf)
581 		return -ENOMEM;
582 	t->nfit_size = nfit_size;
583 
584 	t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
585 	if (!t->spa_set[0])
586 		return -ENOMEM;
587 
588 	t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
589 	if (!t->spa_set[1])
590 		return -ENOMEM;
591 
592 	t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
593 	if (!t->spa_set[2])
594 		return -ENOMEM;
595 
596 	for (i = 0; i < NUM_DCR; i++) {
597 		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
598 		if (!t->dimm[i])
599 			return -ENOMEM;
600 
601 		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
602 		if (!t->label[i])
603 			return -ENOMEM;
604 		sprintf(t->label[i], "label%d", i);
605 
606 		t->flush[i] = test_alloc(t, max(PAGE_SIZE,
607 					sizeof(u64) * NUM_HINTS),
608 				&t->flush_dma[i]);
609 		if (!t->flush[i])
610 			return -ENOMEM;
611 	}
612 
613 	for (i = 0; i < NUM_DCR; i++) {
614 		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
615 		if (!t->dcr[i])
616 			return -ENOMEM;
617 	}
618 
619 	return ars_state_init(&t->pdev.dev, &t->ars_state);
620 }
621 
622 static int nfit_test1_alloc(struct nfit_test *t)
623 {
624 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
625 		+ sizeof(struct acpi_nfit_memory_map)
626 		+ offsetof(struct acpi_nfit_control_region, window_size);
627 
628 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
629 	if (!t->nfit_buf)
630 		return -ENOMEM;
631 	t->nfit_size = nfit_size;
632 
633 	t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
634 	if (!t->spa_set[0])
635 		return -ENOMEM;
636 
637 	t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
638 	if (!t->spa_set[1])
639 		return -ENOMEM;
640 
641 	return ars_state_init(&t->pdev.dev, &t->ars_state);
642 }
643 
644 static void dcr_common_init(struct acpi_nfit_control_region *dcr)
645 {
646 	dcr->vendor_id = 0xabcd;
647 	dcr->device_id = 0;
648 	dcr->revision_id = 1;
649 	dcr->valid_fields = 1;
650 	dcr->manufacturing_location = 0xa;
651 	dcr->manufacturing_date = cpu_to_be16(2016);
652 }
653 
654 static void nfit_test0_setup(struct nfit_test *t)
655 {
656 	const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
657 		+ (sizeof(u64) * NUM_HINTS);
658 	struct acpi_nfit_desc *acpi_desc;
659 	struct acpi_nfit_memory_map *memdev;
660 	void *nfit_buf = t->nfit_buf;
661 	struct acpi_nfit_system_address *spa;
662 	struct acpi_nfit_control_region *dcr;
663 	struct acpi_nfit_data_region *bdw;
664 	struct acpi_nfit_flush_address *flush;
665 	unsigned int offset, i;
666 
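	/*
	 * Static table layout: 10 SPA ranges, 14 memory-map entries, 4 BLK
	 * control regions, 4 (truncated) PMEM control regions, 4 block data
	 * windows, and 4 flush-hint tables, followed by the extra hotplug
	 * dimm entries when ->setup_hotplug is set.
	 */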
667 	/*
668 	 * spa0 (interleave first half of dimm0 and dimm1, note storage
669 	 * does not actually alias the related block-data-window
670 	 * regions)
671 	 */
672 	spa = nfit_buf;
673 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
674 	spa->header.length = sizeof(*spa);
675 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
676 	spa->range_index = 0+1;
677 	spa->address = t->spa_set_dma[0];
678 	spa->length = SPA0_SIZE;
679 
680 	/*
681 	 * spa1 (interleave last half of the 4 DIMMS, note storage
682 	 * does not actually alias the related block-data-window
683 	 * regions)
684 	 */
685 	spa = nfit_buf + sizeof(*spa);
686 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
687 	spa->header.length = sizeof(*spa);
688 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
689 	spa->range_index = 1+1;
690 	spa->address = t->spa_set_dma[1];
691 	spa->length = SPA1_SIZE;
692 
693 	/* spa2 (dcr0) dimm0 */
694 	spa = nfit_buf + sizeof(*spa) * 2;
695 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
696 	spa->header.length = sizeof(*spa);
697 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
698 	spa->range_index = 2+1;
699 	spa->address = t->dcr_dma[0];
700 	spa->length = DCR_SIZE;
701 
702 	/* spa3 (dcr1) dimm1 */
703 	spa = nfit_buf + sizeof(*spa) * 3;
704 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
705 	spa->header.length = sizeof(*spa);
706 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
707 	spa->range_index = 3+1;
708 	spa->address = t->dcr_dma[1];
709 	spa->length = DCR_SIZE;
710 
711 	/* spa4 (dcr2) dimm2 */
712 	spa = nfit_buf + sizeof(*spa) * 4;
713 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
714 	spa->header.length = sizeof(*spa);
715 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
716 	spa->range_index = 4+1;
717 	spa->address = t->dcr_dma[2];
718 	spa->length = DCR_SIZE;
719 
720 	/* spa5 (dcr3) dimm3 */
721 	spa = nfit_buf + sizeof(*spa) * 5;
722 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
723 	spa->header.length = sizeof(*spa);
724 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
725 	spa->range_index = 5+1;
726 	spa->address = t->dcr_dma[3];
727 	spa->length = DCR_SIZE;
728 
729 	/* spa6 (bdw for dcr0) dimm0 */
730 	spa = nfit_buf + sizeof(*spa) * 6;
731 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
732 	spa->header.length = sizeof(*spa);
733 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
734 	spa->range_index = 6+1;
735 	spa->address = t->dimm_dma[0];
736 	spa->length = DIMM_SIZE;
737 
738 	/* spa7 (bdw for dcr1) dimm1 */
739 	spa = nfit_buf + sizeof(*spa) * 7;
740 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
741 	spa->header.length = sizeof(*spa);
742 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
743 	spa->range_index = 7+1;
744 	spa->address = t->dimm_dma[1];
745 	spa->length = DIMM_SIZE;
746 
747 	/* spa8 (bdw for dcr2) dimm2 */
748 	spa = nfit_buf + sizeof(*spa) * 8;
749 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
750 	spa->header.length = sizeof(*spa);
751 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
752 	spa->range_index = 8+1;
753 	spa->address = t->dimm_dma[2];
754 	spa->length = DIMM_SIZE;
755 
756 	/* spa9 (bdw for dcr3) dimm3 */
757 	spa = nfit_buf + sizeof(*spa) * 9;
758 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
759 	spa->header.length = sizeof(*spa);
760 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
761 	spa->range_index = 9+1;
762 	spa->address = t->dimm_dma[3];
763 	spa->length = DIMM_SIZE;
764 
765 	offset = sizeof(*spa) * 10;
766 	/* mem-region0 (spa0, dimm0) */
767 	memdev = nfit_buf + offset;
768 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
769 	memdev->header.length = sizeof(*memdev);
770 	memdev->device_handle = handle[0];
771 	memdev->physical_id = 0;
772 	memdev->region_id = 0;
773 	memdev->range_index = 0+1;
774 	memdev->region_index = 4+1;
775 	memdev->region_size = SPA0_SIZE/2;
776 	memdev->region_offset = t->spa_set_dma[0];
777 	memdev->address = 0;
778 	memdev->interleave_index = 0;
779 	memdev->interleave_ways = 2;
780 
781 	/* mem-region1 (spa0, dimm1) */
782 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
783 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
784 	memdev->header.length = sizeof(*memdev);
785 	memdev->device_handle = handle[1];
786 	memdev->physical_id = 1;
787 	memdev->region_id = 0;
788 	memdev->range_index = 0+1;
789 	memdev->region_index = 5+1;
790 	memdev->region_size = SPA0_SIZE/2;
791 	memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
792 	memdev->address = 0;
793 	memdev->interleave_index = 0;
794 	memdev->interleave_ways = 2;
795 
796 	/* mem-region2 (spa1, dimm0) */
797 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
798 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
799 	memdev->header.length = sizeof(*memdev);
800 	memdev->device_handle = handle[0];
801 	memdev->physical_id = 0;
802 	memdev->region_id = 1;
803 	memdev->range_index = 1+1;
804 	memdev->region_index = 4+1;
805 	memdev->region_size = SPA1_SIZE/4;
806 	memdev->region_offset = t->spa_set_dma[1];
807 	memdev->address = SPA0_SIZE/2;
808 	memdev->interleave_index = 0;
809 	memdev->interleave_ways = 4;
810 
811 	/* mem-region3 (spa1, dimm1) */
812 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
813 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
814 	memdev->header.length = sizeof(*memdev);
815 	memdev->device_handle = handle[1];
816 	memdev->physical_id = 1;
817 	memdev->region_id = 1;
818 	memdev->range_index = 1+1;
819 	memdev->region_index = 5+1;
820 	memdev->region_size = SPA1_SIZE/4;
821 	memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
822 	memdev->address = SPA0_SIZE/2;
823 	memdev->interleave_index = 0;
824 	memdev->interleave_ways = 4;
825 
826 	/* mem-region4 (spa1, dimm2) */
827 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
828 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
829 	memdev->header.length = sizeof(*memdev);
830 	memdev->device_handle = handle[2];
831 	memdev->physical_id = 2;
832 	memdev->region_id = 0;
833 	memdev->range_index = 1+1;
834 	memdev->region_index = 6+1;
835 	memdev->region_size = SPA1_SIZE/4;
836 	memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
837 	memdev->address = SPA0_SIZE/2;
838 	memdev->interleave_index = 0;
839 	memdev->interleave_ways = 4;
840 
841 	/* mem-region5 (spa1, dimm3) */
842 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
843 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
844 	memdev->header.length = sizeof(*memdev);
845 	memdev->device_handle = handle[3];
846 	memdev->physical_id = 3;
847 	memdev->region_id = 0;
848 	memdev->range_index = 1+1;
849 	memdev->region_index = 7+1;
850 	memdev->region_size = SPA1_SIZE/4;
851 	memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
852 	memdev->address = SPA0_SIZE/2;
853 	memdev->interleave_index = 0;
854 	memdev->interleave_ways = 4;
855 
856 	/* mem-region6 (spa/dcr0, dimm0) */
857 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
858 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
859 	memdev->header.length = sizeof(*memdev);
860 	memdev->device_handle = handle[0];
861 	memdev->physical_id = 0;
862 	memdev->region_id = 0;
863 	memdev->range_index = 2+1;
864 	memdev->region_index = 0+1;
865 	memdev->region_size = 0;
866 	memdev->region_offset = 0;
867 	memdev->address = 0;
868 	memdev->interleave_index = 0;
869 	memdev->interleave_ways = 1;
870 
871 	/* mem-region7 (spa/dcr1, dimm1) */
872 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
873 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
874 	memdev->header.length = sizeof(*memdev);
875 	memdev->device_handle = handle[1];
876 	memdev->physical_id = 1;
877 	memdev->region_id = 0;
878 	memdev->range_index = 3+1;
879 	memdev->region_index = 1+1;
880 	memdev->region_size = 0;
881 	memdev->region_offset = 0;
882 	memdev->address = 0;
883 	memdev->interleave_index = 0;
884 	memdev->interleave_ways = 1;
885 
886 	/* mem-region8 (spa/dcr2, dimm2) */
887 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
888 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
889 	memdev->header.length = sizeof(*memdev);
890 	memdev->device_handle = handle[2];
891 	memdev->physical_id = 2;
892 	memdev->region_id = 0;
893 	memdev->range_index = 4+1;
894 	memdev->region_index = 2+1;
895 	memdev->region_size = 0;
896 	memdev->region_offset = 0;
897 	memdev->address = 0;
898 	memdev->interleave_index = 0;
899 	memdev->interleave_ways = 1;
900 
901 	/* mem-region9 (spa/dcr3, dimm3) */
902 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
903 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
904 	memdev->header.length = sizeof(*memdev);
905 	memdev->device_handle = handle[3];
906 	memdev->physical_id = 3;
907 	memdev->region_id = 0;
908 	memdev->range_index = 5+1;
909 	memdev->region_index = 3+1;
910 	memdev->region_size = 0;
911 	memdev->region_offset = 0;
912 	memdev->address = 0;
913 	memdev->interleave_index = 0;
914 	memdev->interleave_ways = 1;
915 
916 	/* mem-region10 (spa/bdw0, dimm0) */
917 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
918 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
919 	memdev->header.length = sizeof(*memdev);
920 	memdev->device_handle = handle[0];
921 	memdev->physical_id = 0;
922 	memdev->region_id = 0;
923 	memdev->range_index = 6+1;
924 	memdev->region_index = 0+1;
925 	memdev->region_size = 0;
926 	memdev->region_offset = 0;
927 	memdev->address = 0;
928 	memdev->interleave_index = 0;
929 	memdev->interleave_ways = 1;
930 
931 	/* mem-region11 (spa/bdw1, dimm1) */
932 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
933 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
934 	memdev->header.length = sizeof(*memdev);
935 	memdev->device_handle = handle[1];
936 	memdev->physical_id = 1;
937 	memdev->region_id = 0;
938 	memdev->range_index = 7+1;
939 	memdev->region_index = 1+1;
940 	memdev->region_size = 0;
941 	memdev->region_offset = 0;
942 	memdev->address = 0;
943 	memdev->interleave_index = 0;
944 	memdev->interleave_ways = 1;
945 
946 	/* mem-region12 (spa/bdw2, dimm2) */
947 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
948 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
949 	memdev->header.length = sizeof(*memdev);
950 	memdev->device_handle = handle[2];
951 	memdev->physical_id = 2;
952 	memdev->region_id = 0;
953 	memdev->range_index = 8+1;
954 	memdev->region_index = 2+1;
955 	memdev->region_size = 0;
956 	memdev->region_offset = 0;
957 	memdev->address = 0;
958 	memdev->interleave_index = 0;
959 	memdev->interleave_ways = 1;
960 
961 	/* mem-region13 (spa/bdw3, dimm3) */
962 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
963 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
964 	memdev->header.length = sizeof(*memdev);
965 	memdev->device_handle = handle[3];
966 	memdev->physical_id = 3;
967 	memdev->region_id = 0;
968 	memdev->range_index = 9+1;
969 	memdev->region_index = 3+1;
970 	memdev->region_size = 0;
971 	memdev->region_offset = 0;
972 	memdev->address = 0;
973 	memdev->interleave_index = 0;
974 	memdev->interleave_ways = 1;
975 
976 	offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
977 	/* dcr-descriptor0: blk */
978 	dcr = nfit_buf + offset;
979 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
980 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
981 	dcr->region_index = 0+1;
982 	dcr_common_init(dcr);
983 	dcr->serial_number = ~handle[0];
984 	dcr->code = NFIT_FIC_BLK;
985 	dcr->windows = 1;
986 	dcr->window_size = DCR_SIZE;
987 	dcr->command_offset = 0;
988 	dcr->command_size = 8;
989 	dcr->status_offset = 8;
990 	dcr->status_size = 4;
991 
992 	/* dcr-descriptor1: blk */
993 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
994 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
995 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
996 	dcr->region_index = 1+1;
997 	dcr_common_init(dcr);
998 	dcr->serial_number = ~handle[1];
999 	dcr->code = NFIT_FIC_BLK;
1000 	dcr->windows = 1;
1001 	dcr->window_size = DCR_SIZE;
1002 	dcr->command_offset = 0;
1003 	dcr->command_size = 8;
1004 	dcr->status_offset = 8;
1005 	dcr->status_size = 4;
1006 
1007 	/* dcr-descriptor2: blk */
1008 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
1009 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1010 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1011 	dcr->region_index = 2+1;
1012 	dcr_common_init(dcr);
1013 	dcr->serial_number = ~handle[2];
1014 	dcr->code = NFIT_FIC_BLK;
1015 	dcr->windows = 1;
1016 	dcr->window_size = DCR_SIZE;
1017 	dcr->command_offset = 0;
1018 	dcr->command_size = 8;
1019 	dcr->status_offset = 8;
1020 	dcr->status_size = 4;
1021 
1022 	/* dcr-descriptor3: blk */
1023 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
1024 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1025 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1026 	dcr->region_index = 3+1;
1027 	dcr_common_init(dcr);
1028 	dcr->serial_number = ~handle[3];
1029 	dcr->code = NFIT_FIC_BLK;
1030 	dcr->windows = 1;
1031 	dcr->window_size = DCR_SIZE;
1032 	dcr->command_offset = 0;
1033 	dcr->command_size = 8;
1034 	dcr->status_offset = 8;
1035 	dcr->status_size = 4;
1036 
1037 	offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
1038 	/* dcr-descriptor0: pmem */
1039 	dcr = nfit_buf + offset;
1040 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1041 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1042 			window_size);
1043 	dcr->region_index = 4+1;
1044 	dcr_common_init(dcr);
1045 	dcr->serial_number = ~handle[0];
1046 	dcr->code = NFIT_FIC_BYTEN;
1047 	dcr->windows = 0;
1048 
1049 	/* dcr-descriptor1: pmem */
1050 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1051 			window_size);
1052 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1053 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1054 			window_size);
1055 	dcr->region_index = 5+1;
1056 	dcr_common_init(dcr);
1057 	dcr->serial_number = ~handle[1];
1058 	dcr->code = NFIT_FIC_BYTEN;
1059 	dcr->windows = 0;
1060 
1061 	/* dcr-descriptor2: pmem */
1062 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1063 			window_size) * 2;
1064 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1065 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1066 			window_size);
1067 	dcr->region_index = 6+1;
1068 	dcr_common_init(dcr);
1069 	dcr->serial_number = ~handle[2];
1070 	dcr->code = NFIT_FIC_BYTEN;
1071 	dcr->windows = 0;
1072 
1073 	/* dcr-descriptor3: pmem */
1074 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1075 			window_size) * 3;
1076 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1077 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1078 			window_size);
1079 	dcr->region_index = 7+1;
1080 	dcr_common_init(dcr);
1081 	dcr->serial_number = ~handle[3];
1082 	dcr->code = NFIT_FIC_BYTEN;
1083 	dcr->windows = 0;
1084 
1085 	offset = offset + offsetof(struct acpi_nfit_control_region,
1086 			window_size) * 4;
1087 	/* bdw0 (spa/dcr0, dimm0) */
1088 	bdw = nfit_buf + offset;
1089 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1090 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1091 	bdw->region_index = 0+1;
1092 	bdw->windows = 1;
1093 	bdw->offset = 0;
1094 	bdw->size = BDW_SIZE;
1095 	bdw->capacity = DIMM_SIZE;
1096 	bdw->start_address = 0;
1097 
1098 	/* bdw1 (spa/dcr1, dimm1) */
1099 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
1100 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1101 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1102 	bdw->region_index = 1+1;
1103 	bdw->windows = 1;
1104 	bdw->offset = 0;
1105 	bdw->size = BDW_SIZE;
1106 	bdw->capacity = DIMM_SIZE;
1107 	bdw->start_address = 0;
1108 
1109 	/* bdw2 (spa/dcr2, dimm2) */
1110 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
1111 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1112 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1113 	bdw->region_index = 2+1;
1114 	bdw->windows = 1;
1115 	bdw->offset = 0;
1116 	bdw->size = BDW_SIZE;
1117 	bdw->capacity = DIMM_SIZE;
1118 	bdw->start_address = 0;
1119 
1120 	/* bdw3 (spa/dcr3, dimm3) */
1121 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
1122 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1123 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1124 	bdw->region_index = 3+1;
1125 	bdw->windows = 1;
1126 	bdw->offset = 0;
1127 	bdw->size = BDW_SIZE;
1128 	bdw->capacity = DIMM_SIZE;
1129 	bdw->start_address = 0;
1130 
1131 	offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
1132 	/* flush0 (dimm0) */
1133 	flush = nfit_buf + offset;
1134 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1135 	flush->header.length = flush_hint_size;
1136 	flush->device_handle = handle[0];
1137 	flush->hint_count = NUM_HINTS;
1138 	for (i = 0; i < NUM_HINTS; i++)
1139 		flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
1140 
1141 	/* flush1 (dimm1) */
1142 	flush = nfit_buf + offset + flush_hint_size * 1;
1143 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1144 	flush->header.length = flush_hint_size;
1145 	flush->device_handle = handle[1];
1146 	flush->hint_count = NUM_HINTS;
1147 	for (i = 0; i < NUM_HINTS; i++)
1148 		flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
1149 
1150 	/* flush2 (dimm2) */
1151 	flush = nfit_buf + offset + flush_hint_size * 2;
1152 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1153 	flush->header.length = flush_hint_size;
1154 	flush->device_handle = handle[2];
1155 	flush->hint_count = NUM_HINTS;
1156 	for (i = 0; i < NUM_HINTS; i++)
1157 		flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
1158 
1159 	/* flush3 (dimm3) */
1160 	flush = nfit_buf + offset + flush_hint_size * 3;
1161 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1162 	flush->header.length = flush_hint_size;
1163 	flush->device_handle = handle[3];
1164 	flush->hint_count = NUM_HINTS;
1165 	for (i = 0; i < NUM_HINTS; i++)
1166 		flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
1167 
1168 	if (t->setup_hotplug) {
1169 		offset = offset + flush_hint_size * 4;
1170 		/* dcr-descriptor4: blk */
1171 		dcr = nfit_buf + offset;
1172 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1173 		dcr->header.length = sizeof(struct acpi_nfit_control_region);
1174 		dcr->region_index = 8+1;
1175 		dcr_common_init(dcr);
1176 		dcr->serial_number = ~handle[4];
1177 		dcr->code = NFIT_FIC_BLK;
1178 		dcr->windows = 1;
1179 		dcr->window_size = DCR_SIZE;
1180 		dcr->command_offset = 0;
1181 		dcr->command_size = 8;
1182 		dcr->status_offset = 8;
1183 		dcr->status_size = 4;
1184 
1185 		offset = offset + sizeof(struct acpi_nfit_control_region);
1186 		/* dcr-descriptor4: pmem */
1187 		dcr = nfit_buf + offset;
1188 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1189 		dcr->header.length = offsetof(struct acpi_nfit_control_region,
1190 				window_size);
1191 		dcr->region_index = 9+1;
1192 		dcr_common_init(dcr);
1193 		dcr->serial_number = ~handle[4];
1194 		dcr->code = NFIT_FIC_BYTEN;
1195 		dcr->windows = 0;
1196 
1197 		offset = offset + offsetof(struct acpi_nfit_control_region,
1198 				window_size);
1199 		/* bdw4 (spa/dcr4, dimm4) */
1200 		bdw = nfit_buf + offset;
1201 		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1202 		bdw->header.length = sizeof(struct acpi_nfit_data_region);
1203 		bdw->region_index = 8+1;
1204 		bdw->windows = 1;
1205 		bdw->offset = 0;
1206 		bdw->size = BDW_SIZE;
1207 		bdw->capacity = DIMM_SIZE;
1208 		bdw->start_address = 0;
1209 
1210 		offset = offset + sizeof(struct acpi_nfit_data_region);
1211 		/* spa10 (dcr4) dimm4 */
1212 		spa = nfit_buf + offset;
1213 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1214 		spa->header.length = sizeof(*spa);
1215 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1216 		spa->range_index = 10+1;
1217 		spa->address = t->dcr_dma[4];
1218 		spa->length = DCR_SIZE;
1219 
1220 		/*
1221 		 * spa11 (single-dimm interleave for hotplug, note storage
1222 		 * does not actually alias the related block-data-window
1223 		 * regions)
1224 		 */
1225 		spa = nfit_buf + offset + sizeof(*spa);
1226 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1227 		spa->header.length = sizeof(*spa);
1228 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1229 		spa->range_index = 11+1;
1230 		spa->address = t->spa_set_dma[2];
1231 		spa->length = SPA0_SIZE;
1232 
1233 		/* spa12 (bdw for dcr4) dimm4 */
1234 		spa = nfit_buf + offset + sizeof(*spa) * 2;
1235 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1236 		spa->header.length = sizeof(*spa);
1237 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1238 		spa->range_index = 12+1;
1239 		spa->address = t->dimm_dma[4];
1240 		spa->length = DIMM_SIZE;
1241 
1242 		offset = offset + sizeof(*spa) * 3;
1243 		/* mem-region14 (spa/dcr4, dimm4) */
1244 		memdev = nfit_buf + offset;
1245 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1246 		memdev->header.length = sizeof(*memdev);
1247 		memdev->device_handle = handle[4];
1248 		memdev->physical_id = 4;
1249 		memdev->region_id = 0;
1250 		memdev->range_index = 10+1;
1251 		memdev->region_index = 8+1;
1252 		memdev->region_size = 0;
1253 		memdev->region_offset = 0;
1254 		memdev->address = 0;
1255 		memdev->interleave_index = 0;
1256 		memdev->interleave_ways = 1;
1257 
1258 		/* mem-region15 (spa0, dimm4) */
1259 		memdev = nfit_buf + offset +
1260 				sizeof(struct acpi_nfit_memory_map);
1261 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1262 		memdev->header.length = sizeof(*memdev);
1263 		memdev->device_handle = handle[4];
1264 		memdev->physical_id = 4;
1265 		memdev->region_id = 0;
1266 		memdev->range_index = 11+1;
1267 		memdev->region_index = 9+1;
1268 		memdev->region_size = SPA0_SIZE;
1269 		memdev->region_offset = t->spa_set_dma[2];
1270 		memdev->address = 0;
1271 		memdev->interleave_index = 0;
1272 		memdev->interleave_ways = 1;
1273 
1274 		/* mem-region16 (spa/bdw4, dimm4) */
1275 		memdev = nfit_buf + offset +
1276 				sizeof(struct acpi_nfit_memory_map) * 2;
1277 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1278 		memdev->header.length = sizeof(*memdev);
1279 		memdev->device_handle = handle[4];
1280 		memdev->physical_id = 4;
1281 		memdev->region_id = 0;
1282 		memdev->range_index = 12+1;
1283 		memdev->region_index = 8+1;
1284 		memdev->region_size = 0;
1285 		memdev->region_offset = 0;
1286 		memdev->address = 0;
1287 		memdev->interleave_index = 0;
1288 		memdev->interleave_ways = 1;
1289 
1290 		offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
1291 		/* flush4 (dimm4) */
1292 		flush = nfit_buf + offset;
1293 		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1294 		flush->header.length = flush_hint_size;
1295 		flush->device_handle = handle[4];
1296 		flush->hint_count = NUM_HINTS;
1297 		for (i = 0; i < NUM_HINTS; i++)
1298 			flush->hint_address[i] = t->flush_dma[4]
1299 				+ i * sizeof(u64);
1300 	}
1301 
1302 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE);
1303 
1304 	acpi_desc = &t->acpi_desc;
1305 	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
1306 	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1307 	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1308 	set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
1309 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1310 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1311 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1312 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1313 	set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
1314 }
1315 
1316 static void nfit_test1_setup(struct nfit_test *t)
1317 {
1318 	size_t offset;
1319 	void *nfit_buf = t->nfit_buf;
1320 	struct acpi_nfit_memory_map *memdev;
1321 	struct acpi_nfit_control_region *dcr;
1322 	struct acpi_nfit_system_address *spa;
1323 	struct acpi_nfit_desc *acpi_desc;
1324 
1325 	offset = 0;
1326 	/* spa0 (flat range with no bdw aliasing) */
1327 	spa = nfit_buf + offset;
1328 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1329 	spa->header.length = sizeof(*spa);
1330 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1331 	spa->range_index = 0+1;
1332 	spa->address = t->spa_set_dma[0];
1333 	spa->length = SPA2_SIZE;
1334 
1335 	/* virtual cd region */
1336 	spa = nfit_buf + sizeof(*spa);
1337 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1338 	spa->header.length = sizeof(*spa);
1339 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
1340 	spa->range_index = 0;
1341 	spa->address = t->spa_set_dma[1];
1342 	spa->length = SPA_VCD_SIZE;
1343 
1344 	offset += sizeof(*spa) * 2;
1345 	/* mem-region0 (spa0, dimm0) */
1346 	memdev = nfit_buf + offset;
1347 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1348 	memdev->header.length = sizeof(*memdev);
1349 	memdev->device_handle = 0;
1350 	memdev->physical_id = 0;
1351 	memdev->region_id = 0;
1352 	memdev->range_index = 0+1;
1353 	memdev->region_index = 0+1;
1354 	memdev->region_size = SPA2_SIZE;
1355 	memdev->region_offset = 0;
1356 	memdev->address = 0;
1357 	memdev->interleave_index = 0;
1358 	memdev->interleave_ways = 1;
1359 	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
1360 		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
1361 		| ACPI_NFIT_MEM_NOT_ARMED;
1362 
1363 	offset += sizeof(*memdev);
1364 	/* dcr-descriptor0 */
1365 	dcr = nfit_buf + offset;
1366 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1367 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1368 			window_size);
1369 	dcr->region_index = 0+1;
1370 	dcr_common_init(dcr);
1371 	dcr->serial_number = ~0;
1372 	dcr->code = NFIT_FIC_BYTE;
1373 	dcr->windows = 0;
1374 
1375 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);
1376 
1377 	acpi_desc = &t->acpi_desc;
1378 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1379 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1380 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1381 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1382 }
1383 
1384 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
1385 		void *iobuf, u64 len, int rw)
1386 {
1387 	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
1388 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1389 	struct nd_region *nd_region = &ndbr->nd_region;
1390 	unsigned int lane;
1391 
1392 	lane = nd_region_acquire_lane(nd_region);
1393 	if (rw)
1394 		memcpy(mmio->addr.base + dpa, iobuf, len);
1395 	else {
1396 		memcpy(iobuf, mmio->addr.base + dpa, len);
1397 
1398 		/* give us some coverage of the mmio_flush_range() API */
1399 		mmio_flush_range(mmio->addr.base + dpa, len);
1400 	}
1401 	nd_region_release_lane(nd_region, lane);
1402 
1403 	return 0;
1404 }
1405 
1406 static int nfit_test_probe(struct platform_device *pdev)
1407 {
1408 	struct nvdimm_bus_descriptor *nd_desc;
1409 	struct acpi_nfit_desc *acpi_desc;
1410 	struct device *dev = &pdev->dev;
1411 	struct nfit_test *nfit_test;
1412 	int rc;
1413 
1414 	nfit_test = to_nfit_test(&pdev->dev);
1415 
1416 	/* common alloc */
1417 	if (nfit_test->num_dcr) {
1418 		int num = nfit_test->num_dcr;
1419 
1420 		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
1421 				GFP_KERNEL);
1422 		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1423 				GFP_KERNEL);
1424 		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
1425 				GFP_KERNEL);
1426 		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1427 				GFP_KERNEL);
1428 		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
1429 				GFP_KERNEL);
1430 		nfit_test->label_dma = devm_kcalloc(dev, num,
1431 				sizeof(dma_addr_t), GFP_KERNEL);
1432 		nfit_test->dcr = devm_kcalloc(dev, num,
1433 				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
1434 		nfit_test->dcr_dma = devm_kcalloc(dev, num,
1435 				sizeof(dma_addr_t), GFP_KERNEL);
1436 		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
1437 				&& nfit_test->label_dma && nfit_test->dcr
1438 				&& nfit_test->dcr_dma && nfit_test->flush
1439 				&& nfit_test->flush_dma)
1440 			/* pass */;
1441 		else
1442 			return -ENOMEM;
1443 	}
1444 
1445 	if (nfit_test->num_pm) {
1446 		int num = nfit_test->num_pm;
1447 
1448 		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
1449 				GFP_KERNEL);
1450 		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
1451 				sizeof(dma_addr_t), GFP_KERNEL);
1452 		if (nfit_test->spa_set && nfit_test->spa_set_dma)
1453 			/* pass */;
1454 		else
1455 			return -ENOMEM;
1456 	}
1457 
1458 	/* per-nfit specific alloc */
1459 	if (nfit_test->alloc(nfit_test))
1460 		return -ENOMEM;
1461 
1462 	nfit_test->setup(nfit_test);
1463 	acpi_desc = &nfit_test->acpi_desc;
1464 	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
1465 	acpi_desc->blk_do_io = nfit_test_blk_do_io;
1466 	nd_desc = &acpi_desc->nd_desc;
1467 	nd_desc->provider_name = NULL;
1468 	nd_desc->module = THIS_MODULE;
1469 	nd_desc->ndctl = nfit_test_ctl;
1470 
1471 	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
1472 			nfit_test->nfit_size);
1473 	if (rc)
1474 		return rc;
1475 
1476 	if (nfit_test->setup != nfit_test0_setup)
1477 		return 0;
1478 
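	/*
	 * For instance 0, rebuild the table with the hotplug entries
	 * included and re-run acpi_nfit_init() to exercise the dimm
	 * hot-add path.
	 */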
1479 	flush_work(&acpi_desc->work);
1480 	nfit_test->setup_hotplug = 1;
1481 	nfit_test->setup(nfit_test);
1482 
1483 	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
1484 			nfit_test->nfit_size);
1485 	if (rc)
1486 		return rc;
1487 
1488 	return 0;
1489 }
1490 
1491 static int nfit_test_remove(struct platform_device *pdev)
1492 {
1493 	return 0;
1494 }
1495 
1496 static void nfit_test_release(struct device *dev)
1497 {
1498 	struct nfit_test *nfit_test = to_nfit_test(dev);
1499 
1500 	kfree(nfit_test);
1501 }
1502 
1503 static const struct platform_device_id nfit_test_id[] = {
1504 	{ KBUILD_MODNAME },
1505 	{ },
1506 };
1507 
1508 static struct platform_driver nfit_test_driver = {
1509 	.probe = nfit_test_probe,
1510 	.remove = nfit_test_remove,
1511 	.driver = {
1512 		.name = KBUILD_MODNAME,
1513 	},
1514 	.id_table = nfit_test_id,
1515 };
1516 
1517 static __init int nfit_test_init(void)
1518 {
1519 	int rc, i;
1520 
1521 	nfit_test_setup(nfit_test_lookup);
1522 
1523 	for (i = 0; i < NUM_NFITS; i++) {
1524 		struct nfit_test *nfit_test;
1525 		struct platform_device *pdev;
1526 
1527 		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
1528 		if (!nfit_test) {
1529 			rc = -ENOMEM;
1530 			goto err_register;
1531 		}
1532 		INIT_LIST_HEAD(&nfit_test->resources);
1533 		switch (i) {
1534 		case 0:
1535 			nfit_test->num_pm = NUM_PM;
1536 			nfit_test->num_dcr = NUM_DCR;
1537 			nfit_test->alloc = nfit_test0_alloc;
1538 			nfit_test->setup = nfit_test0_setup;
1539 			break;
1540 		case 1:
1541 			nfit_test->num_pm = 1;
1542 			nfit_test->alloc = nfit_test1_alloc;
1543 			nfit_test->setup = nfit_test1_setup;
1544 			break;
1545 		default:
1546 			rc = -EINVAL;
1547 			goto err_register;
1548 		}
1549 		pdev = &nfit_test->pdev;
1550 		pdev->name = KBUILD_MODNAME;
1551 		pdev->id = i;
1552 		pdev->dev.release = nfit_test_release;
1553 		rc = platform_device_register(pdev);
1554 		if (rc) {
1555 			put_device(&pdev->dev);
1556 			goto err_register;
1557 		}
1558 
1559 		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1560 		if (rc)
1561 			goto err_register;
1562 
1563 		instances[i] = nfit_test;
1564 	}
1565 
1566 	rc = platform_driver_register(&nfit_test_driver);
1567 	if (rc)
1568 		goto err_register;
1569 	return 0;
1570 
1571  err_register:
1572 	for (i = 0; i < NUM_NFITS; i++)
1573 		if (instances[i])
1574 			platform_device_unregister(&instances[i]->pdev);
1575 	nfit_test_teardown();
1576 	return rc;
1577 }
1578 
1579 static __exit void nfit_test_exit(void)
1580 {
1581 	int i;
1582 
1583 	platform_driver_unregister(&nfit_test_driver);
1584 	for (i = 0; i < NUM_NFITS; i++)
1585 		platform_device_unregister(&instances[i]->pdev);
1586 	nfit_test_teardown();
1587 }
1588 
1589 module_init(nfit_test_init);
1590 module_exit(nfit_test_exit);
1591 MODULE_LICENSE("GPL v2");
1592 MODULE_AUTHOR("Intel Corporation");
1593