xref: /linux/tools/testing/nvdimm/test/nfit.c (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
1 /*
2  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/libnvdimm.h>
17 #include <linux/vmalloc.h>
18 #include <linux/device.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/ndctl.h>
22 #include <linux/sizes.h>
23 #include <linux/list.h>
24 #include <linux/slab.h>
25 #include <nfit.h>
26 #include <nd.h>
27 #include "nfit_test.h"
28 
29 /*
30  * Generate an NFIT table to describe the following topology:
31  *
32  * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
33  *
34  *                     (a)                       (b)            DIMM   BLK-REGION
35  *           +----------+--------------+----------+---------+
36  * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
37  * | imc0 +--+- - - - - region0 - - - -+----------+         +
38  * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
39  *    |      +----------+--------------v----------v         v
40  * +--+---+                            |                    |
41  * | cpu0 |                                    region1
42  * +--+---+                            |                    |
43  *    |      +-------------------------^----------^         ^
44  * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
45  * | imc1 +--+-------------------------+----------+         +
46  * +------+  |                 blk5.0             |  pm1.0  |    3      region5
47  *           +-------------------------+----------+-+-------+
48  *
49  * +--+---+
50  * | cpu1 |
51  * +--+---+                   (Hotplug DIMM)
52  *    |      +----------------------------------------------+
53  * +--+---+  |                 blk6.0/pm7.0                 |    4      region6/7
54  * | imc0 +--+----------------------------------------------+
55  * +------+
56  *
57  *
58  * *) In this layout we have four dimms and two memory controllers in one
59  *    socket.  Each unique interface (BLK or PMEM) to DPA space
60  *    is identified by a region device with a dynamically assigned id.
61  *
62  * *) The first portion of dimm0 and dimm1 is interleaved as REGION0.
63  *    A single PMEM namespace "pm0.0" is created using half of the
64  *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
65  *    allocate from the bottom of a region.  The unallocated
66  *    portion of REGION0 aliases with REGION2 and REGION3.  That
67  *    unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
68  *    "blk3.0") starting at the base of each DIMM to offset (a) in those
69  *    DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
70  *    names that can be assigned to a namespace.
71  *
72  * *) In the last portion of dimm0 and dimm1 we have an interleaved
73  *    SPA range, REGION1, that spans those two dimms as well as dimm2
74  *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
75  *    "pm1.0"; the rest is reclaimed as 4 BLK namespaces (one for each
76  *    dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
77  *    "blk5.0".
78  *
79  * *) The portions of dimm2 and dimm3 that do not participate in the
80  *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
81  *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
82  *    Note, that BLK namespaces need not be contiguous in DPA-space, and
83  *    can consume aliased capacity from multiple interleave sets.
84  *
85  * BUS1: Legacy NVDIMM (single contiguous range)
86  *
87  *  region2
88  * +---------------------+
89  * |---------------------|
90  * ||       pm2.0       ||
91  * |---------------------|
92  * +---------------------+
93  *
94  * *) An NFIT table may describe a simple system-physical-address range
95  *    with no BLK aliasing.  This type of region may optionally
96  *    reference an NVDIMM.
97  */
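/*
 * Sizing note for the enum below: NUM_DCR counts the four dimms in the
 * diagram plus the hotplug dimm, each of which gets a control region
 * and a block-data-window region, and NUM_PM counts the three PMEM
 * SPA ranges (spa0, spa1, and the hotplug range backed by spa_set[2]).
 */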
98 enum {
99 	NUM_PM  = 3,
100 	NUM_DCR = 5,
101 	NUM_BDW = NUM_DCR,
102 	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
103 	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */ + 1 /* spa11 iset */,
104 	DIMM_SIZE = SZ_32M,
105 	LABEL_SIZE = SZ_128K,
106 	SPA0_SIZE = DIMM_SIZE,
107 	SPA1_SIZE = DIMM_SIZE*2,
108 	SPA2_SIZE = DIMM_SIZE,
109 	BDW_SIZE = 64 << 8, /* 16K block-data-window aperture */
110 	DCR_SIZE = 12, /* 8 byte command + 4 byte status */
111 	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
112 };
113 
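/*
 * Mock of a dimm control region: an 8-byte command register (the
 * block-data-window address), a 4-byte status register, and the data
 * window itself.  This mirrors the DCR descriptors built in
 * nfit_test0_setup() (command_size = 8, status_offset = 8,
 * status_size = 4), which is also why DCR_SIZE is 12 bytes.
 */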
114 struct nfit_test_dcr {
115 	__le64 bdw_addr;
116 	__le32 bdw_status;
117 	__u8 aperture[BDW_SIZE];
118 };
119 
120 #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
121 	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
122 	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
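/*
 * Worked example of the handle encoding (node[27:16], socket[15:12],
 * imc[11:8], chan[7:4], dimm[3:0]): NFIT_DIMM_HANDLE(0, 0, 1, 0, 1)
 * evaluates to 0x101, i.e. handle[3] below.  handle[4] sits on
 * socket 1 and is only referenced by the hotplug portion of the table.
 */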
123 
124 static u32 handle[NUM_DCR] = {
125 	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
126 	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
127 	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
128 	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
129 	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
130 };
131 
132 struct nfit_test {
133 	struct acpi_nfit_desc acpi_desc;
134 	struct platform_device pdev;
135 	struct list_head resources;
136 	void *nfit_buf;
137 	dma_addr_t nfit_dma;
138 	size_t nfit_size;
139 	int num_dcr;
140 	int num_pm;
141 	void **dimm;
142 	dma_addr_t *dimm_dma;
143 	void **flush;
144 	dma_addr_t *flush_dma;
145 	void **label;
146 	dma_addr_t *label_dma;
147 	void **spa_set;
148 	dma_addr_t *spa_set_dma;
149 	struct nfit_test_dcr **dcr;
150 	dma_addr_t *dcr_dma;
151 	int (*alloc)(struct nfit_test *t);
152 	void (*setup)(struct nfit_test *t);
153 	int setup_hotplug;
154 	struct ars_state {
155 		struct nd_cmd_ars_status *ars_status;
156 		unsigned long deadline;
157 		spinlock_t lock;
158 	} ars_state;
159 };
160 
161 static struct nfit_test *to_nfit_test(struct device *dev)
162 {
163 	struct platform_device *pdev = to_platform_device(dev);
164 
165 	return container_of(pdev, struct nfit_test, pdev);
166 }
167 
168 static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
169 		unsigned int buf_len)
170 {
171 	if (buf_len < sizeof(*nd_cmd))
172 		return -EINVAL;
173 
174 	nd_cmd->status = 0;
175 	nd_cmd->config_size = LABEL_SIZE;
176 	nd_cmd->max_xfer = SZ_4K;
177 
178 	return 0;
179 }
180 
181 static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
182 		*nd_cmd, unsigned int buf_len, void *label)
183 {
184 	unsigned int len, offset = nd_cmd->in_offset;
185 	int rc;
186 
187 	if (buf_len < sizeof(*nd_cmd))
188 		return -EINVAL;
189 	if (offset >= LABEL_SIZE)
190 		return -EINVAL;
191 	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
192 		return -EINVAL;
193 
194 	nd_cmd->status = 0;
195 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
196 	memcpy(nd_cmd->out_buf, label + offset, len);
197 	rc = buf_len - sizeof(*nd_cmd) - len;
198 
199 	return rc;
200 }
201 
202 static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
203 		unsigned int buf_len, void *label)
204 {
205 	unsigned int len, offset = nd_cmd->in_offset;
206 	u32 *status;
207 	int rc;
208 
209 	if (buf_len < sizeof(*nd_cmd))
210 		return -EINVAL;
211 	if (offset >= LABEL_SIZE)
212 		return -EINVAL;
213 	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
214 		return -EINVAL;
215 
216 	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
217 	*status = 0;
218 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
219 	memcpy(label + offset, nd_cmd->in_buf, len);
220 	rc = buf_len - sizeof(*nd_cmd) - (len + 4);
221 
222 	return rc;
223 }
224 
225 #define NFIT_TEST_ARS_RECORDS 4
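/*
 * The clear-error unit advertised by ars_cap below; clear_error
 * requests that are not aligned to this granularity are rejected.
 */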
226 #define NFIT_TEST_CLEAR_ERR_UNIT 256
227 
228 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
229 		unsigned int buf_len)
230 {
231 	if (buf_len < sizeof(*nd_cmd))
232 		return -EINVAL;
233 
234 	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
235 		+ NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
236 	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
237 	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
238 
239 	return 0;
240 }
241 
242 /*
243  * Initialize the ars_state to return an ars_result 1 second in the future with
244  * a 4K error range in the middle of the requested address range.
245  */
246 static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len)
247 {
248 	struct nd_cmd_ars_status *ars_status;
249 	struct nd_ars_record *ars_record;
250 
251 	ars_state->deadline = jiffies + 1*HZ;
252 	ars_status = ars_state->ars_status;
253 	ars_status->status = 0;
254 	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
255 		+ sizeof(struct nd_ars_record);
256 	ars_status->address = addr;
257 	ars_status->length = len;
258 	ars_status->type = ND_ARS_PERSISTENT;
259 	ars_status->num_records = 1;
260 	ars_record = &ars_status->records[0];
261 	ars_record->handle = 0;
262 	ars_record->err_address = addr + len / 2;
263 	ars_record->length = SZ_4K;
264 }
265 
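/*
 * An ARS start that arrives while a previous scrub is still pending
 * (i.e. before the one second deadline armed by post_ars_status())
 * reports busy; otherwise the canned result is re-armed for the newly
 * requested range.
 */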
266 static int nfit_test_cmd_ars_start(struct ars_state *ars_state,
267 		struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
268 		int *cmd_rc)
269 {
270 	if (buf_len < sizeof(*ars_start))
271 		return -EINVAL;
272 
273 	spin_lock(&ars_state->lock);
274 	if (time_before(jiffies, ars_state->deadline)) {
275 		ars_start->status = NFIT_ARS_START_BUSY;
276 		*cmd_rc = -EBUSY;
277 	} else {
278 		ars_start->status = 0;
279 		ars_start->scrub_time = 1;
280 		post_ars_status(ars_state, ars_start->address,
281 				ars_start->length);
282 		*cmd_rc = 0;
283 	}
284 	spin_unlock(&ars_state->lock);
285 
286 	return 0;
287 }
288 
289 static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
290 		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
291 		int *cmd_rc)
292 {
293 	if (buf_len < ars_state->ars_status->out_length)
294 		return -EINVAL;
295 
296 	spin_lock(&ars_state->lock);
297 	if (time_before(jiffies, ars_state->deadline)) {
298 		memset(ars_status, 0, buf_len);
299 		ars_status->status = NFIT_ARS_STATUS_BUSY;
300 		ars_status->out_length = sizeof(*ars_status);
301 		*cmd_rc = -EBUSY;
302 	} else {
303 		memcpy(ars_status, ars_state->ars_status,
304 				ars_state->ars_status->out_length);
305 		*cmd_rc = 0;
306 	}
307 	spin_unlock(&ars_state->lock);
308 	return 0;
309 }
310 
311 static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err,
312 		unsigned int buf_len, int *cmd_rc)
313 {
314 	const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
315 	if (buf_len < sizeof(*clear_err))
316 		return -EINVAL;
317 
318 	if ((clear_err->address & mask) || (clear_err->length & mask))
319 		return -EINVAL;
320 
321 	/*
322 	 * Report 'all clear' success for all commands even though a new
323 	 * scrub will find errors again.  This is enough to have the
324 	 * error removed from the 'badblocks' tracking in the pmem
325 	 * driver.
326 	 */
327 	clear_err->status = 0;
328 	clear_err->cleared = clear_err->length;
329 	*cmd_rc = 0;
330 	return 0;
331 }
332 
333 static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
334 {
335 	static const struct nd_smart_payload smart_data = {
336 		.flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
337 			| ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
338 			| ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
339 		.health = ND_SMART_NON_CRITICAL_HEALTH,
340 		.temperature = 23 * 16,
341 		.spares = 75,
342 		.alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
343 		.life_used = 5,
344 		.shutdown_state = 0,
345 		.vendor_size = 0,
346 	};
347 
348 	if (buf_len < sizeof(*smart))
349 		return -EINVAL;
350 	memcpy(smart->data, &smart_data, sizeof(smart_data));
351 	return 0;
352 }
353 
354 static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
355 		unsigned int buf_len)
356 {
357 	static const struct nd_smart_threshold_payload smart_t_data = {
358 		.alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
359 		.temperature = 40 * 16,
360 		.spares = 5,
361 	};
362 
363 	if (buf_len < sizeof(*smart_t))
364 		return -EINVAL;
365 	memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));
366 	return 0;
367 }
368 
369 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
370 		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
371 		unsigned int buf_len, int *cmd_rc)
372 {
373 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
374 	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
375 	unsigned int func = cmd;
376 	int i, rc = 0, __cmd_rc;
377 
378 	if (!cmd_rc)
379 		cmd_rc = &__cmd_rc;
380 	*cmd_rc = 0;
381 
382 	if (nvdimm) {
383 		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
384 		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
385 
386 		if (!nfit_mem)
387 			return -ENOTTY;
388 
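		/*
		 * ND_CMD_CALL is the passthrough case: unwrap the
		 * nd_cmd_pkg so 'func' and 'buf' refer to the embedded
		 * sub-command and payload, then dispatch as usual below.
		 */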
389 		if (cmd == ND_CMD_CALL) {
390 			struct nd_cmd_pkg *call_pkg = buf;
391 
392 			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
393 			buf = (void *) call_pkg->nd_payload;
394 			func = call_pkg->nd_command;
395 			if (call_pkg->nd_family != nfit_mem->family)
396 				return -ENOTTY;
397 		}
398 
399 		if (!test_bit(cmd, &cmd_mask)
400 				|| !test_bit(func, &nfit_mem->dsm_mask))
401 			return -ENOTTY;
402 
403 		/* lookup label space for the given dimm */
404 		for (i = 0; i < ARRAY_SIZE(handle); i++)
405 			if (__to_nfit_memdev(nfit_mem)->device_handle ==
406 					handle[i])
407 				break;
408 		if (i >= ARRAY_SIZE(handle))
409 			return -ENXIO;
410 
411 		switch (func) {
412 		case ND_CMD_GET_CONFIG_SIZE:
413 			rc = nfit_test_cmd_get_config_size(buf, buf_len);
414 			break;
415 		case ND_CMD_GET_CONFIG_DATA:
416 			rc = nfit_test_cmd_get_config_data(buf, buf_len,
417 				t->label[i]);
418 			break;
419 		case ND_CMD_SET_CONFIG_DATA:
420 			rc = nfit_test_cmd_set_config_data(buf, buf_len,
421 				t->label[i]);
422 			break;
423 		case ND_CMD_SMART:
424 			rc = nfit_test_cmd_smart(buf, buf_len);
425 			break;
426 		case ND_CMD_SMART_THRESHOLD:
427 			rc = nfit_test_cmd_smart_threshold(buf, buf_len);
428 			break;
429 		default:
430 			return -ENOTTY;
431 		}
432 	} else {
433 		struct ars_state *ars_state = &t->ars_state;
434 
435 		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
436 			return -ENOTTY;
437 
438 		switch (func) {
439 		case ND_CMD_ARS_CAP:
440 			rc = nfit_test_cmd_ars_cap(buf, buf_len);
441 			break;
442 		case ND_CMD_ARS_START:
443 			rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len,
444 					cmd_rc);
445 			break;
446 		case ND_CMD_ARS_STATUS:
447 			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
448 					cmd_rc);
449 			break;
450 		case ND_CMD_CLEAR_ERROR:
451 			rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc);
452 			break;
453 		default:
454 			return -ENOTTY;
455 		}
456 	}
457 
458 	return rc;
459 }
460 
461 static DEFINE_SPINLOCK(nfit_test_lock);
462 static struct nfit_test *instances[NUM_NFITS];
463 
464 static void release_nfit_res(void *data)
465 {
466 	struct nfit_test_resource *nfit_res = data;
467 	struct resource *res = nfit_res->res;
468 
469 	spin_lock(&nfit_test_lock);
470 	list_del(&nfit_res->list);
471 	spin_unlock(&nfit_test_lock);
472 
473 	if (is_vmalloc_addr(nfit_res->buf))
474 		vfree(nfit_res->buf);
475 	else
476 		dma_free_coherent(nfit_res->dev, resource_size(res),
477 				nfit_res->buf, res->start);
478 	kfree(res);
479 	kfree(nfit_res);
480 }
481 
482 static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
483 		void *buf)
484 {
485 	struct device *dev = &t->pdev.dev;
486 	struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
487 	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
488 			GFP_KERNEL);
489 	int rc;
490 
491 	if (!res || !buf || !nfit_res)
492 		goto err;
493 	rc = devm_add_action(dev, release_nfit_res, nfit_res);
494 	if (rc)
495 		goto err;
496 	INIT_LIST_HEAD(&nfit_res->list);
497 	memset(buf, 0, size);
498 	nfit_res->dev = dev;
499 	nfit_res->buf = buf;
500 	nfit_res->res = res;
501 	res->start = *dma;
502 	res->end = *dma + size - 1;
503 	res->name = "NFIT";
504 	spin_lock(&nfit_test_lock);
505 	list_add(&nfit_res->list, &t->resources);
506 	spin_unlock(&nfit_test_lock);
507 
508 	return nfit_res->buf;
509  err:
510 	if (buf && !is_vmalloc_addr(buf))
511 		dma_free_coherent(dev, size, buf, *dma);
512 	else if (buf)
513 		vfree(buf);
514 	kfree(res);
515 	kfree(nfit_res);
516 	return NULL;
517 }
518 
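/*
 * test_alloc() hands back the vmalloc address of the buffer as its
 * "dma" address.  Every allocation is also recorded as a fake resource
 * so that nfit_test_lookup(), registered via nfit_test_setup() at
 * module init, can translate those addresses back to test buffers.
 */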
519 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
520 {
521 	void *buf = vmalloc(size);
522 
523 	*dma = (unsigned long) buf;
524 	return __test_alloc(t, size, dma, buf);
525 }
526 
527 static void *test_alloc_coherent(struct nfit_test *t, size_t size,
528 		dma_addr_t *dma)
529 {
530 	struct device *dev = &t->pdev.dev;
531 	void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
532 
533 	return __test_alloc(t, size, dma, buf);
534 }
535 
536 static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
537 {
538 	int i;
539 
540 	for (i = 0; i < ARRAY_SIZE(instances); i++) {
541 		struct nfit_test_resource *n, *nfit_res = NULL;
542 		struct nfit_test *t = instances[i];
543 
544 		if (!t)
545 			continue;
546 		spin_lock(&nfit_test_lock);
547 		list_for_each_entry(n, &t->resources, list) {
548 			if (addr >= n->res->start && (addr < n->res->start
549 						+ resource_size(n->res))) {
550 				nfit_res = n;
551 				break;
552 			} else if (addr >= (unsigned long) n->buf
553 					&& (addr < (unsigned long) n->buf
554 						+ resource_size(n->res))) {
555 				nfit_res = n;
556 				break;
557 			}
558 		}
559 		spin_unlock(&nfit_test_lock);
560 		if (nfit_res)
561 			return nfit_res;
562 	}
563 
564 	return NULL;
565 }
566 
567 static int ars_state_init(struct device *dev, struct ars_state *ars_state)
568 {
569 	ars_state->ars_status = devm_kzalloc(dev,
570 			sizeof(struct nd_cmd_ars_status)
571 			+ sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS,
572 			GFP_KERNEL);
573 	if (!ars_state->ars_status)
574 		return -ENOMEM;
575 	spin_lock_init(&ars_state->lock);
576 	return 0;
577 }
578 
579 static int nfit_test0_alloc(struct nfit_test *t)
580 {
581 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
582 			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
583 			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
584 			+ offsetof(struct acpi_nfit_control_region,
585 					window_size) * NUM_DCR
586 			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
587 			+ sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
588 	int i;
589 
590 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
591 	if (!t->nfit_buf)
592 		return -ENOMEM;
593 	t->nfit_size = nfit_size;
594 
595 	t->spa_set[0] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[0]);
596 	if (!t->spa_set[0])
597 		return -ENOMEM;
598 
599 	t->spa_set[1] = test_alloc_coherent(t, SPA1_SIZE, &t->spa_set_dma[1]);
600 	if (!t->spa_set[1])
601 		return -ENOMEM;
602 
603 	t->spa_set[2] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[2]);
604 	if (!t->spa_set[2])
605 		return -ENOMEM;
606 
607 	for (i = 0; i < NUM_DCR; i++) {
608 		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
609 		if (!t->dimm[i])
610 			return -ENOMEM;
611 
612 		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
613 		if (!t->label[i])
614 			return -ENOMEM;
615 		sprintf(t->label[i], "label%d", i);
616 
617 		t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
618 		if (!t->flush[i])
619 			return -ENOMEM;
620 	}
621 
622 	for (i = 0; i < NUM_DCR; i++) {
623 		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
624 		if (!t->dcr[i])
625 			return -ENOMEM;
626 	}
627 
628 	return ars_state_init(&t->pdev.dev, &t->ars_state);
629 }
630 
631 static int nfit_test1_alloc(struct nfit_test *t)
632 {
633 	size_t nfit_size = sizeof(struct acpi_nfit_system_address)
634 		+ sizeof(struct acpi_nfit_memory_map)
635 		+ offsetof(struct acpi_nfit_control_region, window_size);
636 
637 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
638 	if (!t->nfit_buf)
639 		return -ENOMEM;
640 	t->nfit_size = nfit_size;
641 
642 	t->spa_set[0] = test_alloc_coherent(t, SPA2_SIZE, &t->spa_set_dma[0]);
643 	if (!t->spa_set[0])
644 		return -ENOMEM;
645 
646 	return ars_state_init(&t->pdev.dev, &t->ars_state);
647 }
648 
649 static void nfit_test0_setup(struct nfit_test *t)
650 {
651 	struct acpi_nfit_desc *acpi_desc;
652 	struct acpi_nfit_memory_map *memdev;
653 	void *nfit_buf = t->nfit_buf;
654 	struct acpi_nfit_system_address *spa;
655 	struct acpi_nfit_control_region *dcr;
656 	struct acpi_nfit_data_region *bdw;
657 	struct acpi_nfit_flush_address *flush;
658 	unsigned int offset;
659 
660 	/*
661 	 * spa0 (interleave first half of dimm0 and dimm1, note storage
662 	 * does not actually alias the related block-data-window
663 	 * regions)
664 	 */
665 	spa = nfit_buf;
666 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
667 	spa->header.length = sizeof(*spa);
668 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
669 	spa->range_index = 0+1;
670 	spa->address = t->spa_set_dma[0];
671 	spa->length = SPA0_SIZE;
672 
673 	/*
674 	 * spa1 (interleave last half of the 4 DIMMS, note storage
675 	 * does not actually alias the related block-data-window
676 	 * regions)
677 	 */
678 	spa = nfit_buf + sizeof(*spa);
679 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
680 	spa->header.length = sizeof(*spa);
681 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
682 	spa->range_index = 1+1;
683 	spa->address = t->spa_set_dma[1];
684 	spa->length = SPA1_SIZE;
685 
686 	/* spa2 (dcr0) dimm0 */
687 	spa = nfit_buf + sizeof(*spa) * 2;
688 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
689 	spa->header.length = sizeof(*spa);
690 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
691 	spa->range_index = 2+1;
692 	spa->address = t->dcr_dma[0];
693 	spa->length = DCR_SIZE;
694 
695 	/* spa3 (dcr1) dimm1 */
696 	spa = nfit_buf + sizeof(*spa) * 3;
697 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
698 	spa->header.length = sizeof(*spa);
699 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
700 	spa->range_index = 3+1;
701 	spa->address = t->dcr_dma[1];
702 	spa->length = DCR_SIZE;
703 
704 	/* spa4 (dcr2) dimm2 */
705 	spa = nfit_buf + sizeof(*spa) * 4;
706 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
707 	spa->header.length = sizeof(*spa);
708 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
709 	spa->range_index = 4+1;
710 	spa->address = t->dcr_dma[2];
711 	spa->length = DCR_SIZE;
712 
713 	/* spa5 (dcr3) dimm3 */
714 	spa = nfit_buf + sizeof(*spa) * 5;
715 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
716 	spa->header.length = sizeof(*spa);
717 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
718 	spa->range_index = 5+1;
719 	spa->address = t->dcr_dma[3];
720 	spa->length = DCR_SIZE;
721 
722 	/* spa6 (bdw for dcr0) dimm0 */
723 	spa = nfit_buf + sizeof(*spa) * 6;
724 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
725 	spa->header.length = sizeof(*spa);
726 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
727 	spa->range_index = 6+1;
728 	spa->address = t->dimm_dma[0];
729 	spa->length = DIMM_SIZE;
730 
731 	/* spa7 (bdw for dcr1) dimm1 */
732 	spa = nfit_buf + sizeof(*spa) * 7;
733 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
734 	spa->header.length = sizeof(*spa);
735 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
736 	spa->range_index = 7+1;
737 	spa->address = t->dimm_dma[1];
738 	spa->length = DIMM_SIZE;
739 
740 	/* spa8 (bdw for dcr2) dimm2 */
741 	spa = nfit_buf + sizeof(*spa) * 8;
742 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
743 	spa->header.length = sizeof(*spa);
744 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
745 	spa->range_index = 8+1;
746 	spa->address = t->dimm_dma[2];
747 	spa->length = DIMM_SIZE;
748 
749 	/* spa9 (bdw for dcr3) dimm3 */
750 	spa = nfit_buf + sizeof(*spa) * 9;
751 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
752 	spa->header.length = sizeof(*spa);
753 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
754 	spa->range_index = 9+1;
755 	spa->address = t->dimm_dma[3];
756 	spa->length = DIMM_SIZE;
757 
758 	offset = sizeof(*spa) * 10;
759 	/* mem-region0 (spa0, dimm0) */
760 	memdev = nfit_buf + offset;
761 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
762 	memdev->header.length = sizeof(*memdev);
763 	memdev->device_handle = handle[0];
764 	memdev->physical_id = 0;
765 	memdev->region_id = 0;
766 	memdev->range_index = 0+1;
767 	memdev->region_index = 4+1;
768 	memdev->region_size = SPA0_SIZE/2;
769 	memdev->region_offset = t->spa_set_dma[0];
770 	memdev->address = 0;
771 	memdev->interleave_index = 0;
772 	memdev->interleave_ways = 2;
773 
774 	/* mem-region1 (spa0, dimm1) */
775 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
776 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
777 	memdev->header.length = sizeof(*memdev);
778 	memdev->device_handle = handle[1];
779 	memdev->physical_id = 1;
780 	memdev->region_id = 0;
781 	memdev->range_index = 0+1;
782 	memdev->region_index = 5+1;
783 	memdev->region_size = SPA0_SIZE/2;
784 	memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
785 	memdev->address = 0;
786 	memdev->interleave_index = 0;
787 	memdev->interleave_ways = 2;
788 
789 	/* mem-region2 (spa1, dimm0) */
790 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
791 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
792 	memdev->header.length = sizeof(*memdev);
793 	memdev->device_handle = handle[0];
794 	memdev->physical_id = 0;
795 	memdev->region_id = 1;
796 	memdev->range_index = 1+1;
797 	memdev->region_index = 4+1;
798 	memdev->region_size = SPA1_SIZE/4;
799 	memdev->region_offset = t->spa_set_dma[1];
800 	memdev->address = SPA0_SIZE/2;
801 	memdev->interleave_index = 0;
802 	memdev->interleave_ways = 4;
803 
804 	/* mem-region3 (spa1, dimm1) */
805 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
806 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
807 	memdev->header.length = sizeof(*memdev);
808 	memdev->device_handle = handle[1];
809 	memdev->physical_id = 1;
810 	memdev->region_id = 1;
811 	memdev->range_index = 1+1;
812 	memdev->region_index = 5+1;
813 	memdev->region_size = SPA1_SIZE/4;
814 	memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
815 	memdev->address = SPA0_SIZE/2;
816 	memdev->interleave_index = 0;
817 	memdev->interleave_ways = 4;
818 
819 	/* mem-region4 (spa1, dimm2) */
820 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
821 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
822 	memdev->header.length = sizeof(*memdev);
823 	memdev->device_handle = handle[2];
824 	memdev->physical_id = 2;
825 	memdev->region_id = 0;
826 	memdev->range_index = 1+1;
827 	memdev->region_index = 6+1;
828 	memdev->region_size = SPA1_SIZE/4;
829 	memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
830 	memdev->address = SPA0_SIZE/2;
831 	memdev->interleave_index = 0;
832 	memdev->interleave_ways = 4;
833 
834 	/* mem-region5 (spa1, dimm3) */
835 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
836 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
837 	memdev->header.length = sizeof(*memdev);
838 	memdev->device_handle = handle[3];
839 	memdev->physical_id = 3;
840 	memdev->region_id = 0;
841 	memdev->range_index = 1+1;
842 	memdev->region_index = 7+1;
843 	memdev->region_size = SPA1_SIZE/4;
844 	memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
845 	memdev->address = SPA0_SIZE/2;
846 	memdev->interleave_index = 0;
847 	memdev->interleave_ways = 4;
848 
849 	/* mem-region6 (spa/dcr0, dimm0) */
850 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
851 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
852 	memdev->header.length = sizeof(*memdev);
853 	memdev->device_handle = handle[0];
854 	memdev->physical_id = 0;
855 	memdev->region_id = 0;
856 	memdev->range_index = 2+1;
857 	memdev->region_index = 0+1;
858 	memdev->region_size = 0;
859 	memdev->region_offset = 0;
860 	memdev->address = 0;
861 	memdev->interleave_index = 0;
862 	memdev->interleave_ways = 1;
863 
864 	/* mem-region7 (spa/dcr1, dimm1) */
865 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
866 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
867 	memdev->header.length = sizeof(*memdev);
868 	memdev->device_handle = handle[1];
869 	memdev->physical_id = 1;
870 	memdev->region_id = 0;
871 	memdev->range_index = 3+1;
872 	memdev->region_index = 1+1;
873 	memdev->region_size = 0;
874 	memdev->region_offset = 0;
875 	memdev->address = 0;
876 	memdev->interleave_index = 0;
877 	memdev->interleave_ways = 1;
878 
879 	/* mem-region8 (spa/dcr2, dimm2) */
880 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
881 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
882 	memdev->header.length = sizeof(*memdev);
883 	memdev->device_handle = handle[2];
884 	memdev->physical_id = 2;
885 	memdev->region_id = 0;
886 	memdev->range_index = 4+1;
887 	memdev->region_index = 2+1;
888 	memdev->region_size = 0;
889 	memdev->region_offset = 0;
890 	memdev->address = 0;
891 	memdev->interleave_index = 0;
892 	memdev->interleave_ways = 1;
893 
894 	/* mem-region9 (spa/dcr3, dimm3) */
895 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
896 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
897 	memdev->header.length = sizeof(*memdev);
898 	memdev->device_handle = handle[3];
899 	memdev->physical_id = 3;
900 	memdev->region_id = 0;
901 	memdev->range_index = 5+1;
902 	memdev->region_index = 3+1;
903 	memdev->region_size = 0;
904 	memdev->region_offset = 0;
905 	memdev->address = 0;
906 	memdev->interleave_index = 0;
907 	memdev->interleave_ways = 1;
908 
909 	/* mem-region10 (spa/bdw0, dimm0) */
910 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
911 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
912 	memdev->header.length = sizeof(*memdev);
913 	memdev->device_handle = handle[0];
914 	memdev->physical_id = 0;
915 	memdev->region_id = 0;
916 	memdev->range_index = 6+1;
917 	memdev->region_index = 0+1;
918 	memdev->region_size = 0;
919 	memdev->region_offset = 0;
920 	memdev->address = 0;
921 	memdev->interleave_index = 0;
922 	memdev->interleave_ways = 1;
923 
924 	/* mem-region11 (spa/bdw1, dimm1) */
925 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
926 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
927 	memdev->header.length = sizeof(*memdev);
928 	memdev->device_handle = handle[1];
929 	memdev->physical_id = 1;
930 	memdev->region_id = 0;
931 	memdev->range_index = 7+1;
932 	memdev->region_index = 1+1;
933 	memdev->region_size = 0;
934 	memdev->region_offset = 0;
935 	memdev->address = 0;
936 	memdev->interleave_index = 0;
937 	memdev->interleave_ways = 1;
938 
939 	/* mem-region12 (spa/bdw2, dimm2) */
940 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
941 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
942 	memdev->header.length = sizeof(*memdev);
943 	memdev->device_handle = handle[2];
944 	memdev->physical_id = 2;
945 	memdev->region_id = 0;
946 	memdev->range_index = 8+1;
947 	memdev->region_index = 2+1;
948 	memdev->region_size = 0;
949 	memdev->region_offset = 0;
950 	memdev->address = 0;
951 	memdev->interleave_index = 0;
952 	memdev->interleave_ways = 1;
953 
954 	/* mem-region13 (spa/bdw3, dimm3) */
955 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
956 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
957 	memdev->header.length = sizeof(*memdev);
958 	memdev->device_handle = handle[3];
959 	memdev->physical_id = 3;
960 	memdev->region_id = 0;
961 	memdev->range_index = 9+1;
962 	memdev->region_index = 3+1;
963 	memdev->region_size = 0;
964 	memdev->region_offset = 0;
965 	memdev->address = 0;
966 	memdev->interleave_index = 0;
967 	memdev->interleave_ways = 1;
968 
969 	offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
970 	/* dcr-descriptor0: blk */
971 	dcr = nfit_buf + offset;
972 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
973 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
974 	dcr->region_index = 0+1;
975 	dcr->vendor_id = 0xabcd;
976 	dcr->device_id = 0;
977 	dcr->revision_id = 1;
978 	dcr->serial_number = ~handle[0];
979 	dcr->code = NFIT_FIC_BLK;
980 	dcr->windows = 1;
981 	dcr->window_size = DCR_SIZE;
982 	dcr->command_offset = 0;
983 	dcr->command_size = 8;
984 	dcr->status_offset = 8;
985 	dcr->status_size = 4;
986 
987 	/* dcr-descriptor1: blk */
988 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
989 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
990 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
991 	dcr->region_index = 1+1;
992 	dcr->vendor_id = 0xabcd;
993 	dcr->device_id = 0;
994 	dcr->revision_id = 1;
995 	dcr->serial_number = ~handle[1];
996 	dcr->code = NFIT_FIC_BLK;
997 	dcr->windows = 1;
998 	dcr->window_size = DCR_SIZE;
999 	dcr->command_offset = 0;
1000 	dcr->command_size = 8;
1001 	dcr->status_offset = 8;
1002 	dcr->status_size = 4;
1003 
1004 	/* dcr-descriptor2: blk */
1005 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
1006 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1007 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1008 	dcr->region_index = 2+1;
1009 	dcr->vendor_id = 0xabcd;
1010 	dcr->device_id = 0;
1011 	dcr->revision_id = 1;
1012 	dcr->serial_number = ~handle[2];
1013 	dcr->code = NFIT_FIC_BLK;
1014 	dcr->windows = 1;
1015 	dcr->window_size = DCR_SIZE;
1016 	dcr->command_offset = 0;
1017 	dcr->command_size = 8;
1018 	dcr->status_offset = 8;
1019 	dcr->status_size = 4;
1020 
1021 	/* dcr-descriptor3: blk */
1022 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
1023 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1024 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1025 	dcr->region_index = 3+1;
1026 	dcr->vendor_id = 0xabcd;
1027 	dcr->device_id = 0;
1028 	dcr->revision_id = 1;
1029 	dcr->serial_number = ~handle[3];
1030 	dcr->code = NFIT_FIC_BLK;
1031 	dcr->windows = 1;
1032 	dcr->window_size = DCR_SIZE;
1033 	dcr->command_offset = 0;
1034 	dcr->command_size = 8;
1035 	dcr->status_offset = 8;
1036 	dcr->status_size = 4;
1037 
1038 	offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
1039 	/* dcr-descriptor0: pmem */
1040 	dcr = nfit_buf + offset;
1041 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1042 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1043 			window_size);
1044 	dcr->region_index = 4+1;
1045 	dcr->vendor_id = 0xabcd;
1046 	dcr->device_id = 0;
1047 	dcr->revision_id = 1;
1048 	dcr->serial_number = ~handle[0];
1049 	dcr->code = NFIT_FIC_BYTEN;
1050 	dcr->windows = 0;
1051 
1052 	/* dcr-descriptor1: pmem */
1053 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1054 			window_size);
1055 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1056 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1057 			window_size);
1058 	dcr->region_index = 5+1;
1059 	dcr->vendor_id = 0xabcd;
1060 	dcr->device_id = 0;
1061 	dcr->revision_id = 1;
1062 	dcr->serial_number = ~handle[1];
1063 	dcr->code = NFIT_FIC_BYTEN;
1064 	dcr->windows = 0;
1065 
1066 	/* dcr-descriptor2: pmem */
1067 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1068 			window_size) * 2;
1069 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1070 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1071 			window_size);
1072 	dcr->region_index = 6+1;
1073 	dcr->vendor_id = 0xabcd;
1074 	dcr->device_id = 0;
1075 	dcr->revision_id = 1;
1076 	dcr->serial_number = ~handle[2];
1077 	dcr->code = NFIT_FIC_BYTEN;
1078 	dcr->windows = 0;
1079 
1080 	/* dcr-descriptor3: pmem */
1081 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1082 			window_size) * 3;
1083 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1084 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1085 			window_size);
1086 	dcr->region_index = 7+1;
1087 	dcr->vendor_id = 0xabcd;
1088 	dcr->device_id = 0;
1089 	dcr->revision_id = 1;
1090 	dcr->serial_number = ~handle[3];
1091 	dcr->code = NFIT_FIC_BYTEN;
1092 	dcr->windows = 0;
1093 
1094 	offset = offset + offsetof(struct acpi_nfit_control_region,
1095 			window_size) * 4;
1096 	/* bdw0 (spa/dcr0, dimm0) */
1097 	bdw = nfit_buf + offset;
1098 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1099 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1100 	bdw->region_index = 0+1;
1101 	bdw->windows = 1;
1102 	bdw->offset = 0;
1103 	bdw->size = BDW_SIZE;
1104 	bdw->capacity = DIMM_SIZE;
1105 	bdw->start_address = 0;
1106 
1107 	/* bdw1 (spa/dcr1, dimm1) */
1108 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
1109 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1110 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1111 	bdw->region_index = 1+1;
1112 	bdw->windows = 1;
1113 	bdw->offset = 0;
1114 	bdw->size = BDW_SIZE;
1115 	bdw->capacity = DIMM_SIZE;
1116 	bdw->start_address = 0;
1117 
1118 	/* bdw2 (spa/dcr2, dimm2) */
1119 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
1120 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1121 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1122 	bdw->region_index = 2+1;
1123 	bdw->windows = 1;
1124 	bdw->offset = 0;
1125 	bdw->size = BDW_SIZE;
1126 	bdw->capacity = DIMM_SIZE;
1127 	bdw->start_address = 0;
1128 
1129 	/* bdw3 (spa/dcr3, dimm3) */
1130 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
1131 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1132 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1133 	bdw->region_index = 3+1;
1134 	bdw->windows = 1;
1135 	bdw->offset = 0;
1136 	bdw->size = BDW_SIZE;
1137 	bdw->capacity = DIMM_SIZE;
1138 	bdw->start_address = 0;
1139 
1140 	offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
1141 	/* flush0 (dimm0) */
1142 	flush = nfit_buf + offset;
1143 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1144 	flush->header.length = sizeof(struct acpi_nfit_flush_address);
1145 	flush->device_handle = handle[0];
1146 	flush->hint_count = 1;
1147 	flush->hint_address[0] = t->flush_dma[0];
1148 
1149 	/* flush1 (dimm1) */
1150 	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
1151 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1152 	flush->header.length = sizeof(struct acpi_nfit_flush_address);
1153 	flush->device_handle = handle[1];
1154 	flush->hint_count = 1;
1155 	flush->hint_address[0] = t->flush_dma[1];
1156 
1157 	/* flush2 (dimm2) */
1158 	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
1159 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1160 	flush->header.length = sizeof(struct acpi_nfit_flush_address);
1161 	flush->device_handle = handle[2];
1162 	flush->hint_count = 1;
1163 	flush->hint_address[0] = t->flush_dma[2];
1164 
1165 	/* flush3 (dimm3) */
1166 	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
1167 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1168 	flush->header.length = sizeof(struct acpi_nfit_flush_address);
1169 	flush->device_handle = handle[3];
1170 	flush->hint_count = 1;
1171 	flush->hint_address[0] = t->flush_dma[3];
1172 
1173 	if (t->setup_hotplug) {
1174 		offset = offset + sizeof(struct acpi_nfit_flush_address) * 4;
1175 		/* dcr-descriptor4: blk */
1176 		dcr = nfit_buf + offset;
1177 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1178 		dcr->header.length = sizeof(struct acpi_nfit_control_region);
1179 		dcr->region_index = 8+1;
1180 		dcr->vendor_id = 0xabcd;
1181 		dcr->device_id = 0;
1182 		dcr->revision_id = 1;
1183 		dcr->serial_number = ~handle[4];
1184 		dcr->code = NFIT_FIC_BLK;
1185 		dcr->windows = 1;
1186 		dcr->window_size = DCR_SIZE;
1187 		dcr->command_offset = 0;
1188 		dcr->command_size = 8;
1189 		dcr->status_offset = 8;
1190 		dcr->status_size = 4;
1191 
1192 		offset = offset + sizeof(struct acpi_nfit_control_region);
1193 		/* dcr-descriptor4: pmem */
1194 		dcr = nfit_buf + offset;
1195 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1196 		dcr->header.length = offsetof(struct acpi_nfit_control_region,
1197 				window_size);
1198 		dcr->region_index = 9+1;
1199 		dcr->vendor_id = 0xabcd;
1200 		dcr->device_id = 0;
1201 		dcr->revision_id = 1;
1202 		dcr->serial_number = ~handle[4];
1203 		dcr->code = NFIT_FIC_BYTEN;
1204 		dcr->windows = 0;
1205 
1206 		offset = offset + offsetof(struct acpi_nfit_control_region,
1207 				window_size);
1208 		/* bdw4 (spa/dcr4, dimm4) */
1209 		bdw = nfit_buf + offset;
1210 		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1211 		bdw->header.length = sizeof(struct acpi_nfit_data_region);
1212 		bdw->region_index = 8+1;
1213 		bdw->windows = 1;
1214 		bdw->offset = 0;
1215 		bdw->size = BDW_SIZE;
1216 		bdw->capacity = DIMM_SIZE;
1217 		bdw->start_address = 0;
1218 
1219 		offset = offset + sizeof(struct acpi_nfit_data_region);
1220 		/* spa10 (dcr4) dimm4 */
1221 		spa = nfit_buf + offset;
1222 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1223 		spa->header.length = sizeof(*spa);
1224 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1225 		spa->range_index = 10+1;
1226 		spa->address = t->dcr_dma[4];
1227 		spa->length = DCR_SIZE;
1228 
1229 		/*
1230 		 * spa11 (single-dimm interleave for hotplug, note storage
1231 		 * does not actually alias the related block-data-window
1232 		 * regions)
1233 		 */
1234 		spa = nfit_buf + offset + sizeof(*spa);
1235 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1236 		spa->header.length = sizeof(*spa);
1237 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1238 		spa->range_index = 11+1;
1239 		spa->address = t->spa_set_dma[2];
1240 		spa->length = SPA0_SIZE;
1241 
1242 		/* spa12 (bdw for dcr4) dimm4 */
1243 		spa = nfit_buf + offset + sizeof(*spa) * 2;
1244 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1245 		spa->header.length = sizeof(*spa);
1246 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1247 		spa->range_index = 12+1;
1248 		spa->address = t->dimm_dma[4];
1249 		spa->length = DIMM_SIZE;
1250 
1251 		offset = offset + sizeof(*spa) * 3;
1252 		/* mem-region14 (spa/dcr4, dimm4) */
1253 		memdev = nfit_buf + offset;
1254 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1255 		memdev->header.length = sizeof(*memdev);
1256 		memdev->device_handle = handle[4];
1257 		memdev->physical_id = 4;
1258 		memdev->region_id = 0;
1259 		memdev->range_index = 10+1;
1260 		memdev->region_index = 8+1;
1261 		memdev->region_size = 0;
1262 		memdev->region_offset = 0;
1263 		memdev->address = 0;
1264 		memdev->interleave_index = 0;
1265 		memdev->interleave_ways = 1;
1266 
1267 		/* mem-region15 (spa0, dimm4) */
1268 		memdev = nfit_buf + offset +
1269 				sizeof(struct acpi_nfit_memory_map);
1270 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1271 		memdev->header.length = sizeof(*memdev);
1272 		memdev->device_handle = handle[4];
1273 		memdev->physical_id = 4;
1274 		memdev->region_id = 0;
1275 		memdev->range_index = 11+1;
1276 		memdev->region_index = 9+1;
1277 		memdev->region_size = SPA0_SIZE;
1278 		memdev->region_offset = t->spa_set_dma[2];
1279 		memdev->address = 0;
1280 		memdev->interleave_index = 0;
1281 		memdev->interleave_ways = 1;
1282 
1283 		/* mem-region16 (spa/bdw4, dimm4) */
1284 		memdev = nfit_buf + offset +
1285 				sizeof(struct acpi_nfit_memory_map) * 2;
1286 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1287 		memdev->header.length = sizeof(*memdev);
1288 		memdev->device_handle = handle[4];
1289 		memdev->physical_id = 4;
1290 		memdev->region_id = 0;
1291 		memdev->range_index = 12+1;
1292 		memdev->region_index = 8+1;
1293 		memdev->region_size = 0;
1294 		memdev->region_offset = 0;
1295 		memdev->address = 0;
1296 		memdev->interleave_index = 0;
1297 		memdev->interleave_ways = 1;
1298 
1299 		offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
1300 		/* flush4 (dimm4) */
1301 		flush = nfit_buf + offset;
1302 		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1303 		flush->header.length = sizeof(struct acpi_nfit_flush_address);
1304 		flush->device_handle = handle[4];
1305 		flush->hint_count = 1;
1306 		flush->hint_address[0] = t->flush_dma[4];
1307 	}
1308 
1309 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE);
1310 
1311 	acpi_desc = &t->acpi_desc;
1312 	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
1313 	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1314 	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1315 	set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
1316 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1317 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1318 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1319 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1320 	set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
1321 }
1322 
1323 static void nfit_test1_setup(struct nfit_test *t)
1324 {
1325 	size_t offset;
1326 	void *nfit_buf = t->nfit_buf;
1327 	struct acpi_nfit_memory_map *memdev;
1328 	struct acpi_nfit_control_region *dcr;
1329 	struct acpi_nfit_system_address *spa;
1330 	struct acpi_nfit_desc *acpi_desc;
1331 
1332 	offset = 0;
1333 	/* spa0 (flat range with no bdw aliasing) */
1334 	spa = nfit_buf + offset;
1335 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1336 	spa->header.length = sizeof(*spa);
1337 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1338 	spa->range_index = 0+1;
1339 	spa->address = t->spa_set_dma[0];
1340 	spa->length = SPA2_SIZE;
1341 
1342 	offset += sizeof(*spa);
1343 	/* mem-region0 (spa0, dimm0) */
1344 	memdev = nfit_buf + offset;
1345 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1346 	memdev->header.length = sizeof(*memdev);
1347 	memdev->device_handle = 0;
1348 	memdev->physical_id = 0;
1349 	memdev->region_id = 0;
1350 	memdev->range_index = 0+1;
1351 	memdev->region_index = 0+1;
1352 	memdev->region_size = SPA2_SIZE;
1353 	memdev->region_offset = 0;
1354 	memdev->address = 0;
1355 	memdev->interleave_index = 0;
1356 	memdev->interleave_ways = 1;
1357 	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
1358 		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
1359 		| ACPI_NFIT_MEM_NOT_ARMED;
1360 
1361 	offset += sizeof(*memdev);
1362 	/* dcr-descriptor0 */
1363 	dcr = nfit_buf + offset;
1364 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1365 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1366 			window_size);
1367 	dcr->region_index = 0+1;
1368 	dcr->vendor_id = 0xabcd;
1369 	dcr->device_id = 0;
1370 	dcr->revision_id = 1;
1371 	dcr->serial_number = ~0;
1372 	dcr->code = NFIT_FIC_BYTE;
1373 	dcr->windows = 0;
1374 
1375 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);
1376 
1377 	acpi_desc = &t->acpi_desc;
1378 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1379 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1380 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1381 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1382 }
1383 
1384 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
1385 		void *iobuf, u64 len, int rw)
1386 {
1387 	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
1388 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1389 	struct nd_region *nd_region = &ndbr->nd_region;
1390 	unsigned int lane;
1391 
1392 	lane = nd_region_acquire_lane(nd_region);
1393 	if (rw)
1394 		memcpy(mmio->addr.base + dpa, iobuf, len);
1395 	else {
1396 		memcpy(iobuf, mmio->addr.base + dpa, len);
1397 
1398 		/* give us some coverage of the mmio_flush_range() API */
1399 		mmio_flush_range(mmio->addr.base + dpa, len);
1400 	}
1401 	nd_region_release_lane(nd_region, lane);
1402 
1403 	return 0;
1404 }
1405 
1406 static int nfit_test_probe(struct platform_device *pdev)
1407 {
1408 	struct nvdimm_bus_descriptor *nd_desc;
1409 	struct acpi_nfit_desc *acpi_desc;
1410 	struct device *dev = &pdev->dev;
1411 	struct nfit_test *nfit_test;
1412 	int rc;
1413 
1414 	nfit_test = to_nfit_test(&pdev->dev);
1415 
1416 	/* common alloc */
1417 	if (nfit_test->num_dcr) {
1418 		int num = nfit_test->num_dcr;
1419 
1420 		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
1421 				GFP_KERNEL);
1422 		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1423 				GFP_KERNEL);
1424 		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
1425 				GFP_KERNEL);
1426 		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1427 				GFP_KERNEL);
1428 		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
1429 				GFP_KERNEL);
1430 		nfit_test->label_dma = devm_kcalloc(dev, num,
1431 				sizeof(dma_addr_t), GFP_KERNEL);
1432 		nfit_test->dcr = devm_kcalloc(dev, num,
1433 				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
1434 		nfit_test->dcr_dma = devm_kcalloc(dev, num,
1435 				sizeof(dma_addr_t), GFP_KERNEL);
1436 		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
1437 				&& nfit_test->label_dma && nfit_test->dcr
1438 				&& nfit_test->dcr_dma && nfit_test->flush
1439 				&& nfit_test->flush_dma)
1440 			/* pass */;
1441 		else
1442 			return -ENOMEM;
1443 	}
1444 
1445 	if (nfit_test->num_pm) {
1446 		int num = nfit_test->num_pm;
1447 
1448 		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
1449 				GFP_KERNEL);
1450 		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
1451 				sizeof(dma_addr_t), GFP_KERNEL);
1452 		if (nfit_test->spa_set && nfit_test->spa_set_dma)
1453 			/* pass */;
1454 		else
1455 			return -ENOMEM;
1456 	}
1457 
1458 	/* per-nfit specific alloc */
1459 	if (nfit_test->alloc(nfit_test))
1460 		return -ENOMEM;
1461 
1462 	nfit_test->setup(nfit_test);
1463 	acpi_desc = &nfit_test->acpi_desc;
1464 	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
1465 	acpi_desc->nfit = nfit_test->nfit_buf;
1466 	acpi_desc->blk_do_io = nfit_test_blk_do_io;
1467 	nd_desc = &acpi_desc->nd_desc;
1468 	nd_desc->provider_name = NULL;
1469 	nd_desc->ndctl = nfit_test_ctl;
1470 	acpi_desc->nvdimm_bus = nvdimm_bus_register(&pdev->dev, nd_desc);
1471 	if (!acpi_desc->nvdimm_bus)
1472 		return -ENXIO;
1473 
1474 	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
1475 	if (rc) {
1476 		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
1477 		return rc;
1478 	}
1479 
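	/*
	 * For the first test bus only: re-run setup with the hotplug
	 * dimm populated and evaluate the larger table a second time to
	 * mimic discovery of a hot-added DIMM.
	 */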
1480 	if (nfit_test->setup != nfit_test0_setup)
1481 		return 0;
1482 
1483 	nfit_test->setup_hotplug = 1;
1484 	nfit_test->setup(nfit_test);
1485 
1486 	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
1487 	if (rc) {
1488 		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
1489 		return rc;
1490 	}
1491 
1492 	return 0;
1493 }
1494 
1495 static int nfit_test_remove(struct platform_device *pdev)
1496 {
1497 	struct nfit_test *nfit_test = to_nfit_test(&pdev->dev);
1498 	struct acpi_nfit_desc *acpi_desc = &nfit_test->acpi_desc;
1499 
1500 	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
1501 
1502 	return 0;
1503 }
1504 
1505 static void nfit_test_release(struct device *dev)
1506 {
1507 	struct nfit_test *nfit_test = to_nfit_test(dev);
1508 
1509 	kfree(nfit_test);
1510 }
1511 
1512 static const struct platform_device_id nfit_test_id[] = {
1513 	{ KBUILD_MODNAME },
1514 	{ },
1515 };
1516 
1517 static struct platform_driver nfit_test_driver = {
1518 	.probe = nfit_test_probe,
1519 	.remove = nfit_test_remove,
1520 	.driver = {
1521 		.name = KBUILD_MODNAME,
1522 	},
1523 	.id_table = nfit_test_id,
1524 };
1525 
1526 #ifdef CONFIG_CMA_SIZE_MBYTES
1527 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
1528 #else
1529 #define CMA_SIZE_MBYTES 0
1530 #endif
1531 
1532 static __init int nfit_test_init(void)
1533 {
1534 	int rc, i;
1535 
1536 	nfit_test_setup(nfit_test_lookup);
1537 
1538 	for (i = 0; i < NUM_NFITS; i++) {
1539 		struct nfit_test *nfit_test;
1540 		struct platform_device *pdev;
1541 		static int once;
1542 
1543 		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
1544 		if (!nfit_test) {
1545 			rc = -ENOMEM;
1546 			goto err_register;
1547 		}
1548 		INIT_LIST_HEAD(&nfit_test->resources);
1549 		switch (i) {
1550 		case 0:
1551 			nfit_test->num_pm = NUM_PM;
1552 			nfit_test->num_dcr = NUM_DCR;
1553 			nfit_test->alloc = nfit_test0_alloc;
1554 			nfit_test->setup = nfit_test0_setup;
1555 			break;
1556 		case 1:
1557 			nfit_test->num_pm = 1;
1558 			nfit_test->alloc = nfit_test1_alloc;
1559 			nfit_test->setup = nfit_test1_setup;
1560 			break;
1561 		default:
1562 			rc = -EINVAL;
1563 			goto err_register;
1564 		}
1565 		pdev = &nfit_test->pdev;
1566 		pdev->name = KBUILD_MODNAME;
1567 		pdev->id = i;
1568 		pdev->dev.release = nfit_test_release;
1569 		rc = platform_device_register(pdev);
1570 		if (rc) {
1571 			put_device(&pdev->dev);
1572 			goto err_register;
1573 		}
1574 
1575 		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1576 		if (rc)
1577 			goto err_register;
1578 
1579 		instances[i] = nfit_test;
1580 
1581 		if (!once++) {
1582 			dma_addr_t dma;
1583 			void *buf;
1584 
1585 			buf = dma_alloc_coherent(&pdev->dev, SZ_128M, &dma,
1586 					GFP_KERNEL);
1587 			if (!buf) {
1588 				rc = -ENOMEM;
1589 				dev_warn(&pdev->dev, "need 128M of free cma\n");
1590 				goto err_register;
1591 			}
1592 			dma_free_coherent(&pdev->dev, SZ_128M, buf, dma);
1593 		}
1594 	}
1595 
1596 	rc = platform_driver_register(&nfit_test_driver);
1597 	if (rc)
1598 		goto err_register;
1599 	return 0;
1600 
1601  err_register:
1602 	for (i = 0; i < NUM_NFITS; i++)
1603 		if (instances[i])
1604 			platform_device_unregister(&instances[i]->pdev);
1605 	nfit_test_teardown();
1606 	return rc;
1607 }
1608 
1609 static __exit void nfit_test_exit(void)
1610 {
1611 	int i;
1612 
1613 	platform_driver_unregister(&nfit_test_driver);
1614 	for (i = 0; i < NUM_NFITS; i++)
1615 		platform_device_unregister(&instances[i]->pdev);
1616 	nfit_test_teardown();
1617 }
1618 
1619 module_init(nfit_test_init);
1620 module_exit(nfit_test_exit);
1621 MODULE_LICENSE("GPL v2");
1622 MODULE_AUTHOR("Intel Corporation");
1623