/*-
 * Copyright (c) 2017, 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/efi.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <sys/uio.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/acuuid.h>
#include <dev/acpica/acpivar.h>
#include <dev/nvdimm/nvdimm_var.h>

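/*
 * Table of all System Physical Address (SPA) ranges parsed from the
 * ACPI NFIT, populated once at GEOM class initialization time.
 */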
struct SPA_mapping *spa_mappings;
int spa_mappings_cnt;

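/* NFIT iteration callback: count SPA subtables so the table can be sized. */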
static int
nvdimm_spa_count(void *nfitsubtbl __unused, void *arg)
{
	int *cnt;

	cnt = arg;
	(*cnt)++;
	return (0);
}

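/*
 * Known SPA range types, indexed by the SPA_TYPE_* constants.  Each entry
 * maps the GUID from the NFIT SPA subtable to a human-readable name and
 * records whether the range may be exposed to userland (control regions
 * stay kernel-only).
 */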
static struct nvdimm_SPA_uuid_list_elm {
	const char		*u_name;
	const char		*u_id_str;
	struct uuid		u_id;
	const bool		u_usr_acc;
} nvdimm_SPA_uuid_list[] = {
	[SPA_TYPE_VOLATILE_MEMORY] = {
		.u_name =	"VOLA MEM",
		.u_id_str =	UUID_VOLATILE_MEMORY,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_PERSISTENT_MEMORY] = {
		.u_name =	"PERS MEM",
		.u_id_str =	UUID_PERSISTENT_MEMORY,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_CONTROL_REGION] = {
		.u_name =	"CTRL RG ",
		.u_id_str =	UUID_CONTROL_REGION,
		.u_usr_acc =	false,
	},
	[SPA_TYPE_DATA_REGION] = {
		.u_name =	"DATA RG ",
		.u_id_str =	UUID_DATA_REGION,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_VOLATILE_VIRTUAL_DISK] = {
		.u_name =	"VIRT DSK",
		.u_id_str =	UUID_VOLATILE_VIRTUAL_DISK,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_VOLATILE_VIRTUAL_CD] = {
		.u_name =	"VIRT CD ",
		.u_id_str =	UUID_VOLATILE_VIRTUAL_CD,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_PERSISTENT_VIRTUAL_DISK] = {
		.u_name =	"PV DSK  ",
		.u_id_str =	UUID_PERSISTENT_VIRTUAL_DISK,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_PERSISTENT_VIRTUAL_CD] = {
		.u_name =	"PV CD   ",
		.u_id_str =	UUID_PERSISTENT_VIRTUAL_CD,
		.u_usr_acc =	true,
	},
};

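/*
 * Pick a VM cache mode for the mapping from the EFI memory attributes
 * advertised for the range, preferring the most performant mode that
 * firmware allows and falling back to uncacheable.
 */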
static vm_memattr_t
nvdimm_spa_memattr(struct SPA_mapping *spa)
{
	vm_memattr_t mode;

	if ((spa->spa_efi_mem_flags & EFI_MD_ATTR_WB) != 0)
		mode = VM_MEMATTR_WRITE_BACK;
	else if ((spa->spa_efi_mem_flags & EFI_MD_ATTR_WT) != 0)
		mode = VM_MEMATTR_WRITE_THROUGH;
	else if ((spa->spa_efi_mem_flags & EFI_MD_ATTR_WC) != 0)
		mode = VM_MEMATTR_WRITE_COMBINING;
	else if ((spa->spa_efi_mem_flags & EFI_MD_ATTR_WP) != 0)
		mode = VM_MEMATTR_WRITE_PROTECTED;
	else if ((spa->spa_efi_mem_flags & EFI_MD_ATTR_UC) != 0)
		mode = VM_MEMATTR_UNCACHEABLE;
	else {
		if (bootverbose)
			printf("SPA%d mapping attr unsupported\n",
			    spa->spa_nfit_idx);
		mode = VM_MEMATTR_UNCACHEABLE;
	}
	return (mode);
}

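/*
 * Copy data between a SPA range and a uio.  If the range could not be
 * mapped into KVA, a single fake vm_page is slid along the region and
 * uiomove_fromphys() is used; otherwise plain uiomove() on the mapping
 * suffices.
 */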
static int
nvdimm_spa_uio(struct SPA_mapping *spa, struct uio *uio)
{
	struct vm_page m, *ma;
	off_t off;
	vm_memattr_t mattr;
	int error, n;

	/* A zero-length or out-of-range request performs no I/O. */
	error = 0;
	if (spa->spa_kva == NULL) {
		mattr = nvdimm_spa_memattr(spa);
		vm_page_initfake(&m, 0, mattr);
		ma = &m;
		while (uio->uio_resid > 0) {
			if (uio->uio_offset >= spa->spa_len)
				break;
			off = spa->spa_phys_base + uio->uio_offset;
			vm_page_updatefake(&m, trunc_page(off), mattr);
			/*
			 * Clamp the chunk to the single fake page and to
			 * the end of the region, so uiomove_fromphys()
			 * never indexes past ma[0].
			 */
			n = PAGE_SIZE - (off & PAGE_MASK);
			if (n > uio->uio_resid)
				n = uio->uio_resid;
			if (uio->uio_offset + n > spa->spa_len)
				n = spa->spa_len - uio->uio_offset;
			error = uiomove_fromphys(&ma, off & PAGE_MASK, n, uio);
			if (error != 0)
				break;
		}
	} else {
		while (uio->uio_resid > 0) {
			if (uio->uio_offset >= spa->spa_len)
				break;
			n = INT_MAX;
			if (n > uio->uio_resid)
				n = uio->uio_resid;
			if (uio->uio_offset + n > spa->spa_len)
				n = spa->spa_len - uio->uio_offset;
			error = uiomove((char *)spa->spa_kva + uio->uio_offset,
			    n, uio);
			if (error != 0)
				break;
		}
	}
	return (error);
}

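/* Character device read/write entry point; both directions go via uio. */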
static int
nvdimm_spa_rw(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (nvdimm_spa_uio(dev->si_drv1, uio));
}

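/*
 * Implement the minimal set of disk ioctls needed for tools to treat
 * the device as a disk-like object: sector size and media size.
 */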
static int
nvdimm_spa_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct SPA_mapping *spa;
	int error;

	spa = dev->si_drv1;
	error = 0;
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = DEV_BSIZE;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = spa->spa_len;
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

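/*
 * Hand out a reference to the SG VM object backing the range, allowing
 * userland to mmap() persistent memory directly.
 */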
static int
nvdimm_spa_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size,
    vm_object_t *objp, int nprot)
{
	struct SPA_mapping *spa;

	spa = dev->si_drv1;
	if (spa->spa_obj == NULL)
		return (ENXIO);
	if (*offset >= spa->spa_len || *offset + size < *offset ||
	    *offset + size > spa->spa_len)
		return (EINVAL);
	vm_object_reference(spa->spa_obj);
	*objp = spa->spa_obj;
	return (0);
}

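/*
 * Character device interface.  make_dev_s() below names the node
 * "nvdimm_spa%d", so a range shows up as /dev/nvdimm_spaN.  A userland
 * consumer might, for example, query the size and map the region
 * directly (illustrative sketch, not part of this driver):
 *
 *	int fd = open("/dev/nvdimm_spa0", O_RDWR);
 *	off_t sz;
 *	ioctl(fd, DIOCGMEDIASIZE, &sz);
 *	void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */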
static struct cdevsw spa_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_DISK,
	.d_name =	"nvdimm_spa",
	.d_read =	nvdimm_spa_rw,
	.d_write =	nvdimm_spa_rw,
	.d_ioctl =	nvdimm_spa_ioctl,
	.d_mmap_single = nvdimm_spa_mmap_single,
};

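/*
 * Handle an unmapped bio against an unmapped SPA: build fake pages over
 * the relevant physical range and let pmap_copy_pages() move the data
 * page by page in the direction given by rw.
 */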
static void
nvdimm_spa_g_all_unmapped(struct SPA_mapping *spa, struct bio *bp,
    int rw)
{
	struct vm_page maa[bp->bio_ma_n];
	vm_page_t ma[bp->bio_ma_n];
	vm_memattr_t mattr;
	int i;

	mattr = nvdimm_spa_memattr(spa);
	for (i = 0; i < nitems(ma); i++) {
		maa[i].flags = 0;
		vm_page_initfake(&maa[i], spa->spa_phys_base +
		    trunc_page(bp->bio_offset) + PAGE_SIZE * i, mattr);
		ma[i] = &maa[i];
	}
	if (rw == BIO_READ)
		pmap_copy_pages(ma, bp->bio_offset & PAGE_MASK, bp->bio_ma,
		    bp->bio_ma_offset, bp->bio_length);
	else
		pmap_copy_pages(bp->bio_ma, bp->bio_ma_offset, ma,
		    bp->bio_offset & PAGE_MASK, bp->bio_length);
}

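/*
 * Per-SPA worker process: takes bios off the queue, services BIO_FLUSH
 * by writing back CPU caches over the whole range, and performs
 * BIO_READ/BIO_WRITE through nvdimm_spa_uio() or the unmapped helpers.
 */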
static void
nvdimm_spa_g_thread(void *arg)
{
	struct SPA_mapping *spa;
	struct bio *bp;
	struct uio auio;
	struct iovec aiovec;
	int error;

	spa = arg;
	for (;;) {
		mtx_lock(&spa->spa_g_mtx);
		for (;;) {
			bp = bioq_takefirst(&spa->spa_g_queue);
			if (bp != NULL)
				break;
			/*
			 * Test the exit request before sleeping so that a
			 * wakeup posted while a bio was being serviced is
			 * not lost.
			 */
			if (!spa->spa_g_proc_run) {
				spa->spa_g_proc_exiting = true;
				wakeup(&spa->spa_g_queue);
				mtx_unlock(&spa->spa_g_mtx);
				kproc_exit(0);
			}
			msleep(&spa->spa_g_queue, &spa->spa_g_mtx, PRIBIO,
			    "spa_g", 0);
		}
		mtx_unlock(&spa->spa_g_mtx);
		if (bp->bio_cmd != BIO_READ && bp->bio_cmd != BIO_WRITE &&
		    bp->bio_cmd != BIO_FLUSH) {
			error = EOPNOTSUPP;
			goto completed;
		}

		error = 0;
		if (bp->bio_cmd == BIO_FLUSH) {
			if (spa->spa_kva != NULL) {
				pmap_large_map_wb(spa->spa_kva, spa->spa_len);
			} else {
				pmap_flush_cache_phys_range(
				    (vm_paddr_t)spa->spa_phys_base,
				    (vm_paddr_t)spa->spa_phys_base +
				    spa->spa_len, nvdimm_spa_memattr(spa));
			}
			/*
			 * XXX flush IMC
			 */
			goto completed;
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
			if (spa->spa_kva != NULL) {
				aiovec.iov_base = (char *)spa->spa_kva +
				    bp->bio_offset;
				aiovec.iov_len = bp->bio_length;
				auio.uio_iov = &aiovec;
				auio.uio_iovcnt = 1;
				auio.uio_resid = bp->bio_length;
				auio.uio_offset = bp->bio_offset;
				auio.uio_segflg = UIO_SYSSPACE;
				auio.uio_rw = bp->bio_cmd == BIO_READ ?
				    UIO_WRITE : UIO_READ;
				auio.uio_td = curthread;
				error = uiomove_fromphys(bp->bio_ma,
				    bp->bio_ma_offset, bp->bio_length, &auio);
			} else {
				nvdimm_spa_g_all_unmapped(spa, bp, bp->bio_cmd);
				error = 0;
			}
		} else {
			aiovec.iov_base = bp->bio_data;
			aiovec.iov_len = bp->bio_length;
			auio.uio_iov = &aiovec;
			auio.uio_iovcnt = 1;
			auio.uio_resid = bp->bio_length;
			auio.uio_offset = bp->bio_offset;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = bp->bio_cmd == BIO_READ ? UIO_READ :
			    UIO_WRITE;
			auio.uio_td = curthread;
			error = nvdimm_spa_uio(spa, &auio);
		}
		devstat_end_transaction_bio(spa->spa_g_devstat, bp);
completed:
		/* Report progress only for bios that actually succeeded. */
		if (error == 0)
			bp->bio_completed = bp->bio_length;
		g_io_deliver(bp, error);
	}
}

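/*
 * GEOM start routine: account the bio, queue it, and kick the worker.
 * The actual copy is deferred to the worker because it may sleep.
 */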
static void
nvdimm_spa_g_start(struct bio *bp)
{
	struct SPA_mapping *spa;

	spa = bp->bio_to->geom->softc;
	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		mtx_lock(&spa->spa_g_stat_mtx);
		devstat_start_transaction_bio(spa->spa_g_devstat, bp);
		mtx_unlock(&spa->spa_g_stat_mtx);
	}
	mtx_lock(&spa->spa_g_mtx);
	bioq_disksort(&spa->spa_g_queue, bp);
	wakeup(&spa->spa_g_queue);
	mtx_unlock(&spa->spa_g_mtx);
}

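/* No exclusive access accounting is needed; always grant access. */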
static int
nvdimm_spa_g_access(struct g_provider *pp, int r, int w, int e)
{

	return (0);
}

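/* GEOM class glue; the init/fini hooks drive NFIT parsing and teardown. */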
static g_init_t nvdimm_spa_g_init;
static g_fini_t nvdimm_spa_g_fini;

struct g_class nvdimm_spa_g_class = {
	.name =		"SPA",
	.version =	G_VERSION,
	.start =	nvdimm_spa_g_start,
	.access =	nvdimm_spa_g_access,
	.init =		nvdimm_spa_g_init,
	.fini =		nvdimm_spa_g_fini,
};
DECLARE_GEOM_CLASS(nvdimm_spa_g_class, g_spa);

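/*
 * Instantiate one SPA range: record its NFIT attributes, map it into
 * KVA if possible, create the backing SG VM object, the devfs node, the
 * GEOM provider, and the worker process.  Partial failures are
 * tolerated; the first error is returned but usable interfaces remain.
 */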
static int
nvdimm_spa_init_one(struct SPA_mapping *spa, ACPI_NFIT_SYSTEM_ADDRESS *nfitaddr,
    int spa_type)
{
	struct make_dev_args mda;
	struct sglist *spa_sg;
	int error, error1;

	spa->spa_type = spa_type;
	spa->spa_domain = ((nfitaddr->Flags & ACPI_NFIT_PROXIMITY_VALID) != 0) ?
	    nfitaddr->ProximityDomain : -1;
	spa->spa_nfit_idx = nfitaddr->RangeIndex;
	spa->spa_phys_base = nfitaddr->Address;
	spa->spa_len = nfitaddr->Length;
	spa->spa_efi_mem_flags = nfitaddr->MemoryMapping;
	if (bootverbose) {
		printf("NVDIMM SPA%d base %#016jx len %#016jx %s fl %#jx\n",
		    spa->spa_nfit_idx,
		    (uintmax_t)spa->spa_phys_base, (uintmax_t)spa->spa_len,
		    nvdimm_SPA_uuid_list[spa_type].u_name,
		    spa->spa_efi_mem_flags);
	}
	if (!nvdimm_SPA_uuid_list[spa_type].u_usr_acc)
		return (0);

	error1 = pmap_large_map(spa->spa_phys_base, spa->spa_len,
	    &spa->spa_kva, nvdimm_spa_memattr(spa));
	if (error1 != 0) {
		printf("NVDIMM SPA%d cannot map into KVA, error %d\n",
		    spa->spa_nfit_idx, error1);
		spa->spa_kva = NULL;
	}

	spa_sg = sglist_alloc(1, M_WAITOK);
	error = sglist_append_phys(spa_sg, spa->spa_phys_base,
	    spa->spa_len);
	if (error == 0) {
		spa->spa_obj = vm_pager_allocate(OBJT_SG, spa_sg, spa->spa_len,
		    VM_PROT_ALL, 0, NULL);
		if (spa->spa_obj == NULL) {
			printf("NVDIMM SPA%d failed to alloc vm object\n",
			    spa->spa_nfit_idx);
			sglist_free(spa_sg);
		}
	} else {
		printf("NVDIMM SPA%d failed to init sglist, error %d\n",
		    spa->spa_nfit_idx, error);
		sglist_free(spa_sg);
	}

	make_dev_args_init(&mda);
	mda.mda_flags = MAKEDEV_WAITOK | MAKEDEV_CHECKNAME;
	mda.mda_devsw = &spa_cdevsw;
	mda.mda_cr = NULL;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_OPERATOR;
	mda.mda_mode = 0660;
	mda.mda_si_drv1 = spa;
	error = make_dev_s(&mda, &spa->spa_dev, "nvdimm_spa%d",
	    spa->spa_nfit_idx);
	if (error != 0) {
		printf("NVDIMM SPA%d cannot create devfs node, error %d\n",
		    spa->spa_nfit_idx, error);
		if (error1 == 0)
			error1 = error;
	}

	bioq_init(&spa->spa_g_queue);
	mtx_init(&spa->spa_g_mtx, "spag", NULL, MTX_DEF);
	mtx_init(&spa->spa_g_stat_mtx, "spagst", NULL, MTX_DEF);
	spa->spa_g_proc_run = true;
	spa->spa_g_proc_exiting = false;
	error = kproc_create(nvdimm_spa_g_thread, spa, &spa->spa_g_proc, 0, 0,
	    "g_spa%d", spa->spa_nfit_idx);
	if (error != 0) {
		printf("NVDIMM SPA%d cannot create geom worker, error %d\n",
		    spa->spa_nfit_idx, error);
		if (error1 == 0)
			error1 = error;
	} else {
		g_topology_assert();
		spa->spa_g = g_new_geomf(&nvdimm_spa_g_class, "spa%d",
		    spa->spa_nfit_idx);
		spa->spa_g->softc = spa;
		spa->spa_p = g_new_providerf(spa->spa_g, "spa%d",
		    spa->spa_nfit_idx);
		spa->spa_p->mediasize = spa->spa_len;
		spa->spa_p->sectorsize = DEV_BSIZE;
		spa->spa_p->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE |
		    G_PF_ACCEPT_UNMAPPED;
		g_error_provider(spa->spa_p, 0);
		spa->spa_g_devstat = devstat_new_entry("spa", spa->spa_nfit_idx,
		    DEV_BSIZE, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
		    DEVSTAT_PRIORITY_MAX);
	}
	return (error1);
}

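/*
 * Tear down one SPA range in roughly the reverse order of creation: stop
 * the worker, wither the geom, drop the devstat entry, destroy the devfs
 * node, and release the VM object and KVA mapping.
 */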
static void
nvdimm_spa_fini_one(struct SPA_mapping *spa)
{

	/* The worker may not exist if kproc_create() failed during init. */
	if (spa->spa_g_proc != NULL) {
		mtx_lock(&spa->spa_g_mtx);
		spa->spa_g_proc_run = false;
		wakeup(&spa->spa_g_queue);
		while (!spa->spa_g_proc_exiting)
			msleep(&spa->spa_g_queue, &spa->spa_g_mtx, PRIBIO,
			    "spa_e", 0);
		mtx_unlock(&spa->spa_g_mtx);
	}
	if (spa->spa_g != NULL) {
		g_topology_lock();
		g_wither_geom(spa->spa_g, ENXIO);
		g_topology_unlock();
		spa->spa_g = NULL;
		spa->spa_p = NULL;
	}
	if (spa->spa_g_devstat != NULL) {
		devstat_remove_entry(spa->spa_g_devstat);
		spa->spa_g_devstat = NULL;
	}
	if (spa->spa_dev != NULL) {
		destroy_dev(spa->spa_dev);
		spa->spa_dev = NULL;
	}
	vm_object_deallocate(spa->spa_obj);
	if (spa->spa_kva != NULL) {
		pmap_large_unmap(spa->spa_kva, spa->spa_len);
		spa->spa_kva = NULL;
	}
	/* Mappings whose type was never matched leave the mutexes zeroed. */
	if (mtx_initialized(&spa->spa_g_mtx)) {
		mtx_destroy(&spa->spa_g_mtx);
		mtx_destroy(&spa->spa_g_stat_mtx);
	}
}

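/*
 * NFIT iteration callback: match the SPA subtable's RangeGuid against
 * the known types and instantiate the mapping on a hit.
 */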
static int
nvdimm_spa_parse(void *nfitsubtbl, void *arg)
{
	ACPI_NFIT_SYSTEM_ADDRESS *nfitaddr;
	struct SPA_mapping *spa;
	int error, *i, j;

	i = arg;
	spa = &spa_mappings[*i];
	nfitaddr = nfitsubtbl;

	for (j = 0; j < nitems(nvdimm_SPA_uuid_list); j++) {
		/* XXXKIB: is ACPI UUID representation compatible ? */
		if (uuidcmp((struct uuid *)&nfitaddr->RangeGuid,
		    &nvdimm_SPA_uuid_list[j].u_id) != 0)
			continue;
		error = nvdimm_spa_init_one(spa, nfitaddr, j);
		if (error != 0)
			nvdimm_spa_fini_one(spa);
		break;
	}
	if (j == nitems(nvdimm_SPA_uuid_list) && bootverbose) {
		printf("Unknown SPA UUID %d ", nfitaddr->RangeIndex);
		printf_uuid((struct uuid *)&nfitaddr->RangeGuid);
		printf("\n");
	}
	(*i)++;
	return (0);
}

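/*
 * Parse all SPA subtables from the NFIT: convert the known UUID strings
 * to binary form once, size and allocate the mapping table, then
 * instantiate each range.
 */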
static int
nvdimm_spa_init1(ACPI_TABLE_NFIT *nfitbl)
{
	struct nvdimm_SPA_uuid_list_elm *sle;
	int error, i;

	for (i = 0; i < nitems(nvdimm_SPA_uuid_list); i++) {
		sle = &nvdimm_SPA_uuid_list[i];
		error = parse_uuid(sle->u_id_str, &sle->u_id);
		if (error != 0) {
			if (bootverbose)
				printf("nvdimm_identify: error %d parsing "
				    "known SPA UUID %d %s\n", error, i,
				    sle->u_id_str);
			return (error);
		}
	}

	error = nvdimm_iterate_nfit(nfitbl, ACPI_NFIT_TYPE_SYSTEM_ADDRESS,
	    nvdimm_spa_count, &spa_mappings_cnt);
	if (error != 0)
		return (error);
	spa_mappings = malloc(sizeof(struct SPA_mapping) * spa_mappings_cnt,
	    M_NVDIMM, M_WAITOK | M_ZERO);
	i = 0;
	error = nvdimm_iterate_nfit(nfitbl, ACPI_NFIT_TYPE_SYSTEM_ADDRESS,
	    nvdimm_spa_parse, &i);
	if (error != 0) {
		free(spa_mappings, M_NVDIMM);
		spa_mappings = NULL;
		return (error);
	}
	return (0);
}

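/*
 * GEOM class init hook: locate the NFIT through ACPICA and build the
 * SPA mappings.  Runs with the GEOM topology lock held, as the
 * g_topology_assert() in nvdimm_spa_init_one() relies on.
 */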
static void
nvdimm_spa_g_init(struct g_class *mp __unused)
{
	ACPI_TABLE_NFIT *nfitbl;
	ACPI_STATUS status;
	int error;

	spa_mappings_cnt = 0;
	spa_mappings = NULL;
	if (acpi_disabled("nvdimm"))
		return;
	status = AcpiGetTable(ACPI_SIG_NFIT, 1, (ACPI_TABLE_HEADER **)&nfitbl);
	if (ACPI_FAILURE(status)) {
		if (bootverbose)
			printf("nvdimm_spa_g_init: cannot find NFIT\n");
		return;
	}
	error = nvdimm_spa_init1(nfitbl);
	if (error != 0)
		printf("nvdimm_spa_g_init: error %d\n", error);
	AcpiPutTable(&nfitbl->Header);
}

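/* GEOM class fini hook: tear down every mapping and free the table. */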
static void
nvdimm_spa_g_fini(struct g_class *mp __unused)
{
	int i;

	if (spa_mappings == NULL)
		return;
	for (i = 0; i < spa_mappings_cnt; i++)
		nvdimm_spa_fini_one(&spa_mappings[i]);
	free(spa_mappings, M_NVDIMM);
	spa_mappings = NULL;
	spa_mappings_cnt = 0;
}
633