/*-
 * Copyright (c) 2017, 2018 The FreeBSD Foundation
 * All rights reserved.
 * Copyright (c) 2018, 2019 Intel Corporation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/efi.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <sys/uio.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/acuuid.h>
#include <dev/acpica/acpivar.h>
#include <dev/nvdimm/nvdimm_var.h>

#define UUID_INITIALIZER_VOLATILE_MEMORY \
    {0x7305944f,0xfdda,0x44e3,0xb1,0x6c,{0x3f,0x22,0xd2,0x52,0xe5,0xd0}}
#define UUID_INITIALIZER_PERSISTENT_MEMORY \
    {0x66f0d379,0xb4f3,0x4074,0xac,0x43,{0x0d,0x33,0x18,0xb7,0x8c,0xdb}}
#define UUID_INITIALIZER_CONTROL_REGION \
    {0x92f701f6,0x13b4,0x405d,0x91,0x0b,{0x29,0x93,0x67,0xe8,0x23,0x4c}}
#define UUID_INITIALIZER_DATA_REGION \
    {0x91af0530,0x5d86,0x470e,0xa6,0xb0,{0x0a,0x2d,0xb9,0x40,0x82,0x49}}
#define UUID_INITIALIZER_VOLATILE_VIRTUAL_DISK \
    {0x77ab535a,0x45fc,0x624b,0x55,0x60,{0xf7,0xb2,0x81,0xd1,0xf9,0x6e}}
#define UUID_INITIALIZER_VOLATILE_VIRTUAL_CD \
    {0x3d5abd30,0x4175,0x87ce,0x6d,0x64,{0xd2,0xad,0xe5,0x23,0xc4,0xbb}}
#define UUID_INITIALIZER_PERSISTENT_VIRTUAL_DISK \
    {0x5cea02c9,0x4d07,0x69d3,0x26,0x9f,{0x44,0x96,0xfb,0xe0,0x96,0xf9}}
#define UUID_INITIALIZER_PERSISTENT_VIRTUAL_CD \
    {0x08018188,0x42cd,0xbb48,0x10,0x0f,{0x53,0x87,0xd5,0x3d,0xed,0x3d}}

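/*
 * Address range type GUIDs defined by the ACPI NFIT specification.
 * u_name is the short label used for boot-time reporting and by
 * nvdimm_spa_type_from_name(); u_usr_acc marks the range types that
 * are exposed to userspace via devfs and GEOM.
 */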
static struct nvdimm_SPA_uuid_list_elm {
	const char		*u_name;
	struct uuid		u_id;
	const bool		u_usr_acc;
} nvdimm_SPA_uuid_list[] = {
	[SPA_TYPE_VOLATILE_MEMORY] = {
		.u_name =	"VOLA MEM ",
		.u_id =		UUID_INITIALIZER_VOLATILE_MEMORY,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_PERSISTENT_MEMORY] = {
		.u_name =	"PERS MEM",
		.u_id =		UUID_INITIALIZER_PERSISTENT_MEMORY,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_CONTROL_REGION] = {
		.u_name =	"CTRL RG ",
		.u_id =		UUID_INITIALIZER_CONTROL_REGION,
		.u_usr_acc =	false,
	},
	[SPA_TYPE_DATA_REGION] = {
		.u_name =	"DATA RG ",
		.u_id =		UUID_INITIALIZER_DATA_REGION,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_VOLATILE_VIRTUAL_DISK] = {
		.u_name =	"VIRT DSK",
		.u_id =		UUID_INITIALIZER_VOLATILE_VIRTUAL_DISK,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_VOLATILE_VIRTUAL_CD] = {
		.u_name =	"VIRT CD ",
		.u_id =		UUID_INITIALIZER_VOLATILE_VIRTUAL_CD,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_PERSISTENT_VIRTUAL_DISK] = {
		.u_name =	"PV DSK  ",
		.u_id =		UUID_INITIALIZER_PERSISTENT_VIRTUAL_DISK,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_PERSISTENT_VIRTUAL_CD] = {
		.u_name =	"PV CD   ",
		.u_id =		UUID_INITIALIZER_PERSISTENT_VIRTUAL_CD,
		.u_usr_acc =	true,
	},
};

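/*
 * Look up a SPA mapping type by its short label in the table above;
 * returns SPA_TYPE_UNKNOWN when nothing matches.
 */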
enum SPA_mapping_type
nvdimm_spa_type_from_name(const char *name)
{
	int j;

	for (j = 0; j < nitems(nvdimm_SPA_uuid_list); j++) {
		if (strcmp(name, nvdimm_SPA_uuid_list[j].u_name) != 0)
			continue;
		return (j);
	}
	return (SPA_TYPE_UNKNOWN);
}

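/*
 * Look up a SPA mapping type by the NFIT range type GUID; returns
 * SPA_TYPE_UNKNOWN when nothing matches.
 */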
enum SPA_mapping_type
nvdimm_spa_type_from_uuid(struct uuid *uuid)
{
	int j;

	for (j = 0; j < nitems(nvdimm_SPA_uuid_list); j++) {
		if (uuidcmp(uuid, &nvdimm_SPA_uuid_list[j].u_id) != 0)
			continue;
		return (j);
	}
	return (SPA_TYPE_UNKNOWN);
}

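/*
 * Derive the VM cache mode from the EFI memory attributes advertised
 * for the range, preferring the most cacheable supported mode and
 * falling back to uncacheable.
 */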
static vm_memattr_t
nvdimm_spa_memattr(struct nvdimm_spa_dev *dev)
{
	vm_memattr_t mode;

	if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WB) != 0)
		mode = VM_MEMATTR_WRITE_BACK;
	else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WT) != 0)
		mode = VM_MEMATTR_WRITE_THROUGH;
	else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WC) != 0)
		mode = VM_MEMATTR_WRITE_COMBINING;
	else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WP) != 0)
		mode = VM_MEMATTR_WRITE_PROTECTED;
	else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_UC) != 0)
		mode = VM_MEMATTR_UNCACHEABLE;
	else {
		if (bootverbose)
			printf("SPA mapping attr %#lx unsupported\n",
			    dev->spa_efi_mem_flags);
		mode = VM_MEMATTR_UNCACHEABLE;
	}
	return (mode);
}

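/*
 * Copy between the SPA range and a uio.  Without a KVA mapping, a
 * single fake page is re-pointed at each physical page in turn and
 * copied through uiomove_fromphys(); with a mapping, plain uiomove()
 * on the mapped address is enough.
 */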
static int
nvdimm_spa_uio(struct nvdimm_spa_dev *dev, struct uio *uio)
{
	struct vm_page m, *ma;
	off_t off;
	vm_memattr_t mattr;
	int error, n;

	error = 0;
	if (dev->spa_kva == NULL) {
		mattr = nvdimm_spa_memattr(dev);
		bzero(&m, sizeof(m));
		vm_page_initfake(&m, 0, mattr);
		ma = &m;
		while (uio->uio_resid > 0) {
			if (uio->uio_offset >= dev->spa_len)
				break;
			off = dev->spa_phys_base + uio->uio_offset;
			vm_page_updatefake(&m, trunc_page(off), mattr);
			n = PAGE_SIZE;
			if (n > uio->uio_resid)
				n = uio->uio_resid;
			error = uiomove_fromphys(&ma, off & PAGE_MASK, n, uio);
			if (error != 0)
				break;
		}
	} else {
		while (uio->uio_resid > 0) {
			if (uio->uio_offset >= dev->spa_len)
				break;
			n = INT_MAX;
			if (n > uio->uio_resid)
				n = uio->uio_resid;
			if (uio->uio_offset + n > dev->spa_len)
				n = dev->spa_len - uio->uio_offset;
			error = uiomove((char *)dev->spa_kva + uio->uio_offset,
			    n, uio);
			if (error != 0)
				break;
		}
	}
	return (error);
}

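/* cdev read/write entry point; both directions share the uio helper. */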
static int
nvdimm_spa_rw(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (nvdimm_spa_uio(dev->si_drv1, uio));
}

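/*
 * Provide the minimal disk ioctls, sector size and media size, so
 * that generic disk consumers can size the device.
 */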
static int
nvdimm_spa_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct nvdimm_spa_dev *dev;
	int error;

	dev = cdev->si_drv1;
	error = 0;
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = DEV_BSIZE;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = dev->spa_len;
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

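/*
 * d_mmap_single: hand out a reference to the SG-backed VM object
 * covering the range once the requested window passes bounds and
 * overflow checks.
 */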
static int
nvdimm_spa_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    vm_object_t *objp, int nprot)
{
	struct nvdimm_spa_dev *dev;

	dev = cdev->si_drv1;
	if (dev->spa_obj == NULL)
		return (ENXIO);
	if (*offset >= dev->spa_len || *offset + size < *offset ||
	    *offset + size > dev->spa_len)
		return (EINVAL);
	vm_object_reference(dev->spa_obj);
	*objp = dev->spa_obj;
	return (0);
}

static struct cdevsw spa_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_DISK,
	.d_name =	"nvdimm_spa",
	.d_read =	nvdimm_spa_rw,
	.d_write =	nvdimm_spa_rw,
	.d_ioctl =	nvdimm_spa_ioctl,
	.d_mmap_single = nvdimm_spa_mmap_single,
};

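/*
 * Service an unmapped bio without a KVA mapping: construct fake pages
 * over the physical range under the bio and copy page-to-page with
 * pmap_copy_pages().
 */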
static void
nvdimm_spa_g_all_unmapped(struct nvdimm_spa_dev *dev, struct bio *bp, int rw)
{
	struct vm_page maa[bp->bio_ma_n];
	vm_page_t ma[bp->bio_ma_n];
	vm_memattr_t mattr;
	int i;

	mattr = nvdimm_spa_memattr(dev);
	for (i = 0; i < nitems(ma); i++) {
		bzero(&maa[i], sizeof(maa[i]));
		vm_page_initfake(&maa[i], dev->spa_phys_base +
		    trunc_page(bp->bio_offset) + PAGE_SIZE * i, mattr);
		ma[i] = &maa[i];
	}
	if (rw == BIO_READ)
		pmap_copy_pages(ma, bp->bio_offset & PAGE_MASK, bp->bio_ma,
		    bp->bio_ma_offset, bp->bio_length);
	else
		pmap_copy_pages(bp->bio_ma, bp->bio_ma_offset, ma,
		    bp->bio_offset & PAGE_MASK, bp->bio_length);
}

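/*
 * Per-provider worker thread.  Takes bios queued by
 * nvdimm_spa_g_start(), services BIO_READ, BIO_WRITE and BIO_FLUSH,
 * and fails anything else with EOPNOTSUPP.  BIO_FLUSH writes back the
 * CPU caches over the whole range.  The thread exits once
 * spa_g_proc_run is cleared by the destroy path.
 */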
static void
nvdimm_spa_g_thread(void *arg)
{
	struct g_spa *sc;
	struct bio *bp;
	struct uio auio;
	struct iovec aiovec;
	int error;

	sc = arg;
	for (;;) {
		mtx_lock(&sc->spa_g_mtx);
		for (;;) {
			bp = bioq_takefirst(&sc->spa_g_queue);
			if (bp != NULL)
				break;
			msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO,
			    "spa_g", 0);
			if (!sc->spa_g_proc_run) {
				sc->spa_g_proc_exiting = true;
				wakeup(&sc->spa_g_queue);
				mtx_unlock(&sc->spa_g_mtx);
				kproc_exit(0);
			}
			continue;
		}
		mtx_unlock(&sc->spa_g_mtx);
		if (bp->bio_cmd != BIO_READ && bp->bio_cmd != BIO_WRITE &&
		    bp->bio_cmd != BIO_FLUSH) {
			error = EOPNOTSUPP;
			goto completed;
		}

		error = 0;
		if (bp->bio_cmd == BIO_FLUSH) {
			if (sc->dev->spa_kva != NULL) {
				pmap_large_map_wb(sc->dev->spa_kva,
				    sc->dev->spa_len);
			} else {
				pmap_flush_cache_phys_range(
				    (vm_paddr_t)sc->dev->spa_phys_base,
				    (vm_paddr_t)sc->dev->spa_phys_base +
				    sc->dev->spa_len,
				    nvdimm_spa_memattr(sc->dev));
			}
			/*
			 * XXX flush IMC
			 */
			goto completed;
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
			if (sc->dev->spa_kva != NULL) {
				aiovec.iov_base = (char *)sc->dev->spa_kva +
				    bp->bio_offset;
				aiovec.iov_len = bp->bio_length;
				auio.uio_iov = &aiovec;
				auio.uio_iovcnt = 1;
				auio.uio_resid = bp->bio_length;
				auio.uio_offset = bp->bio_offset;
				auio.uio_segflg = UIO_SYSSPACE;
				auio.uio_rw = bp->bio_cmd == BIO_READ ?
				    UIO_WRITE : UIO_READ;
				auio.uio_td = curthread;
				error = uiomove_fromphys(bp->bio_ma,
				    bp->bio_ma_offset, bp->bio_length, &auio);
				bp->bio_resid = auio.uio_resid;
			} else {
				nvdimm_spa_g_all_unmapped(sc->dev, bp,
				    bp->bio_cmd);
				bp->bio_resid = bp->bio_length;
				error = 0;
			}
		} else {
			aiovec.iov_base = bp->bio_data;
			aiovec.iov_len = bp->bio_length;
			auio.uio_iov = &aiovec;
			auio.uio_iovcnt = 1;
			auio.uio_resid = bp->bio_length;
			auio.uio_offset = bp->bio_offset;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = bp->bio_cmd == BIO_READ ? UIO_READ :
			    UIO_WRITE;
			auio.uio_td = curthread;
			error = nvdimm_spa_uio(sc->dev, &auio);
			bp->bio_resid = auio.uio_resid;
		}
		bp->bio_bcount = bp->bio_length;
		devstat_end_transaction_bio(sc->spa_g_devstat, bp);
completed:
		bp->bio_completed = bp->bio_length;
		g_io_deliver(bp, error);
	}
}

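/*
 * GEOM start method: record the transaction start and queue the bio
 * for the worker thread.
 */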
static void
nvdimm_spa_g_start(struct bio *bp)
{
	struct g_spa *sc;

	sc = bp->bio_to->geom->softc;
	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		mtx_lock(&sc->spa_g_stat_mtx);
		devstat_start_transaction_bio(sc->spa_g_devstat, bp);
		mtx_unlock(&sc->spa_g_stat_mtx);
	}
	mtx_lock(&sc->spa_g_mtx);
	bioq_disksort(&sc->spa_g_queue, bp);
	wakeup(&sc->spa_g_queue);
	mtx_unlock(&sc->spa_g_mtx);
}

static int
nvdimm_spa_g_access(struct g_provider *pp, int r, int w, int e)
{

	return (0);
}

static struct g_geom *nvdimm_spa_g_create(struct nvdimm_spa_dev *dev,
    const char *name);
static g_ctl_destroy_geom_t nvdimm_spa_g_destroy_geom;

struct g_class nvdimm_spa_g_class = {
	.name =		"SPA",
	.version =	G_VERSION,
	.start =	nvdimm_spa_g_start,
	.access =	nvdimm_spa_g_access,
	.destroy_geom =	nvdimm_spa_g_destroy_geom,
};
DECLARE_GEOM_CLASS(nvdimm_spa_g_class, g_spa);

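/*
 * Fill a SPA mapping from its NFIT system address structure and, for
 * range types that allow user access, create the device nodes named
 * after the NFIT range index.
 */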
int
nvdimm_spa_init(struct SPA_mapping *spa, ACPI_NFIT_SYSTEM_ADDRESS *nfitaddr,
    enum SPA_mapping_type spa_type)
{
	char *name;
	int error;

	spa->spa_type = spa_type;
	spa->spa_nfit_idx = nfitaddr->RangeIndex;
	spa->dev.spa_domain =
	    ((nfitaddr->Flags & ACPI_NFIT_PROXIMITY_VALID) != 0) ?
	    nfitaddr->ProximityDomain : -1;
	spa->dev.spa_phys_base = nfitaddr->Address;
	spa->dev.spa_len = nfitaddr->Length;
	spa->dev.spa_efi_mem_flags = nfitaddr->MemoryMapping;
	if (bootverbose) {
		printf("NVDIMM SPA%d base %#016jx len %#016jx %s fl %#jx\n",
		    spa->spa_nfit_idx,
		    (uintmax_t)spa->dev.spa_phys_base,
		    (uintmax_t)spa->dev.spa_len,
		    nvdimm_SPA_uuid_list[spa_type].u_name,
		    spa->dev.spa_efi_mem_flags);
	}
	if (!nvdimm_SPA_uuid_list[spa_type].u_usr_acc)
		return (0);

	asprintf(&name, M_NVDIMM, "spa%d", spa->spa_nfit_idx);
	error = nvdimm_spa_dev_init(&spa->dev, name);
	free(name, M_NVDIMM);
	return (error);
}

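/*
 * Set up the backing resources for a SPA device: a large-page KVA
 * mapping when the platform allows it, an OBJT_SG VM object for
 * mmap(2), a devfs node and a GEOM provider.  Failure to establish
 * the KVA mapping is not fatal; the physical-page fallbacks above
 * are used instead.
 */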
int
nvdimm_spa_dev_init(struct nvdimm_spa_dev *dev, const char *name)
{
	struct make_dev_args mda;
	struct sglist *spa_sg;
	char *devname;
	int error, error1;

	error1 = pmap_large_map(dev->spa_phys_base, dev->spa_len,
	    &dev->spa_kva, nvdimm_spa_memattr(dev));
	if (error1 != 0) {
		printf("NVDIMM %s cannot map into KVA, error %d\n", name,
		    error1);
		dev->spa_kva = NULL;
	}

	spa_sg = sglist_alloc(1, M_WAITOK);
	error = sglist_append_phys(spa_sg, dev->spa_phys_base,
	    dev->spa_len);
	if (error == 0) {
		dev->spa_obj = vm_pager_allocate(OBJT_SG, spa_sg, dev->spa_len,
		    VM_PROT_ALL, 0, NULL);
		if (dev->spa_obj == NULL) {
			printf("NVDIMM %s failed to alloc vm object\n", name);
			sglist_free(spa_sg);
		}
	} else {
		printf("NVDIMM %s failed to init sglist, error %d\n", name,
		    error);
		sglist_free(spa_sg);
	}

	make_dev_args_init(&mda);
	mda.mda_flags = MAKEDEV_WAITOK | MAKEDEV_CHECKNAME;
	mda.mda_devsw = &spa_cdevsw;
	mda.mda_cr = NULL;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_OPERATOR;
	mda.mda_mode = 0660;
	mda.mda_si_drv1 = dev;
	asprintf(&devname, M_NVDIMM, "nvdimm_%s", name);
	error = make_dev_s(&mda, &dev->spa_dev, "%s", devname);
	free(devname, M_NVDIMM);
	if (error != 0) {
		printf("NVDIMM %s cannot create devfs node, error %d\n", name,
		    error);
		if (error1 == 0)
			error1 = error;
	}
	dev->spa_g = nvdimm_spa_g_create(dev, name);
	if (dev->spa_g == NULL && error1 == 0)
		error1 = ENXIO;
	return (error1);
}

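/*
 * Create the GEOM geom and provider for the range, together with the
 * worker thread and devstat entry.
 */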
static struct g_geom *
nvdimm_spa_g_create(struct nvdimm_spa_dev *dev, const char *name)
{
	struct g_geom *gp;
	struct g_spa *sc;
	int error;

	gp = NULL;
	sc = malloc(sizeof(struct g_spa), M_NVDIMM, M_WAITOK | M_ZERO);
	sc->dev = dev;
	bioq_init(&sc->spa_g_queue);
	mtx_init(&sc->spa_g_mtx, "spag", NULL, MTX_DEF);
	mtx_init(&sc->spa_g_stat_mtx, "spagst", NULL, MTX_DEF);
	sc->spa_g_proc_run = true;
	sc->spa_g_proc_exiting = false;
	error = kproc_create(nvdimm_spa_g_thread, sc, &sc->spa_g_proc, 0, 0,
	    "g_spa");
	if (error != 0) {
		mtx_destroy(&sc->spa_g_mtx);
		mtx_destroy(&sc->spa_g_stat_mtx);
		free(sc, M_NVDIMM);
		printf("NVDIMM %s cannot create geom worker, error %d\n", name,
		    error);
	} else {
		g_topology_lock();
		gp = g_new_geomf(&nvdimm_spa_g_class, "%s", name);
		gp->softc = sc;
		sc->spa_p = g_new_providerf(gp, "%s", name);
		sc->spa_p->mediasize = dev->spa_len;
		sc->spa_p->sectorsize = DEV_BSIZE;
		sc->spa_p->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE |
		    G_PF_ACCEPT_UNMAPPED;
		g_error_provider(sc->spa_p, 0);
		sc->spa_g_devstat = devstat_new_entry("spa", -1, DEV_BSIZE,
		    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
		    DEVSTAT_PRIORITY_MAX);
		g_topology_unlock();
	}
	return (gp);
}

void
nvdimm_spa_fini(struct SPA_mapping *spa)
{

	nvdimm_spa_dev_fini(&spa->dev);
}

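/*
 * Tear down everything created by nvdimm_spa_dev_init(), in reverse
 * order of creation.
 */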
void
nvdimm_spa_dev_fini(struct nvdimm_spa_dev *dev)
{

	if (dev->spa_g != NULL) {
		g_topology_lock();
		nvdimm_spa_g_destroy_geom(NULL, dev->spa_g->class, dev->spa_g);
		g_topology_unlock();
	}
	if (dev->spa_dev != NULL) {
		destroy_dev(dev->spa_dev);
		dev->spa_dev = NULL;
	}
	vm_object_deallocate(dev->spa_obj);
	if (dev->spa_kva != NULL) {
		pmap_large_unmap(dev->spa_kva, dev->spa_len);
		dev->spa_kva = NULL;
	}
}

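/*
 * GEOM destroy_geom method: stop the worker thread, wait for it to
 * announce exit, then wither the geom and release the softc.
 */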
static int
nvdimm_spa_g_destroy_geom(struct gctl_req *req, struct g_class *cp,
    struct g_geom *gp)
{
	struct g_spa *sc;

	sc = gp->softc;
	mtx_lock(&sc->spa_g_mtx);
	sc->spa_g_proc_run = false;
	wakeup(&sc->spa_g_queue);
	while (!sc->spa_g_proc_exiting)
		msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO, "spa_e", 0);
	mtx_unlock(&sc->spa_g_mtx);
	g_topology_assert();
	g_wither_geom(gp, ENXIO);
	sc->spa_p = NULL;
	if (sc->spa_g_devstat != NULL) {
		devstat_remove_entry(sc->spa_g_devstat);
		sc->spa_g_devstat = NULL;
	}
	mtx_destroy(&sc->spa_g_mtx);
	mtx_destroy(&sc->spa_g_stat_mtx);
	free(sc, M_NVDIMM);
	return (0);
}
618