/*-
 * Copyright (c) 2017, 2018 The FreeBSD Foundation
 * All rights reserved.
 * Copyright (c) 2018, 2019 Intel Corporation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/efi.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <sys/uio.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/acuuid.h>
#include <dev/acpica/acpivar.h>
#include <dev/nvdimm/nvdimm_var.h>

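/*
 * UUIDs identifying the SPA range types defined by the ACPI NFIT
 * specification.  The initializers below spell out the GUID bytes so
 * that a parsed NFIT System Physical Address Range Structure can be
 * matched against them by its Address Range Type GUID field.
 */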
#define UUID_INITIALIZER_VOLATILE_MEMORY \
    {0x7305944f,0xfdda,0x44e3,0xb1,0x6c,{0x3f,0x22,0xd2,0x52,0xe5,0xd0}}
#define UUID_INITIALIZER_PERSISTENT_MEMORY \
    {0x66f0d379,0xb4f3,0x4074,0xac,0x43,{0x0d,0x33,0x18,0xb7,0x8c,0xdb}}
#define UUID_INITIALIZER_CONTROL_REGION \
    {0x92f701f6,0x13b4,0x405d,0x91,0x0b,{0x29,0x93,0x67,0xe8,0x23,0x4c}}
#define UUID_INITIALIZER_DATA_REGION \
    {0x91af0530,0x5d86,0x470e,0xa6,0xb0,{0x0a,0x2d,0xb9,0x40,0x82,0x49}}
#define UUID_INITIALIZER_VOLATILE_VIRTUAL_DISK \
    {0x77ab535a,0x45fc,0x624b,0x55,0x60,{0xf7,0xb2,0x81,0xd1,0xf9,0x6e}}
#define UUID_INITIALIZER_VOLATILE_VIRTUAL_CD \
    {0x3d5abd30,0x4175,0x87ce,0x6d,0x64,{0xd2,0xad,0xe5,0x23,0xc4,0xbb}}
#define UUID_INITIALIZER_PERSISTENT_VIRTUAL_DISK \
    {0x5cea02c9,0x4d07,0x69d3,0x26,0x9f,{0x44,0x96,0xfb,0xe0,0x96,0xf9}}
#define UUID_INITIALIZER_PERSISTENT_VIRTUAL_CD \
    {0x08018188,0x42cd,0xbb48,0x10,0x0f,{0x53,0x87,0xd5,0x3d,0xed,0x3d}}

static struct nvdimm_SPA_uuid_list_elm {
	const char		*u_name;
	struct uuid		u_id;
	const bool		u_usr_acc;
} nvdimm_SPA_uuid_list[] = {
	[SPA_TYPE_VOLATILE_MEMORY] = {
		.u_name =	"VOLA MEM ",
		.u_id =		UUID_INITIALIZER_VOLATILE_MEMORY,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_PERSISTENT_MEMORY] = {
		.u_name =	"PERS MEM",
		.u_id =		UUID_INITIALIZER_PERSISTENT_MEMORY,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_CONTROL_REGION] = {
		.u_name =	"CTRL RG ",
		.u_id =		UUID_INITIALIZER_CONTROL_REGION,
		.u_usr_acc =	false,
	},
	[SPA_TYPE_DATA_REGION] = {
		.u_name =	"DATA RG ",
		.u_id =		UUID_INITIALIZER_DATA_REGION,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_VOLATILE_VIRTUAL_DISK] = {
		.u_name =	"VIRT DSK",
		.u_id =		UUID_INITIALIZER_VOLATILE_VIRTUAL_DISK,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_VOLATILE_VIRTUAL_CD] = {
		.u_name =	"VIRT CD ",
		.u_id =		UUID_INITIALIZER_VOLATILE_VIRTUAL_CD,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_PERSISTENT_VIRTUAL_DISK] = {
		.u_name =	"PV DSK  ",
		.u_id =		UUID_INITIALIZER_PERSISTENT_VIRTUAL_DISK,
		.u_usr_acc =	true,
	},
	[SPA_TYPE_PERSISTENT_VIRTUAL_CD] = {
		.u_name =	"PV CD   ",
		.u_id =		UUID_INITIALIZER_PERSISTENT_VIRTUAL_CD,
		.u_usr_acc =	true,
	},
};

enum SPA_mapping_type
nvdimm_spa_type_from_uuid(struct uuid *uuid)
{
	int j;

	for (j = 0; j < nitems(nvdimm_SPA_uuid_list); j++) {
		if (uuidcmp(uuid, &nvdimm_SPA_uuid_list[j].u_id) != 0)
			continue;
		return (j);
	}
	return (SPA_TYPE_UNKNOWN);
}

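/*
 * Translate the EFI memory attribute flags reported for the SPA range
 * into a VM memory attribute, preferring the most cacheable mode the
 * range advertises.  A range advertising none of the known attributes
 * is mapped uncacheable as the safe fallback.
 */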
static vm_memattr_t
nvdimm_spa_memattr(struct nvdimm_spa_dev *dev)
{
	vm_memattr_t mode;

	if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WB) != 0)
		mode = VM_MEMATTR_WRITE_BACK;
	else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WT) != 0)
		mode = VM_MEMATTR_WRITE_THROUGH;
	else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WC) != 0)
		mode = VM_MEMATTR_WRITE_COMBINING;
	else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WP) != 0)
		mode = VM_MEMATTR_WRITE_PROTECTED;
	else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_UC) != 0)
		mode = VM_MEMATTR_UNCACHEABLE;
	else {
		if (bootverbose)
			printf("SPA mapping attr %#lx unsupported\n",
			    dev->spa_efi_mem_flags);
		mode = VM_MEMATTR_UNCACHEABLE;
	}
	return (mode);
}

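/*
 * Copy data between the SPA range and a uio.  If the range could not
 * be mapped into KVA, a throwaway fake page is retargeted at each
 * physical page under the transfer and uiomove_fromphys() performs the
 * copy; otherwise plain uiomove() on the permanent mapping is used.
 */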
static int
nvdimm_spa_uio(struct nvdimm_spa_dev *dev, struct uio *uio)
{
	struct vm_page m, *ma;
	off_t off;
	vm_memattr_t mattr;
	int error, n;

	error = 0;
	if (dev->spa_kva == NULL) {
		mattr = nvdimm_spa_memattr(dev);
		bzero(&m, sizeof(m));
		vm_page_initfake(&m, 0, mattr);
		ma = &m;
		while (uio->uio_resid > 0) {
			if (uio->uio_offset >= dev->spa_len)
				break;
			off = dev->spa_phys_base + uio->uio_offset;
			vm_page_updatefake(&m, trunc_page(off), mattr);
			n = PAGE_SIZE;
			if (n > uio->uio_resid)
				n = uio->uio_resid;
			error = uiomove_fromphys(&ma, off & PAGE_MASK, n, uio);
			if (error != 0)
				break;
		}
	} else {
		while (uio->uio_resid > 0) {
			if (uio->uio_offset >= dev->spa_len)
				break;
			n = INT_MAX;
			if (n > uio->uio_resid)
				n = uio->uio_resid;
			if (uio->uio_offset + n > dev->spa_len)
				n = dev->spa_len - uio->uio_offset;
			error = uiomove((char *)dev->spa_kva + uio->uio_offset,
			    n, uio);
			if (error != 0)
				break;
		}
	}
	return (error);
}

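/* cdevsw read/write entry point; both directions share nvdimm_spa_uio(). */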
static int
nvdimm_spa_rw(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (nvdimm_spa_uio(dev->si_drv1, uio));
}

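/*
 * Implement just enough of the disk ioctls for consumers to discover
 * the sector size and the total size of the range.
 */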
static int
nvdimm_spa_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct nvdimm_spa_dev *dev;
	int error;

	dev = cdev->si_drv1;
	error = 0;
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = DEV_BSIZE;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = dev->spa_len;
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

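/*
 * Hand out a reference to the SG-backed VM object covering the whole
 * SPA range, giving mmap(2) callers direct load/store access to the
 * NVDIMM pages.
 */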
static int
nvdimm_spa_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    vm_object_t *objp, int nprot)
{
	struct nvdimm_spa_dev *dev;

	dev = cdev->si_drv1;
	if (dev->spa_obj == NULL)
		return (ENXIO);
	if (*offset >= dev->spa_len || *offset + size < *offset ||
	    *offset + size > dev->spa_len)
		return (EINVAL);
	vm_object_reference(dev->spa_obj);
	*objp = dev->spa_obj;
	return (0);
}

static struct cdevsw spa_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_DISK,
	.d_name =	"nvdimm_spa",
	.d_read =	nvdimm_spa_rw,
	.d_write =	nvdimm_spa_rw,
	.d_ioctl =	nvdimm_spa_ioctl,
	.d_mmap_single = nvdimm_spa_mmap_single,
};
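
/*
 * Sketch of intended userspace use of the cdev (illustrative only; the
 * device name depends on the NFIT range index, "spa1" is assumed here,
 * and error checking is omitted):
 *
 *	int fd = open("/dev/nvdimm_spa1", O_RDWR);
 *	off_t mediasize;
 *	ioctl(fd, DIOCGMEDIASIZE, &mediasize);
 *	char *p = mmap(NULL, mediasize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *
 * Loads and stores through p then go straight to the NVDIMM pages via
 * the VM object returned by nvdimm_spa_mmap_single().
 */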
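
/*
 * Service an unmapped bio when the SPA range itself has no KVA mapping:
 * construct fake vm_page_t's for the SPA pages under the request and
 * copy between them and the bio pages with pmap_copy_pages().
 */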
static void
nvdimm_spa_g_all_unmapped(struct nvdimm_spa_dev *dev, struct bio *bp, int rw)
{
	struct vm_page maa[bp->bio_ma_n];
	vm_page_t ma[bp->bio_ma_n];
	vm_memattr_t mattr;
	int i;

	mattr = nvdimm_spa_memattr(dev);
	for (i = 0; i < nitems(ma); i++) {
		bzero(&maa[i], sizeof(maa[i]));
		vm_page_initfake(&maa[i], dev->spa_phys_base +
		    trunc_page(bp->bio_offset) + PAGE_SIZE * i, mattr);
		ma[i] = &maa[i];
	}
	if (rw == BIO_READ)
		pmap_copy_pages(ma, bp->bio_offset & PAGE_MASK, bp->bio_ma,
		    bp->bio_ma_offset, bp->bio_length);
	else
		pmap_copy_pages(bp->bio_ma, bp->bio_ma_offset, ma,
		    bp->bio_offset & PAGE_MASK, bp->bio_length);
}

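/*
 * GEOM worker thread.  Takes bios off the softc queue and executes
 * them synchronously: BIO_FLUSH writes back the CPU caches covering
 * the whole range, while BIO_READ and BIO_WRITE become uio or
 * page-copy transfers depending on whether the bio and the range are
 * mapped.  The thread exits once spa_g_proc_run is cleared.
 */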
static void
nvdimm_spa_g_thread(void *arg)
{
	struct g_spa *sc;
	struct bio *bp;
	struct uio auio;
	struct iovec aiovec;
	int error;

	sc = arg;
	for (;;) {
		mtx_lock(&sc->spa_g_mtx);
		for (;;) {
			bp = bioq_takefirst(&sc->spa_g_queue);
			if (bp != NULL)
				break;
			msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO,
			    "spa_g", 0);
			if (!sc->spa_g_proc_run) {
				sc->spa_g_proc_exiting = true;
				wakeup(&sc->spa_g_queue);
				mtx_unlock(&sc->spa_g_mtx);
				kproc_exit(0);
			}
			continue;
		}
		mtx_unlock(&sc->spa_g_mtx);
		if (bp->bio_cmd != BIO_READ && bp->bio_cmd != BIO_WRITE &&
		    bp->bio_cmd != BIO_FLUSH) {
			error = EOPNOTSUPP;
			goto completed;
		}

		error = 0;
		if (bp->bio_cmd == BIO_FLUSH) {
			if (sc->dev->spa_kva != NULL) {
				pmap_large_map_wb(sc->dev->spa_kva,
				    sc->dev->spa_len);
			} else {
				pmap_flush_cache_phys_range(
				    (vm_paddr_t)sc->dev->spa_phys_base,
				    (vm_paddr_t)sc->dev->spa_phys_base +
				    sc->dev->spa_len,
				    nvdimm_spa_memattr(sc->dev));
			}
			/*
			 * XXX flush IMC
			 */
			goto completed;
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
			if (sc->dev->spa_kva != NULL) {
				aiovec.iov_base = (char *)sc->dev->spa_kva +
				    bp->bio_offset;
				aiovec.iov_len = bp->bio_length;
				auio.uio_iov = &aiovec;
				auio.uio_iovcnt = 1;
				auio.uio_resid = bp->bio_length;
				auio.uio_offset = bp->bio_offset;
				auio.uio_segflg = UIO_SYSSPACE;
				/*
				 * The bio pages are the "phys" side of
				 * uiomove_fromphys() and the KVA mapping
				 * of the SPA range is the uio side, so
				 * the uio direction is the inverse of
				 * the bio command.
				 */
				auio.uio_rw = bp->bio_cmd == BIO_READ ?
				    UIO_WRITE : UIO_READ;
				auio.uio_td = curthread;
				error = uiomove_fromphys(bp->bio_ma,
				    bp->bio_ma_offset, bp->bio_length, &auio);
				bp->bio_resid = auio.uio_resid;
			} else {
				nvdimm_spa_g_all_unmapped(sc->dev, bp,
				    bp->bio_cmd);
				bp->bio_resid = bp->bio_length;
				error = 0;
			}
		} else {
			aiovec.iov_base = bp->bio_data;
			aiovec.iov_len = bp->bio_length;
			auio.uio_iov = &aiovec;
			auio.uio_iovcnt = 1;
			auio.uio_resid = bp->bio_length;
			auio.uio_offset = bp->bio_offset;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = bp->bio_cmd == BIO_READ ? UIO_READ :
			    UIO_WRITE;
			auio.uio_td = curthread;
			error = nvdimm_spa_uio(sc->dev, &auio);
			bp->bio_resid = auio.uio_resid;
		}
		bp->bio_bcount = bp->bio_length;
		devstat_end_transaction_bio(sc->spa_g_devstat, bp);
completed:
		bp->bio_completed = bp->bio_length;
		g_io_deliver(bp, error);
	}
}

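/*
 * GEOM start method: begin devstat accounting for read/write bios and
 * hand the bio to the worker thread for synchronous processing.
 */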
static void
nvdimm_spa_g_start(struct bio *bp)
{
	struct g_spa *sc;

	sc = bp->bio_to->geom->softc;
	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		mtx_lock(&sc->spa_g_stat_mtx);
		devstat_start_transaction_bio(sc->spa_g_devstat, bp);
		mtx_unlock(&sc->spa_g_stat_mtx);
	}
	mtx_lock(&sc->spa_g_mtx);
	bioq_disksort(&sc->spa_g_queue, bp);
	wakeup(&sc->spa_g_queue);
	mtx_unlock(&sc->spa_g_mtx);
}

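/* GEOM access method: no exclusive access counts are enforced. */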
static int
nvdimm_spa_g_access(struct g_provider *pp, int r, int w, int e)
{

	return (0);
}

static struct g_geom * nvdimm_spa_g_create(struct nvdimm_spa_dev *dev,
    const char *name);
static g_ctl_destroy_geom_t nvdimm_spa_g_destroy_geom;

struct g_class nvdimm_spa_g_class = {
	.name =		"SPA",
	.version =	G_VERSION,
	.start =	nvdimm_spa_g_start,
	.access =	nvdimm_spa_g_access,
	.destroy_geom =	nvdimm_spa_g_destroy_geom,
};
DECLARE_GEOM_CLASS(nvdimm_spa_g_class, g_spa);

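/*
 * Initialize a SPA mapping from its NFIT system address range
 * structure and, for range types accessible to userspace, create the
 * spa%d device and GEOM provider for it.
 */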
int
nvdimm_spa_init(struct SPA_mapping *spa, ACPI_NFIT_SYSTEM_ADDRESS *nfitaddr,
    enum SPA_mapping_type spa_type)
{
	char *name;
	int error;

	spa->spa_type = spa_type;
	spa->spa_nfit_idx = nfitaddr->RangeIndex;
	spa->dev.spa_domain =
	    ((nfitaddr->Flags & ACPI_NFIT_PROXIMITY_VALID) != 0) ?
	    nfitaddr->ProximityDomain : -1;
	spa->dev.spa_phys_base = nfitaddr->Address;
	spa->dev.spa_len = nfitaddr->Length;
	spa->dev.spa_efi_mem_flags = nfitaddr->MemoryMapping;
	if (bootverbose) {
		printf("NVDIMM SPA%d base %#016jx len %#016jx %s fl %#jx\n",
		    spa->spa_nfit_idx,
		    (uintmax_t)spa->dev.spa_phys_base,
		    (uintmax_t)spa->dev.spa_len,
		    nvdimm_SPA_uuid_list[spa_type].u_name,
		    spa->dev.spa_efi_mem_flags);
	}
	if (!nvdimm_SPA_uuid_list[spa_type].u_usr_acc)
		return (0);

	asprintf(&name, M_NVDIMM, "spa%d", spa->spa_nfit_idx);
	error = nvdimm_spa_dev_init(&spa->dev, name);
	free(name, M_NVDIMM);
	return (error);
}

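/*
 * Set up the access paths for a SPA range: a pmap_large_map() KVA
 * mapping, an SG-backed VM object for mmap(2), a /dev/nvdimm_<name>
 * cdev, and a GEOM geom with its provider.  Setup continues past
 * individual failures; the first error encountered is returned.
 */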
int
nvdimm_spa_dev_init(struct nvdimm_spa_dev *dev, const char *name)
{
	struct make_dev_args mda;
	struct sglist *spa_sg;
	char *devname;
	int error, error1;

	error1 = pmap_large_map(dev->spa_phys_base, dev->spa_len,
	    &dev->spa_kva, nvdimm_spa_memattr(dev));
	if (error1 != 0) {
		printf("NVDIMM %s cannot map into KVA, error %d\n", name,
		    error1);
		dev->spa_kva = NULL;
	}

	spa_sg = sglist_alloc(1, M_WAITOK);
	error = sglist_append_phys(spa_sg, dev->spa_phys_base,
	    dev->spa_len);
	if (error == 0) {
		dev->spa_obj = vm_pager_allocate(OBJT_SG, spa_sg, dev->spa_len,
		    VM_PROT_ALL, 0, NULL);
		if (dev->spa_obj == NULL) {
			printf("NVDIMM %s failed to alloc vm object\n", name);
			sglist_free(spa_sg);
		}
	} else {
		printf("NVDIMM %s failed to init sglist, error %d\n", name,
		    error);
		sglist_free(spa_sg);
	}

	make_dev_args_init(&mda);
	mda.mda_flags = MAKEDEV_WAITOK | MAKEDEV_CHECKNAME;
	mda.mda_devsw = &spa_cdevsw;
	mda.mda_cr = NULL;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_OPERATOR;
	mda.mda_mode = 0660;
	mda.mda_si_drv1 = dev;
	asprintf(&devname, M_NVDIMM, "nvdimm_%s", name);
	error = make_dev_s(&mda, &dev->spa_dev, "%s", devname);
	free(devname, M_NVDIMM);
	if (error != 0) {
		printf("NVDIMM %s cannot create devfs node, error %d\n", name,
		    error);
		if (error1 == 0)
			error1 = error;
	}
	dev->spa_g = nvdimm_spa_g_create(dev, name);
	if (dev->spa_g == NULL && error1 == 0)
		error1 = ENXIO;
	return (error1);
}

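/*
 * Create the GEOM geom and provider for the SPA range and spawn the
 * worker thread that services its bio queue.
 */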
static struct g_geom *
nvdimm_spa_g_create(struct nvdimm_spa_dev *dev, const char *name)
{
	struct g_geom *gp;
	struct g_spa *sc;
	int error;

	gp = NULL;
	sc = malloc(sizeof(struct g_spa), M_NVDIMM, M_WAITOK | M_ZERO);
	sc->dev = dev;
	bioq_init(&sc->spa_g_queue);
	mtx_init(&sc->spa_g_mtx, "spag", NULL, MTX_DEF);
	mtx_init(&sc->spa_g_stat_mtx, "spagst", NULL, MTX_DEF);
	sc->spa_g_proc_run = true;
	sc->spa_g_proc_exiting = false;
	error = kproc_create(nvdimm_spa_g_thread, sc, &sc->spa_g_proc, 0, 0,
	    "g_spa");
	if (error != 0) {
		mtx_destroy(&sc->spa_g_mtx);
		mtx_destroy(&sc->spa_g_stat_mtx);
		free(sc, M_NVDIMM);
		printf("NVDIMM %s cannot create geom worker, error %d\n", name,
		    error);
	} else {
		g_topology_lock();
		gp = g_new_geomf(&nvdimm_spa_g_class, "%s", name);
		gp->softc = sc;
		sc->spa_p = g_new_providerf(gp, "%s", name);
		sc->spa_p->mediasize = dev->spa_len;
		sc->spa_p->sectorsize = DEV_BSIZE;
		sc->spa_p->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE |
		    G_PF_ACCEPT_UNMAPPED;
		g_error_provider(sc->spa_p, 0);
		sc->spa_g_devstat = devstat_new_entry("spa", -1, DEV_BSIZE,
		    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
		    DEVSTAT_PRIORITY_MAX);
		g_topology_unlock();
	}
	return (gp);
}

void
nvdimm_spa_fini(struct SPA_mapping *spa)
{

	nvdimm_spa_dev_fini(&spa->dev);
}

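/*
 * Tear down everything nvdimm_spa_dev_init() created: the geom, the
 * cdev, the VM object reference, and the KVA mapping.
 */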
void
nvdimm_spa_dev_fini(struct nvdimm_spa_dev *dev)
{

	if (dev->spa_g != NULL) {
		g_topology_lock();
		nvdimm_spa_g_destroy_geom(NULL, dev->spa_g->class, dev->spa_g);
		g_topology_unlock();
	}
	if (dev->spa_dev != NULL) {
		destroy_dev(dev->spa_dev);
		dev->spa_dev = NULL;
	}
	vm_object_deallocate(dev->spa_obj);
	if (dev->spa_kva != NULL) {
		pmap_large_unmap(dev->spa_kva, dev->spa_len);
		dev->spa_kva = NULL;
	}
}

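/*
 * GEOM destroy_geom method, also called directly from
 * nvdimm_spa_dev_fini(): stop the worker thread and wait for it to
 * exit, then wither the geom and release the softc.
 */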
static int
nvdimm_spa_g_destroy_geom(struct gctl_req *req, struct g_class *cp,
    struct g_geom *gp)
{
	struct g_spa *sc;

	sc = gp->softc;
	mtx_lock(&sc->spa_g_mtx);
	sc->spa_g_proc_run = false;
	wakeup(&sc->spa_g_queue);
	while (!sc->spa_g_proc_exiting)
		msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO, "spa_e", 0);
	mtx_unlock(&sc->spa_g_mtx);
	g_topology_assert();
	g_wither_geom(gp, ENXIO);
	sc->spa_p = NULL;
	if (sc->spa_g_devstat != NULL) {
		devstat_remove_entry(sc->spa_g_devstat);
		sc->spa_g_devstat = NULL;
	}
	mtx_destroy(&sc->spa_g_mtx);
	mtx_destroy(&sc->spa_g_stat_mtx);
	free(sc, M_NVDIMM);
	return (0);
}