/*-
 * Copyright (c) 2017, 2018 The FreeBSD Foundation
 * All rights reserved.
 * Copyright (c) 2018, 2019 Intel Corporation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/efi.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <sys/uio.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/acuuid.h>
#include <dev/acpica/acpivar.h>
#include <dev/nvdimm/nvdimm_var.h>

#define UUID_INITIALIZER_VOLATILE_MEMORY \
    {0x7305944f,0xfdda,0x44e3,0xb1,0x6c,{0x3f,0x22,0xd2,0x52,0xe5,0xd0}}
#define UUID_INITIALIZER_PERSISTENT_MEMORY \
    {0x66f0d379,0xb4f3,0x4074,0xac,0x43,{0x0d,0x33,0x18,0xb7,0x8c,0xdb}}
#define UUID_INITIALIZER_CONTROL_REGION \
    {0x92f701f6,0x13b4,0x405d,0x91,0x0b,{0x29,0x93,0x67,0xe8,0x23,0x4c}}
#define UUID_INITIALIZER_DATA_REGION \
    {0x91af0530,0x5d86,0x470e,0xa6,0xb0,{0x0a,0x2d,0xb9,0x40,0x82,0x49}}
#define UUID_INITIALIZER_VOLATILE_VIRTUAL_DISK \
    {0x77ab535a,0x45fc,0x624b,0x55,0x60,{0xf7,0xb2,0x81,0xd1,0xf9,0x6e}}
#define UUID_INITIALIZER_VOLATILE_VIRTUAL_CD \
    {0x3d5abd30,0x4175,0x87ce,0x6d,0x64,{0xd2,0xad,0xe5,0x23,0xc4,0xbb}}
#define UUID_INITIALIZER_PERSISTENT_VIRTUAL_DISK \
    {0x5cea02c9,0x4d07,0x69d3,0x26,0x9f,{0x44,0x96,0xfb,0xe0,0x96,0xf9}}
#define UUID_INITIALIZER_PERSISTENT_VIRTUAL_CD \
    {0x08018188,0x42cd,0xbb48,0x10,0x0f,{0x53,0x87,0xd5,0x3d,0xed,0x3d}}

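/*
 * Table of the SPA range type GUIDs defined by the ACPI NFIT
 * specification, indexed by enum SPA_mapping_type.  u_usr_acc marks
 * the range types that get user-visible interfaces (a devfs node and
 * a GEOM provider); control regions stay kernel-only.
 */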
static struct nvdimm_SPA_uuid_list_elm {
	const char *u_name;
	struct uuid u_id;
	const bool u_usr_acc;
} nvdimm_SPA_uuid_list[] = {
	[SPA_TYPE_VOLATILE_MEMORY] = {
		.u_name = "VOLA MEM ",
		.u_id = UUID_INITIALIZER_VOLATILE_MEMORY,
		.u_usr_acc = true,
	},
	[SPA_TYPE_PERSISTENT_MEMORY] = {
		.u_name = "PERS MEM",
		.u_id = UUID_INITIALIZER_PERSISTENT_MEMORY,
		.u_usr_acc = true,
	},
	[SPA_TYPE_CONTROL_REGION] = {
		.u_name = "CTRL RG ",
		.u_id = UUID_INITIALIZER_CONTROL_REGION,
		.u_usr_acc = false,
	},
	[SPA_TYPE_DATA_REGION] = {
		.u_name = "DATA RG ",
		.u_id = UUID_INITIALIZER_DATA_REGION,
		.u_usr_acc = true,
	},
	[SPA_TYPE_VOLATILE_VIRTUAL_DISK] = {
		.u_name = "VIRT DSK",
		.u_id = UUID_INITIALIZER_VOLATILE_VIRTUAL_DISK,
		.u_usr_acc = true,
	},
	[SPA_TYPE_VOLATILE_VIRTUAL_CD] = {
		.u_name = "VIRT CD ",
		.u_id = UUID_INITIALIZER_VOLATILE_VIRTUAL_CD,
		.u_usr_acc = true,
	},
	[SPA_TYPE_PERSISTENT_VIRTUAL_DISK] = {
		.u_name = "PV DSK ",
		.u_id = UUID_INITIALIZER_PERSISTENT_VIRTUAL_DISK,
		.u_usr_acc = true,
	},
	[SPA_TYPE_PERSISTENT_VIRTUAL_CD] = {
		.u_name = "PV CD ",
		.u_id = UUID_INITIALIZER_PERSISTENT_VIRTUAL_CD,
		.u_usr_acc = true,
	},
};

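/*
 * Resolve a SPA type from its table name.  Returns SPA_TYPE_UNKNOWN
 * when the name matches no table entry.
 */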
enum SPA_mapping_type
nvdimm_spa_type_from_name(const char *name)
{
	int j;

	for (j = 0; j < nitems(nvdimm_SPA_uuid_list); j++) {
		if (strcmp(name, nvdimm_SPA_uuid_list[j].u_name) != 0)
			continue;
		return (j);
	}
	return (SPA_TYPE_UNKNOWN);
}

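/*
 * Resolve a SPA type from the GUID recorded in an NFIT SPA range
 * structure.  Returns SPA_TYPE_UNKNOWN for GUIDs not in the table.
 */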
enum SPA_mapping_type
nvdimm_spa_type_from_uuid(struct uuid *uuid)
{
	int j;

	for (j = 0; j < nitems(nvdimm_SPA_uuid_list); j++) {
		if (uuidcmp(uuid, &nvdimm_SPA_uuid_list[j].u_id) != 0)
			continue;
		return (j);
	}
	return (SPA_TYPE_UNKNOWN);
}

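/*
 * Report whether ranges of the given SPA type are exposed to
 * userspace.  Out-of-range type values are treated as inaccessible.
 */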
bool
nvdimm_spa_type_user_accessible(enum SPA_mapping_type spa_type)
{

	if ((int)spa_type < 0 || spa_type >= nitems(nvdimm_SPA_uuid_list))
		return (false);
	return (nvdimm_SPA_uuid_list[spa_type].u_usr_acc);
}

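/*
 * Select the strongest cache mode permitted by the EFI memory
 * attributes reported for the range, falling back to uncacheable
 * when no recognized caching attribute is set.
 */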
static vm_memattr_t
nvdimm_spa_memattr(uint64_t efi_mem_flags)
{
	vm_memattr_t mode;

	if ((efi_mem_flags & EFI_MD_ATTR_WB) != 0)
		mode = VM_MEMATTR_WRITE_BACK;
	else if ((efi_mem_flags & EFI_MD_ATTR_WT) != 0)
		mode = VM_MEMATTR_WRITE_THROUGH;
	else if ((efi_mem_flags & EFI_MD_ATTR_WC) != 0)
		mode = VM_MEMATTR_WRITE_COMBINING;
	else if ((efi_mem_flags & EFI_MD_ATTR_WP) != 0)
		mode = VM_MEMATTR_WRITE_PROTECTED;
	else if ((efi_mem_flags & EFI_MD_ATTR_UC) != 0)
		mode = VM_MEMATTR_UNCACHEABLE;
	else {
		if (bootverbose)
			printf("SPA mapping attr %#lx unsupported\n",
			    efi_mem_flags);
		mode = VM_MEMATTR_UNCACHEABLE;
	}
	return (mode);
}

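/*
 * Copy between a SPA range and a uio.  If the range has no permanent
 * KVA mapping, a single fake vm_page is retargeted at each physical
 * page in turn and uiomove_fromphys() performs the copy; the chunk
 * size is clamped so that one copy never crosses the fake page.
 * With a KVA mapping the copy is a plain uiomove().  Transfers are
 * truncated at the end of the range.
 */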
static int
nvdimm_spa_uio(struct nvdimm_spa_dev *dev, struct uio *uio)
{
	struct vm_page m, *ma;
	off_t off;
	vm_memattr_t mattr;
	int error, n;

	error = 0;
	if (dev->spa_kva == NULL) {
		mattr = dev->spa_memattr;
		bzero(&m, sizeof(m));
		vm_page_initfake(&m, 0, mattr);
		ma = &m;
		while (uio->uio_resid > 0) {
			if (uio->uio_offset >= dev->spa_len)
				break;
			off = dev->spa_phys_base + uio->uio_offset;
			vm_page_updatefake(&m, trunc_page(off), mattr);
			n = PAGE_SIZE - (off & PAGE_MASK);
			if (n > uio->uio_resid)
				n = uio->uio_resid;
			if (uio->uio_offset + n > dev->spa_len)
				n = dev->spa_len - uio->uio_offset;
			error = uiomove_fromphys(&ma, off & PAGE_MASK, n, uio);
			if (error != 0)
				break;
		}
	} else {
		while (uio->uio_resid > 0) {
			if (uio->uio_offset >= dev->spa_len)
				break;
			n = INT_MAX;
			if (n > uio->uio_resid)
				n = uio->uio_resid;
			if (uio->uio_offset + n > dev->spa_len)
				n = dev->spa_len - uio->uio_offset;
			error = uiomove((char *)dev->spa_kva + uio->uio_offset,
			    n, uio);
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
nvdimm_spa_rw(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (nvdimm_spa_uio(dev->si_drv1, uio));
}

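/*
 * Minimal disk ioctls, so that tools expecting a disk-like device can
 * learn the sector size and media size of the range.
 */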
static int
nvdimm_spa_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct nvdimm_spa_dev *dev;
	int error;

	dev = cdev->si_drv1;
	error = 0;
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = DEV_BSIZE;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = dev->spa_len;
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

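/*
 * mmap(2) support: hand out a reference to the OBJT_SG VM object
 * backing the range, after range-checking the request (including
 * overflow of offset + size).
 */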
static int
nvdimm_spa_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    vm_object_t *objp, int nprot)
{
	struct nvdimm_spa_dev *dev;

	dev = cdev->si_drv1;
	if (dev->spa_obj == NULL)
		return (ENXIO);
	if (*offset >= dev->spa_len || *offset + size < *offset ||
	    *offset + size > dev->spa_len)
		return (EINVAL);
	vm_object_reference(dev->spa_obj);
	*objp = dev->spa_obj;
	return (0);
}

static struct cdevsw spa_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_DISK,
	.d_name = "nvdimm_spa",
	.d_read = nvdimm_spa_rw,
	.d_write = nvdimm_spa_rw,
	.d_ioctl = nvdimm_spa_ioctl,
	.d_mmap_single = nvdimm_spa_mmap_single,
};

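/*
 * Service an unmapped bio against an unmapped SPA range: build an
 * array of fake vm_pages covering the affected physical pages of the
 * range and let pmap_copy_pages() move the data page by page in the
 * requested direction.
 */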
static void
nvdimm_spa_g_all_unmapped(struct nvdimm_spa_dev *dev, struct bio *bp, int rw)
{
	struct vm_page maa[bp->bio_ma_n];
	vm_page_t ma[bp->bio_ma_n];
	vm_memattr_t mattr;
	int i;

	mattr = dev->spa_memattr;
	for (i = 0; i < nitems(ma); i++) {
		bzero(&maa[i], sizeof(maa[i]));
		vm_page_initfake(&maa[i], dev->spa_phys_base +
		    trunc_page(bp->bio_offset) + PAGE_SIZE * i, mattr);
		ma[i] = &maa[i];
	}
	if (rw == BIO_READ)
		pmap_copy_pages(ma, bp->bio_offset & PAGE_MASK, bp->bio_ma,
		    bp->bio_ma_offset, bp->bio_length);
	else
		pmap_copy_pages(bp->bio_ma, bp->bio_ma_offset, ma,
		    bp->bio_offset & PAGE_MASK, bp->bio_length);
}

312
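/*
 * GEOM worker: take bios off the queue and execute them.  BIO_FLUSH
 * writes the CPU caches back over the whole range; reads and writes
 * are translated into uio transfers, or into pmap_copy_pages() when
 * both sides are unmapped.  The thread exits once spa_g_proc_run is
 * cleared by nvdimm_spa_g_destroy_geom().
 */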
static void
nvdimm_spa_g_thread(void *arg)
{
	struct g_spa *sc;
	struct bio *bp;
	struct uio auio;
	struct iovec aiovec;
	int error;

	sc = arg;
	for (;;) {
		mtx_lock(&sc->spa_g_mtx);
		for (;;) {
			bp = bioq_takefirst(&sc->spa_g_queue);
			if (bp != NULL)
				break;
			msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO,
			    "spa_g", 0);
			if (!sc->spa_g_proc_run) {
				sc->spa_g_proc_exiting = true;
				wakeup(&sc->spa_g_queue);
				mtx_unlock(&sc->spa_g_mtx);
				kproc_exit(0);
			}
			continue;
		}
		mtx_unlock(&sc->spa_g_mtx);
		if (bp->bio_cmd != BIO_READ && bp->bio_cmd != BIO_WRITE &&
		    bp->bio_cmd != BIO_FLUSH) {
			error = EOPNOTSUPP;
			goto completed;
		}

		error = 0;
		if (bp->bio_cmd == BIO_FLUSH) {
			if (sc->dev->spa_kva != NULL) {
				pmap_large_map_wb(sc->dev->spa_kva,
				    sc->dev->spa_len);
			} else {
				pmap_flush_cache_phys_range(
				    (vm_paddr_t)sc->dev->spa_phys_base,
				    (vm_paddr_t)sc->dev->spa_phys_base +
				    sc->dev->spa_len, sc->dev->spa_memattr);
			}
			/*
			 * XXX flush IMC
			 */
			goto completed;
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
			if (sc->dev->spa_kva != NULL) {
				aiovec.iov_base = (char *)sc->dev->spa_kva +
				    bp->bio_offset;
				aiovec.iov_len = bp->bio_length;
				auio.uio_iov = &aiovec;
				auio.uio_iovcnt = 1;
				auio.uio_resid = bp->bio_length;
				auio.uio_offset = bp->bio_offset;
				auio.uio_segflg = UIO_SYSSPACE;
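				/*
				 * uio_rw is inverted relative to the bio:
				 * a BIO_READ pulls data out of the iovec
				 * (the SPA KVA mapping) into the bio pages,
				 * which is a UIO_WRITE from the point of
				 * view of uiomove_fromphys().
				 */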
				auio.uio_rw = bp->bio_cmd == BIO_READ ?
				    UIO_WRITE : UIO_READ;
				auio.uio_td = curthread;
				error = uiomove_fromphys(bp->bio_ma,
				    bp->bio_ma_offset, bp->bio_length, &auio);
				bp->bio_resid = auio.uio_resid;
			} else {
				nvdimm_spa_g_all_unmapped(sc->dev, bp,
				    bp->bio_cmd);
				bp->bio_resid = bp->bio_length;
				error = 0;
			}
		} else {
			aiovec.iov_base = bp->bio_data;
			aiovec.iov_len = bp->bio_length;
			auio.uio_iov = &aiovec;
			auio.uio_iovcnt = 1;
			auio.uio_resid = bp->bio_length;
			auio.uio_offset = bp->bio_offset;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = bp->bio_cmd == BIO_READ ? UIO_READ :
			    UIO_WRITE;
			auio.uio_td = curthread;
			error = nvdimm_spa_uio(sc->dev, &auio);
			bp->bio_resid = auio.uio_resid;
		}
		bp->bio_bcount = bp->bio_length;
		devstat_end_transaction_bio(sc->spa_g_devstat, bp);
completed:
		bp->bio_completed = bp->bio_length;
		g_io_deliver(bp, error);
	}
}

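/*
 * GEOM start routine: queue the bio for the worker and wake it up.
 * The actual transfer happens in nvdimm_spa_g_thread().
 */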
static void
nvdimm_spa_g_start(struct bio *bp)
{
	struct g_spa *sc;

	sc = bp->bio_to->geom->softc;
	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		devstat_start_transaction_bio(sc->spa_g_devstat, bp);
	}
	mtx_lock(&sc->spa_g_mtx);
	bioq_disksort(&sc->spa_g_queue, bp);
	wakeup(&sc->spa_g_queue);
	mtx_unlock(&sc->spa_g_mtx);
}

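/*
 * GEOM access method: the provider keeps no exclusive state, so every
 * access request is granted.
 */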
static int
nvdimm_spa_g_access(struct g_provider *pp, int r, int w, int e)
{

	return (0);
}

static struct g_geom *nvdimm_spa_g_create(struct nvdimm_spa_dev *dev,
    const char *name);
static g_ctl_destroy_geom_t nvdimm_spa_g_destroy_geom;

struct g_class nvdimm_spa_g_class = {
	.name = "SPA",
	.version = G_VERSION,
	.start = nvdimm_spa_g_start,
	.access = nvdimm_spa_g_access,
	.destroy_geom = nvdimm_spa_g_destroy_geom,
};
DECLARE_GEOM_CLASS(nvdimm_spa_g_class, g_spa);

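/*
 * Initialize a SPA mapping from its NFIT system address range
 * structure.  Kernel-only range types (e.g. control regions) get no
 * user-visible interface; for the rest, the range is handed to
 * nvdimm_spa_dev_init() under the name spa%d.
 */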
int
nvdimm_spa_init(struct SPA_mapping *spa, ACPI_NFIT_SYSTEM_ADDRESS *nfitaddr,
    enum SPA_mapping_type spa_type)
{
	char *name;
	int error;

	spa->spa_type = spa_type;
	spa->spa_nfit_idx = nfitaddr->RangeIndex;
	spa->dev.spa_domain =
	    ((nfitaddr->Flags & ACPI_NFIT_PROXIMITY_VALID) != 0) ?
	    nfitaddr->ProximityDomain : -1;
	spa->dev.spa_phys_base = nfitaddr->Address;
	spa->dev.spa_len = nfitaddr->Length;
	spa->dev.spa_efi_mem_flags = nfitaddr->MemoryMapping;
	if (bootverbose) {
		printf("NVDIMM SPA%d base %#016jx len %#016jx %s fl %#jx\n",
		    spa->spa_nfit_idx,
		    (uintmax_t)spa->dev.spa_phys_base,
		    (uintmax_t)spa->dev.spa_len,
		    nvdimm_SPA_uuid_list[spa_type].u_name,
		    spa->dev.spa_efi_mem_flags);
	}
	spa->dev.spa_memattr = nvdimm_spa_memattr(nfitaddr->MemoryMapping);
	if (!nvdimm_SPA_uuid_list[spa_type].u_usr_acc)
		return (0);

	asprintf(&name, M_NVDIMM, "spa%d", spa->spa_nfit_idx);
	error = nvdimm_spa_dev_init(&spa->dev, name, spa->spa_nfit_idx);
	free(name, M_NVDIMM);
	return (error);
}

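/*
 * Create the user-visible interfaces for a SPA range: a permanent KVA
 * mapping (best effort; I/O falls back to fake-page copies when it
 * fails), an OBJT_SG VM object for mmap(2), a devfs node named
 * nvdimm_<name>, and a GEOM geom/provider.  Setup continues past
 * individual failures; the first error encountered is returned.
 */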
int
nvdimm_spa_dev_init(struct nvdimm_spa_dev *dev, const char *name, int unit)
{
	struct make_dev_args mda;
	struct sglist *spa_sg;
	char *devname;
	int error, error1;

	error1 = pmap_large_map(dev->spa_phys_base, dev->spa_len,
	    &dev->spa_kva, dev->spa_memattr);
	if (error1 != 0) {
		printf("NVDIMM %s cannot map into KVA, error %d\n", name,
		    error1);
		dev->spa_kva = NULL;
	}

	spa_sg = sglist_alloc(1, M_WAITOK);
	error = sglist_append_phys(spa_sg, dev->spa_phys_base,
	    dev->spa_len);
	if (error == 0) {
		dev->spa_obj = vm_pager_allocate(OBJT_SG, spa_sg, dev->spa_len,
		    VM_PROT_ALL, 0, NULL);
		if (dev->spa_obj == NULL) {
			printf("NVDIMM %s failed to alloc vm object\n", name);
			sglist_free(spa_sg);
		}
	} else {
		printf("NVDIMM %s failed to init sglist, error %d\n", name,
		    error);
		sglist_free(spa_sg);
	}

	make_dev_args_init(&mda);
	mda.mda_flags = MAKEDEV_WAITOK | MAKEDEV_CHECKNAME;
	mda.mda_devsw = &spa_cdevsw;
	mda.mda_cr = NULL;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_OPERATOR;
	mda.mda_mode = 0660;
	mda.mda_si_drv1 = dev;
	mda.mda_unit = unit;
	asprintf(&devname, M_NVDIMM, "nvdimm_%s", name);
	error = make_dev_s(&mda, &dev->spa_dev, "%s", devname);
	free(devname, M_NVDIMM);
	if (error != 0) {
		printf("NVDIMM %s cannot create devfs node, error %d\n", name,
		    error);
		if (error1 == 0)
			error1 = error;
	}
	dev->spa_g = nvdimm_spa_g_create(dev, name);
	if (dev->spa_g == NULL && error1 == 0)
		error1 = ENXIO;
	return (error1);
}

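/*
 * Create the GEOM geom and provider for a SPA range, together with
 * the worker thread and devstat entry.  The provider accepts unmapped
 * and direct-dispatched bios.  Returns NULL if the worker thread
 * cannot be created.
 */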
static struct g_geom *
nvdimm_spa_g_create(struct nvdimm_spa_dev *dev, const char *name)
{
	struct g_geom *gp;
	struct g_spa *sc;
	int error;

	gp = NULL;
	sc = malloc(sizeof(struct g_spa), M_NVDIMM, M_WAITOK | M_ZERO);
	sc->dev = dev;
	bioq_init(&sc->spa_g_queue);
	mtx_init(&sc->spa_g_mtx, "spag", NULL, MTX_DEF);
	sc->spa_g_proc_run = true;
	sc->spa_g_proc_exiting = false;
	error = kproc_create(nvdimm_spa_g_thread, sc, &sc->spa_g_proc, 0, 0,
	    "g_spa");
	if (error != 0) {
		mtx_destroy(&sc->spa_g_mtx);
		free(sc, M_NVDIMM);
		printf("NVDIMM %s cannot create geom worker, error %d\n", name,
		    error);
	} else {
		g_topology_lock();
		gp = g_new_geomf(&nvdimm_spa_g_class, "%s", name);
		gp->softc = sc;
		sc->spa_p = g_new_providerf(gp, "%s", name);
		sc->spa_p->mediasize = dev->spa_len;
		sc->spa_p->sectorsize = DEV_BSIZE;
		sc->spa_p->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE |
		    G_PF_ACCEPT_UNMAPPED;
		g_error_provider(sc->spa_p, 0);
		sc->spa_g_devstat = devstat_new_entry("spa", -1, DEV_BSIZE,
		    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
		    DEVSTAT_PRIORITY_MAX);
		g_topology_unlock();
	}
	return (gp);
}

void
nvdimm_spa_fini(struct SPA_mapping *spa)
{

	nvdimm_spa_dev_fini(&spa->dev);
}

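/*
 * Tear down everything nvdimm_spa_dev_init() created: the GEOM geom
 * (which also stops the worker thread), the devfs node, the VM
 * object, and the large-page KVA mapping.
 */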
void
nvdimm_spa_dev_fini(struct nvdimm_spa_dev *dev)
{

	if (dev->spa_g != NULL) {
		g_topology_lock();
		nvdimm_spa_g_destroy_geom(NULL, dev->spa_g->class, dev->spa_g);
		g_topology_unlock();
	}
	if (dev->spa_dev != NULL) {
		destroy_dev(dev->spa_dev);
		dev->spa_dev = NULL;
	}
	vm_object_deallocate(dev->spa_obj);
	if (dev->spa_kva != NULL) {
		pmap_large_unmap(dev->spa_kva, dev->spa_len);
		dev->spa_kva = NULL;
	}
}

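/*
 * Destroy the geom: stop the worker thread and wait for it to exit,
 * wither the provider, and release the devstat entry and the softc.
 */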
static int
nvdimm_spa_g_destroy_geom(struct gctl_req *req, struct g_class *cp,
    struct g_geom *gp)
{
	struct g_spa *sc;

	sc = gp->softc;
	mtx_lock(&sc->spa_g_mtx);
	sc->spa_g_proc_run = false;
	wakeup(&sc->spa_g_queue);
	while (!sc->spa_g_proc_exiting)
		msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO, "spa_e", 0);
	mtx_unlock(&sc->spa_g_mtx);
	g_topology_assert();
	g_wither_geom(gp, ENXIO);
	sc->spa_p = NULL;
	if (sc->spa_g_devstat != NULL) {
		devstat_remove_entry(sc->spa_g_devstat);
		sc->spa_g_devstat = NULL;
	}
	mtx_destroy(&sc->spa_g_mtx);
	free(sc, M_NVDIMM);
	return (0);
}