/*-
 * Copyright (c) 2017, 2018 The FreeBSD Foundation
 * All rights reserved.
 * Copyright (c) 2018, 2019 Intel Corporation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/efi.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <sys/uio.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/acuuid.h>
#include <dev/acpica/acpivar.h>
#include <dev/nvdimm/nvdimm_var.h>

#define UUID_INITIALIZER_VOLATILE_MEMORY \
    {0x7305944f,0xfdda,0x44e3,0xb1,0x6c,{0x3f,0x22,0xd2,0x52,0xe5,0xd0}}
#define UUID_INITIALIZER_PERSISTENT_MEMORY \
    {0x66f0d379,0xb4f3,0x4074,0xac,0x43,{0x0d,0x33,0x18,0xb7,0x8c,0xdb}}
#define UUID_INITIALIZER_CONTROL_REGION \
    {0x92f701f6,0x13b4,0x405d,0x91,0x0b,{0x29,0x93,0x67,0xe8,0x23,0x4c}}
#define UUID_INITIALIZER_DATA_REGION \
    {0x91af0530,0x5d86,0x470e,0xa6,0xb0,{0x0a,0x2d,0xb9,0x40,0x82,0x49}}
#define UUID_INITIALIZER_VOLATILE_VIRTUAL_DISK \
    {0x77ab535a,0x45fc,0x624b,0x55,0x60,{0xf7,0xb2,0x81,0xd1,0xf9,0x6e}}
#define UUID_INITIALIZER_VOLATILE_VIRTUAL_CD \
    {0x3d5abd30,0x4175,0x87ce,0x6d,0x64,{0xd2,0xad,0xe5,0x23,0xc4,0xbb}}
#define UUID_INITIALIZER_PERSISTENT_VIRTUAL_DISK \
    {0x5cea02c9,0x4d07,0x69d3,0x26,0x9f,{0x44,0x96,0xfb,0xe0,0x96,0xf9}}
#define UUID_INITIALIZER_PERSISTENT_VIRTUAL_CD \
    {0x08018188,0x42cd,0xbb48,0x10,0x0f,{0x53,0x87,0xd5,0x3d,0xed,0x3d}}
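
/*
 * Table of SPA range types, indexed by enum SPA_mapping_type.  The
 * UUIDs are the Address Range Type GUIDs that the ACPI NFIT System
 * Physical Address (SPA) Range Structure uses to describe a range.
 * Types with u_usr_acc set are exposed to userspace as a character
 * device and a GEOM provider; control regions stay kernel-only.
 */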
static struct nvdimm_SPA_uuid_list_elm {
        const char      *u_name;
        struct uuid     u_id;
        const bool      u_usr_acc;
} nvdimm_SPA_uuid_list[] = {
        [SPA_TYPE_VOLATILE_MEMORY] = {
                .u_name =       "VOLA MEM ",
                .u_id =         UUID_INITIALIZER_VOLATILE_MEMORY,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_PERSISTENT_MEMORY] = {
                .u_name =       "PERS MEM",
                .u_id =         UUID_INITIALIZER_PERSISTENT_MEMORY,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_CONTROL_REGION] = {
                .u_name =       "CTRL RG ",
                .u_id =         UUID_INITIALIZER_CONTROL_REGION,
                .u_usr_acc =    false,
        },
        [SPA_TYPE_DATA_REGION] = {
                .u_name =       "DATA RG ",
                .u_id =         UUID_INITIALIZER_DATA_REGION,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_VOLATILE_VIRTUAL_DISK] = {
                .u_name =       "VIRT DSK",
                .u_id =         UUID_INITIALIZER_VOLATILE_VIRTUAL_DISK,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_VOLATILE_VIRTUAL_CD] = {
                .u_name =       "VIRT CD ",
                .u_id =         UUID_INITIALIZER_VOLATILE_VIRTUAL_CD,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_PERSISTENT_VIRTUAL_DISK] = {
                .u_name =       "PV DSK ",
                .u_id =         UUID_INITIALIZER_PERSISTENT_VIRTUAL_DISK,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_PERSISTENT_VIRTUAL_CD] = {
                .u_name =       "PV CD ",
                .u_id =         UUID_INITIALIZER_PERSISTENT_VIRTUAL_CD,
                .u_usr_acc =    true,
        },
};

enum SPA_mapping_type
nvdimm_spa_type_from_name(const char *name)
{
        int j;

        for (j = 0; j < nitems(nvdimm_SPA_uuid_list); j++) {
                if (strcmp(name, nvdimm_SPA_uuid_list[j].u_name) != 0)
                        continue;
                return (j);
        }
        return (SPA_TYPE_UNKNOWN);
}

enum SPA_mapping_type
nvdimm_spa_type_from_uuid(struct uuid *uuid)
{
        int j;

        for (j = 0; j < nitems(nvdimm_SPA_uuid_list); j++) {
                if (uuidcmp(uuid, &nvdimm_SPA_uuid_list[j].u_id) != 0)
                        continue;
                return (j);
        }
        return (SPA_TYPE_UNKNOWN);
}

/*
 * Pick the strongest cache mode permitted by the EFI memory attributes
 * of the range, falling back to uncacheable.
 */
static vm_memattr_t
nvdimm_spa_memattr(uint64_t efi_mem_flags)
{
        vm_memattr_t mode;

        if ((efi_mem_flags & EFI_MD_ATTR_WB) != 0)
                mode = VM_MEMATTR_WRITE_BACK;
        else if ((efi_mem_flags & EFI_MD_ATTR_WT) != 0)
                mode = VM_MEMATTR_WRITE_THROUGH;
        else if ((efi_mem_flags & EFI_MD_ATTR_WC) != 0)
                mode = VM_MEMATTR_WRITE_COMBINING;
        else if ((efi_mem_flags & EFI_MD_ATTR_WP) != 0)
                mode = VM_MEMATTR_WRITE_PROTECTED;
        else if ((efi_mem_flags & EFI_MD_ATTR_UC) != 0)
                mode = VM_MEMATTR_UNCACHEABLE;
        else {
                if (bootverbose)
                        printf("SPA mapping attr %#lx unsupported\n",
                            efi_mem_flags);
                mode = VM_MEMATTR_UNCACHEABLE;
        }
        return (mode);
}

static int
nvdimm_spa_uio(struct nvdimm_spa_dev *dev, struct uio *uio)
{
        struct vm_page m, *ma;
        off_t off;
        vm_memattr_t mattr;
        int error, n;

        error = 0;
        if (dev->spa_kva == NULL) {
                /*
                 * No permanent KVA mapping; copy through a transient
                 * fake page, one page at a time.
                 */
                mattr = dev->spa_memattr;
                bzero(&m, sizeof(m));
                vm_page_initfake(&m, 0, mattr);
                ma = &m;
                while (uio->uio_resid > 0) {
                        if (uio->uio_offset >= dev->spa_len)
                                break;
                        off = dev->spa_phys_base + uio->uio_offset;
                        vm_page_updatefake(&m, trunc_page(off), mattr);
                        /*
                         * Clamp the copy to the single fake page and
                         * to the end of the SPA range; otherwise
                         * uiomove_fromphys() would step past the
                         * one-element page array on unaligned offsets.
                         */
                        n = PAGE_SIZE - (off & PAGE_MASK);
                        if (n > uio->uio_resid)
                                n = uio->uio_resid;
                        if (uio->uio_offset + n > dev->spa_len)
                                n = dev->spa_len - uio->uio_offset;
                        error = uiomove_fromphys(&ma, off & PAGE_MASK, n,
                            uio);
                        if (error != 0)
                                break;
                }
        } else {
                while (uio->uio_resid > 0) {
                        if (uio->uio_offset >= dev->spa_len)
                                break;
                        n = INT_MAX;
                        if (n > uio->uio_resid)
                                n = uio->uio_resid;
                        if (uio->uio_offset + n > dev->spa_len)
                                n = dev->spa_len - uio->uio_offset;
                        error = uiomove((char *)dev->spa_kva +
                            uio->uio_offset, n, uio);
                        if (error != 0)
                                break;
                }
        }
        return (error);
}
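
/*
 * Character device interface: raw read(2)/write(2) through
 * nvdimm_spa_uio(), the disk ioctls that report DEV_BSIZE sectors and
 * the range length as the media size, and mmap(2) served from the
 * sglist-backed VM object created in nvdimm_spa_dev_init().
 */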
static int
nvdimm_spa_rw(struct cdev *dev, struct uio *uio, int ioflag)
{

        return (nvdimm_spa_uio(dev->si_drv1, uio));
}

static int
nvdimm_spa_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
        struct nvdimm_spa_dev *dev;
        int error;

        dev = cdev->si_drv1;
        error = 0;
        switch (cmd) {
        case DIOCGSECTORSIZE:
                *(u_int *)data = DEV_BSIZE;
                break;
        case DIOCGMEDIASIZE:
                *(off_t *)data = dev->spa_len;
                break;
        default:
                error = ENOTTY;
                break;
        }
        return (error);
}

static int
nvdimm_spa_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    vm_object_t *objp, int nprot)
{
        struct nvdimm_spa_dev *dev;

        dev = cdev->si_drv1;
        if (dev->spa_obj == NULL)
                return (ENXIO);
        if (*offset >= dev->spa_len || *offset + size < *offset ||
            *offset + size > dev->spa_len)
                return (EINVAL);
        vm_object_reference(dev->spa_obj);
        *objp = dev->spa_obj;
        return (0);
}

static struct cdevsw spa_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_DISK,
        .d_name =       "nvdimm_spa",
        .d_read =       nvdimm_spa_rw,
        .d_write =      nvdimm_spa_rw,
        .d_ioctl =      nvdimm_spa_ioctl,
        .d_mmap_single = nvdimm_spa_mmap_single,
};

/*
 * Copy between an unmapped bio and an unmapped SPA range by wrapping
 * the physical range in an array of fake pages, then letting
 * pmap_copy_pages() do a page-by-page physical copy.
 */
static void
nvdimm_spa_g_all_unmapped(struct nvdimm_spa_dev *dev, struct bio *bp, int rw)
{
        struct vm_page maa[bp->bio_ma_n];
        vm_page_t ma[bp->bio_ma_n];
        vm_memattr_t mattr;
        int i;

        mattr = dev->spa_memattr;
        for (i = 0; i < nitems(ma); i++) {
                bzero(&maa[i], sizeof(maa[i]));
                vm_page_initfake(&maa[i], dev->spa_phys_base +
                    trunc_page(bp->bio_offset) + PAGE_SIZE * i, mattr);
                ma[i] = &maa[i];
        }
        if (rw == BIO_READ)
                pmap_copy_pages(ma, bp->bio_offset & PAGE_MASK, bp->bio_ma,
                    bp->bio_ma_offset, bp->bio_length);
        else
                pmap_copy_pages(bp->bio_ma, bp->bio_ma_offset, ma,
                    bp->bio_offset & PAGE_MASK, bp->bio_length);
}

static void
nvdimm_spa_g_thread(void *arg)
{
        struct g_spa *sc;
        struct bio *bp;
        struct uio auio;
        struct iovec aiovec;
        int error;

        sc = arg;
        for (;;) {
                mtx_lock(&sc->spa_g_mtx);
                for (;;) {
                        bp = bioq_takefirst(&sc->spa_g_queue);
                        if (bp != NULL)
                                break;
                        msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO,
                            "spa_g", 0);
                        if (!sc->spa_g_proc_run) {
                                sc->spa_g_proc_exiting = true;
                                wakeup(&sc->spa_g_queue);
                                mtx_unlock(&sc->spa_g_mtx);
                                kproc_exit(0);
                        }
                        continue;
                }
                mtx_unlock(&sc->spa_g_mtx);
                if (bp->bio_cmd != BIO_READ && bp->bio_cmd != BIO_WRITE &&
                    bp->bio_cmd != BIO_FLUSH) {
                        error = EOPNOTSUPP;
                        goto completed;
                }

                error = 0;
                if (bp->bio_cmd == BIO_FLUSH) {
                        if (sc->dev->spa_kva != NULL) {
                                pmap_large_map_wb(sc->dev->spa_kva,
                                    sc->dev->spa_len);
                        } else {
                                pmap_flush_cache_phys_range(
                                    (vm_paddr_t)sc->dev->spa_phys_base,
                                    (vm_paddr_t)sc->dev->spa_phys_base +
                                    sc->dev->spa_len, sc->dev->spa_memattr);
                        }
                        /*
                         * XXX flush IMC
                         */
                        goto completed;
                }

                if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
                        if (sc->dev->spa_kva != NULL) {
                                aiovec.iov_base = (char *)sc->dev->spa_kva +
                                    bp->bio_offset;
                                aiovec.iov_len = bp->bio_length;
                                auio.uio_iov = &aiovec;
                                auio.uio_iovcnt = 1;
                                auio.uio_resid = bp->bio_length;
                                auio.uio_offset = bp->bio_offset;
                                auio.uio_segflg = UIO_SYSSPACE;
                                /*
                                 * The uio direction is inverted here:
                                 * uiomove_fromphys() copies between the
                                 * bio's pages (the "phys" side) and the
                                 * iovec pointing into the device
                                 * mapping, so a BIO_READ is a UIO_WRITE
                                 * into the bio pages, and vice versa.
                                 */
                                auio.uio_rw = bp->bio_cmd == BIO_READ ?
                                    UIO_WRITE : UIO_READ;
                                auio.uio_td = curthread;
                                error = uiomove_fromphys(bp->bio_ma,
                                    bp->bio_ma_offset, bp->bio_length, &auio);
                                bp->bio_resid = auio.uio_resid;
                        } else {
                                nvdimm_spa_g_all_unmapped(sc->dev, bp,
                                    bp->bio_cmd);
                                bp->bio_resid = bp->bio_length;
                                error = 0;
                        }
                } else {
                        aiovec.iov_base = bp->bio_data;
                        aiovec.iov_len = bp->bio_length;
                        auio.uio_iov = &aiovec;
                        auio.uio_iovcnt = 1;
                        auio.uio_resid = bp->bio_length;
                        auio.uio_offset = bp->bio_offset;
                        auio.uio_segflg = UIO_SYSSPACE;
                        auio.uio_rw = bp->bio_cmd == BIO_READ ? UIO_READ :
                            UIO_WRITE;
                        auio.uio_td = curthread;
                        error = nvdimm_spa_uio(sc->dev, &auio);
                        bp->bio_resid = auio.uio_resid;
                }
                bp->bio_bcount = bp->bio_length;
                devstat_end_transaction_bio(sc->spa_g_devstat, bp);
completed:
                bp->bio_completed = bp->bio_length;
                g_io_deliver(bp, error);
        }
}
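
/*
 * GEOM glue.  The provider accepts unmapped bios and direct dispatch,
 * so the start method may be entered directly from the requesting
 * thread; it therefore only queues the bio and wakes the worker
 * process, which performs the copy in nvdimm_spa_g_thread() above.
 */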
static void
nvdimm_spa_g_start(struct bio *bp)
{
        struct g_spa *sc;

        sc = bp->bio_to->geom->softc;
        if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
                mtx_lock(&sc->spa_g_stat_mtx);
                devstat_start_transaction_bio(sc->spa_g_devstat, bp);
                mtx_unlock(&sc->spa_g_stat_mtx);
        }
        mtx_lock(&sc->spa_g_mtx);
        bioq_disksort(&sc->spa_g_queue, bp);
        wakeup(&sc->spa_g_queue);
        mtx_unlock(&sc->spa_g_mtx);
}

static int
nvdimm_spa_g_access(struct g_provider *pp, int r, int w, int e)
{

        return (0);
}

static struct g_geom *nvdimm_spa_g_create(struct nvdimm_spa_dev *dev,
    const char *name);
static g_ctl_destroy_geom_t nvdimm_spa_g_destroy_geom;

struct g_class nvdimm_spa_g_class = {
        .name =         "SPA",
        .version =      G_VERSION,
        .start =        nvdimm_spa_g_start,
        .access =       nvdimm_spa_g_access,
        .destroy_geom = nvdimm_spa_g_destroy_geom,
};
DECLARE_GEOM_CLASS(nvdimm_spa_g_class, g_spa);

int
nvdimm_spa_init(struct SPA_mapping *spa, ACPI_NFIT_SYSTEM_ADDRESS *nfitaddr,
    enum SPA_mapping_type spa_type)
{
        char *name;
        int error;

        spa->spa_type = spa_type;
        spa->spa_nfit_idx = nfitaddr->RangeIndex;
        spa->dev.spa_domain =
            ((nfitaddr->Flags & ACPI_NFIT_PROXIMITY_VALID) != 0) ?
            nfitaddr->ProximityDomain : -1;
        spa->dev.spa_phys_base = nfitaddr->Address;
        spa->dev.spa_len = nfitaddr->Length;
        spa->dev.spa_efi_mem_flags = nfitaddr->MemoryMapping;
        if (bootverbose) {
                printf("NVDIMM SPA%d base %#016jx len %#016jx %s fl %#jx\n",
                    spa->spa_nfit_idx,
                    (uintmax_t)spa->dev.spa_phys_base,
                    (uintmax_t)spa->dev.spa_len,
                    nvdimm_SPA_uuid_list[spa_type].u_name,
                    spa->dev.spa_efi_mem_flags);
        }
        spa->dev.spa_memattr = nvdimm_spa_memattr(nfitaddr->MemoryMapping);
        if (!nvdimm_SPA_uuid_list[spa_type].u_usr_acc)
                return (0);

        asprintf(&name, M_NVDIMM, "spa%d", spa->spa_nfit_idx);
        error = nvdimm_spa_dev_init(&spa->dev, name, spa->spa_nfit_idx);
        free(name, M_NVDIMM);
        return (error);
}
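
/*
 * Create the userspace interfaces for one SPA range: a permanent
 * kernel mapping via pmap_large_map() (failure is non-fatal; I/O then
 * falls back to transient fake-page copies), an OBJT_SG VM object
 * backing mmap(2), a devfs node, and a GEOM provider.
 */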
int
nvdimm_spa_dev_init(struct nvdimm_spa_dev *dev, const char *name, int unit)
{
        struct make_dev_args mda;
        struct sglist *spa_sg;
        char *devname;
        int error, error1;

        error1 = pmap_large_map(dev->spa_phys_base, dev->spa_len,
            &dev->spa_kva, dev->spa_memattr);
        if (error1 != 0) {
                printf("NVDIMM %s cannot map into KVA, error %d\n", name,
                    error1);
                dev->spa_kva = NULL;
        }

        spa_sg = sglist_alloc(1, M_WAITOK);
        error = sglist_append_phys(spa_sg, dev->spa_phys_base,
            dev->spa_len);
        if (error == 0) {
                dev->spa_obj = vm_pager_allocate(OBJT_SG, spa_sg, dev->spa_len,
                    VM_PROT_ALL, 0, NULL);
                if (dev->spa_obj == NULL) {
                        printf("NVDIMM %s failed to alloc vm object\n", name);
                        sglist_free(spa_sg);
                }
        } else {
                printf("NVDIMM %s failed to init sglist, error %d\n", name,
                    error);
                sglist_free(spa_sg);
        }

        make_dev_args_init(&mda);
        mda.mda_flags = MAKEDEV_WAITOK | MAKEDEV_CHECKNAME;
        mda.mda_devsw = &spa_cdevsw;
        mda.mda_cr = NULL;
        mda.mda_uid = UID_ROOT;
        mda.mda_gid = GID_OPERATOR;
        mda.mda_mode = 0660;
        mda.mda_si_drv1 = dev;
        mda.mda_unit = unit;
        asprintf(&devname, M_NVDIMM, "nvdimm_%s", name);
        error = make_dev_s(&mda, &dev->spa_dev, "%s", devname);
        free(devname, M_NVDIMM);
        if (error != 0) {
                printf("NVDIMM %s cannot create devfs node, error %d\n", name,
                    error);
                if (error1 == 0)
                        error1 = error;
        }
        dev->spa_g = nvdimm_spa_g_create(dev, name);
        if (dev->spa_g == NULL && error1 == 0)
                error1 = ENXIO;
        return (error1);
}

static struct g_geom *
nvdimm_spa_g_create(struct nvdimm_spa_dev *dev, const char *name)
{
        struct g_geom *gp;
        struct g_spa *sc;
        int error;

        gp = NULL;
        sc = malloc(sizeof(struct g_spa), M_NVDIMM, M_WAITOK | M_ZERO);
        sc->dev = dev;
        bioq_init(&sc->spa_g_queue);
        mtx_init(&sc->spa_g_mtx, "spag", NULL, MTX_DEF);
        mtx_init(&sc->spa_g_stat_mtx, "spagst", NULL, MTX_DEF);
        sc->spa_g_proc_run = true;
        sc->spa_g_proc_exiting = false;
        error = kproc_create(nvdimm_spa_g_thread, sc, &sc->spa_g_proc, 0, 0,
            "g_spa");
        if (error != 0) {
                mtx_destroy(&sc->spa_g_mtx);
                mtx_destroy(&sc->spa_g_stat_mtx);
                free(sc, M_NVDIMM);
                printf("NVDIMM %s cannot create geom worker, error %d\n", name,
                    error);
        } else {
                g_topology_lock();
                gp = g_new_geomf(&nvdimm_spa_g_class, "%s", name);
                gp->softc = sc;
                sc->spa_p = g_new_providerf(gp, "%s", name);
                sc->spa_p->mediasize = dev->spa_len;
                sc->spa_p->sectorsize = DEV_BSIZE;
                sc->spa_p->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE |
                    G_PF_ACCEPT_UNMAPPED;
                g_error_provider(sc->spa_p, 0);
                sc->spa_g_devstat = devstat_new_entry("spa", -1, DEV_BSIZE,
                    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
                    DEVSTAT_PRIORITY_MAX);
                g_topology_unlock();
        }
        return (gp);
}
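
/*
 * Teardown, in the reverse order of nvdimm_spa_dev_init(): the GEOM
 * geom (destroying it also stops the worker process), the devfs node,
 * the reference on the VM object, and the large KVA mapping.
 */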
void
nvdimm_spa_fini(struct SPA_mapping *spa)
{

        nvdimm_spa_dev_fini(&spa->dev);
}

void
nvdimm_spa_dev_fini(struct nvdimm_spa_dev *dev)
{

        if (dev->spa_g != NULL) {
                g_topology_lock();
                nvdimm_spa_g_destroy_geom(NULL, dev->spa_g->class, dev->spa_g);
                g_topology_unlock();
        }
        if (dev->spa_dev != NULL) {
                destroy_dev(dev->spa_dev);
                dev->spa_dev = NULL;
        }
        vm_object_deallocate(dev->spa_obj);
        if (dev->spa_kva != NULL) {
                pmap_large_unmap(dev->spa_kva, dev->spa_len);
                dev->spa_kva = NULL;
        }
}

static int
nvdimm_spa_g_destroy_geom(struct gctl_req *req, struct g_class *cp,
    struct g_geom *gp)
{
        struct g_spa *sc;

        sc = gp->softc;
        mtx_lock(&sc->spa_g_mtx);
        sc->spa_g_proc_run = false;
        wakeup(&sc->spa_g_queue);
        while (!sc->spa_g_proc_exiting)
                msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO, "spa_e", 0);
        mtx_unlock(&sc->spa_g_mtx);
        g_topology_assert();
        g_wither_geom(gp, ENXIO);
        sc->spa_p = NULL;
        if (sc->spa_g_devstat != NULL) {
                devstat_remove_entry(sc->spa_g_devstat);
                sc->spa_g_devstat = NULL;
        }
        mtx_destroy(&sc->spa_g_mtx);
        mtx_destroy(&sc->spa_g_stat_mtx);
        free(sc, M_NVDIMM);
        return (0);
}