/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such fall under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
        u_char start[MD_ROOT_SIZE*1024];
        u_char end[128];
} mfs_root = {
        .start = "MFS Filesystem goes here",
        .end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
        .d_version =	D_VERSION,
        .d_ioctl =	mdctlioctl,
        .d_name =	MD_NAME,
};

struct g_class g_md_class = {
        .name = "MD",
        .version = G_VERSION,
        .init = g_md_init,
        .fini = g_md_fini,
        .start = g_md_start,
        .access = g_md_access,
        .dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);


static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
        uintptr_t	*array;
        u_int		total;
        u_int		used;
        u_int		shift;
};

struct md_s {
        int unit;
        LIST_ENTRY(md_s) list;
        struct bio_queue_head bio_queue;
        struct mtx queue_mtx;
        struct cdev *dev;
        enum md_types type;
        off_t mediasize;
        unsigned sectorsize;
        unsigned opencount;
        unsigned fwheads;
        unsigned fwsectors;
        unsigned flags;
        char name[20];
        struct proc *procp;
        struct g_geom *gp;
        struct g_provider *pp;
        int (*start)(struct md_s *sc, struct bio *bp);
        struct devstat *devstat;

        /* MD_MALLOC related fields */
        struct indir *indir;
        uma_zone_t uma;

        /* MD_PRELOAD related fields */
        u_char *pl_ptr;
        size_t pl_len;

        /* MD_VNODE related fields */
        struct vnode *vnode;
        char file[PATH_MAX];
        struct ucred *cred;

        /* MD_SWAP related fields */
        vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
        struct indir *ip;

        ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
            | M_ZERO);
        if (ip == NULL)
                return (NULL);
        ip->array = malloc(sizeof(uintptr_t) * NINDIR,
            M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
        if (ip->array == NULL) {
                free(ip, M_MD);
                return (NULL);
        }
        ip->total = NINDIR;
        ip->shift = shift;
        return (ip);
}

static void
del_indir(struct indir *ip)
{

        free(ip->array, M_MDSECT);
        free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
        int i;

        for (i = 0; i < NINDIR; i++) {
                if (!ip->array[i])
                        continue;
                if (ip->shift)
                        destroy_indir(sc, (struct indir*)(ip->array[i]));
                else if (ip->array[i] > 255)
                        uma_zfree(sc->uma, (void *)(ip->array[i]));
        }
        del_indir(ip);
}
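/*
 * Note on the encoding used in the indir tree: a leaf slot value of 0
 * means the sector has never been written (reads return zeroes), a
 * value in the range 1..255 means the whole sector is filled with that
 * byte (the MD_COMPRESS case), and any larger value is a pointer to a
 * malloced sector buffer from the per-device UMA zone.  This is why
 * destroy_indir() above and mdstart_malloc() below only treat values
 * greater than 255 as real allocations.
 */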
/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
        off_t rcnt;
        struct indir *ip;
        int layer;

        rcnt = size;
        layer = 0;
        while (rcnt > NINDIR) {
                rcnt /= NINDIR;
                layer++;
        }

        /*
         * XXX: the top layer is probably not fully populated, so we allocate
         * too much space for ip->array in here.
         */
        ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
        ip->array = malloc(sizeof(uintptr_t) * NINDIR,
            M_MDSECT, M_WAITOK | M_ZERO);
        ip->total = NINDIR;
        ip->shift = layer * nshift;
        return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
        struct indir *cip;
        int idx;
        uintptr_t up;

        if (md_debug > 1)
                printf("s_read(%jd)\n", (intmax_t)offset);
        up = 0;
        for (cip = ip; cip != NULL;) {
                if (cip->shift) {
                        idx = (offset >> cip->shift) & NMASK;
                        up = cip->array[idx];
                        cip = (struct indir *)up;
                        continue;
                }
                idx = offset & NMASK;
                return (cip->array[idx]);
        }
        return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
        struct indir *cip, *lip[10];
        int idx, li;
        uintptr_t up;

        if (md_debug > 1)
                printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
        up = 0;
        li = 0;
        cip = ip;
        for (;;) {
                lip[li++] = cip;
                if (cip->shift) {
                        idx = (offset >> cip->shift) & NMASK;
                        up = cip->array[idx];
                        if (up != 0) {
                                cip = (struct indir *)up;
                                continue;
                        }
                        /* Allocate branch */
                        cip->array[idx] =
                            (uintptr_t)new_indir(cip->shift - nshift);
                        if (cip->array[idx] == 0)
                                return (ENOSPC);
                        cip->used++;
                        up = cip->array[idx];
                        cip = (struct indir *)up;
                        continue;
                }
                /* leafnode */
                idx = offset & NMASK;
                up = cip->array[idx];
                if (up != 0)
                        cip->used--;
                cip->array[idx] = ptr;
                if (ptr != 0)
                        cip->used++;
                break;
        }
        if (cip->used != 0 || li == 1)
                return (0);
        li--;
        while (cip->used == 0 && cip != ip) {
                li--;
                idx = (offset >> lip[li]->shift) & NMASK;
                up = lip[li]->array[idx];
                KASSERT(up == (uintptr_t)cip, ("md screwed up"));
                del_indir(cip);
                lip[li]->array[idx] = 0;
                lip[li]->used--;
                cip = lip[li];
        }
        return (0);
}


static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
        struct md_s *sc;

        sc = pp->geom->softc;
        if (sc == NULL) {
                if (r <= 0 && w <= 0 && e <= 0)
                        return (0);
                return (ENXIO);
        }
        r += pp->acr;
        w += pp->acw;
        e += pp->ace;
        if ((sc->flags & MD_READONLY) != 0 && w > 0)
                return (EROFS);
        if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
                sc->opencount = 1;
        } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
                sc->opencount = 0;
        }
        return (0);
}

static void
g_md_start(struct bio *bp)
{
        struct md_s *sc;

        sc = bp->bio_to->geom->softc;
        if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
                devstat_start_transaction_bio(sc->devstat, bp);
        mtx_lock(&sc->queue_mtx);
        bioq_disksort(&sc->bio_queue, bp);
        mtx_unlock(&sc->queue_mtx);
        wakeup(sc);
}
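/*
 * I/O path for malloc-backed devices.  Requests are processed one
 * sector at a time: BIO_DELETE punches a hole by storing 0 in the
 * indir tree, BIO_READ reconstructs the sector from the stored value
 * per the encoding described above, and BIO_WRITE first scans the
 * sector for a uniform fill byte when MD_COMPRESS is enabled so that
 * such sectors need no backing allocation.
 */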
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
        int i, error;
        u_char *dst;
        off_t secno, nsec, uc;
        uintptr_t sp, osp;

        switch (bp->bio_cmd) {
        case BIO_READ:
        case BIO_WRITE:
        case BIO_DELETE:
                break;
        default:
                return (EOPNOTSUPP);
        }

        nsec = bp->bio_length / sc->sectorsize;
        secno = bp->bio_offset / sc->sectorsize;
        dst = bp->bio_data;
        error = 0;
        while (nsec--) {
                osp = s_read(sc->indir, secno);
                if (bp->bio_cmd == BIO_DELETE) {
                        if (osp != 0)
                                error = s_write(sc->indir, secno, 0);
                } else if (bp->bio_cmd == BIO_READ) {
                        if (osp == 0)
                                bzero(dst, sc->sectorsize);
                        else if (osp <= 255)
                                memset(dst, osp, sc->sectorsize);
                        else {
                                bcopy((void *)osp, dst, sc->sectorsize);
                                cpu_flush_dcache(dst, sc->sectorsize);
                        }
                        osp = 0;
                } else if (bp->bio_cmd == BIO_WRITE) {
                        if (sc->flags & MD_COMPRESS) {
                                uc = dst[0];
                                for (i = 1; i < sc->sectorsize; i++)
                                        if (dst[i] != uc)
                                                break;
                        } else {
                                i = 0;
                                uc = 0;
                        }
                        if (i == sc->sectorsize) {
                                if (osp != uc)
                                        error = s_write(sc->indir, secno, uc);
                        } else {
                                if (osp <= 255) {
                                        sp = (uintptr_t)uma_zalloc(sc->uma,
                                            md_malloc_wait ? M_WAITOK :
                                            M_NOWAIT);
                                        if (sp == 0) {
                                                error = ENOSPC;
                                                break;
                                        }
                                        bcopy(dst, (void *)sp, sc->sectorsize);
                                        error = s_write(sc->indir, secno, sp);
                                } else {
                                        bcopy(dst, (void *)osp, sc->sectorsize);
                                        osp = 0;
                                }
                        }
                } else {
                        error = EOPNOTSUPP;
                }
                if (osp > 255)
                        uma_zfree(sc->uma, (void*)osp);
                if (error != 0)
                        break;
                secno++;
                dst += sc->sectorsize;
        }
        bp->bio_resid = 0;
        return (error);
}
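/*
 * I/O path for preloaded images.  The backing store is the image
 * mapped at sc->pl_ptr, so both reads and writes are plain memory
 * copies; note that writes modify only the in-core copy of the image
 * and do not survive a reboot.
 */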
static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

        switch (bp->bio_cmd) {
        case BIO_READ:
                bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
                    bp->bio_length);
                cpu_flush_dcache(bp->bio_data, bp->bio_length);
                break;
        case BIO_WRITE:
                bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
                    bp->bio_length);
                break;
        }
        bp->bio_resid = 0;
        return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
        int error;
        struct uio auio;
        struct iovec aiov;
        struct mount *mp;
        struct vnode *vp;
        struct thread *td;
        off_t end, zerosize;

        switch (bp->bio_cmd) {
        case BIO_READ:
        case BIO_WRITE:
        case BIO_DELETE:
        case BIO_FLUSH:
                break;
        default:
                return (EOPNOTSUPP);
        }

        td = curthread;
        vp = sc->vnode;

        /*
         * VNODE I/O
         *
         * If an error occurs, we set BIO_ERROR but we do not set
         * B_INVAL because (for a write anyway), the buffer is
         * still valid.
         */

        if (bp->bio_cmd == BIO_FLUSH) {
                (void) vn_start_write(vp, &mp, V_WAIT);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                error = VOP_FSYNC(vp, MNT_WAIT, td);
                VOP_UNLOCK(vp, 0);
                vn_finished_write(mp);
                return (error);
        }

        bzero(&auio, sizeof(auio));

        /*
         * Special case for BIO_DELETE.  On the surface, this is very
         * similar to BIO_WRITE, except that we write from our own
         * fixed-length buffer, so we have to loop.  The net result is
         * that the two cases end up having very little in common.
         */
        if (bp->bio_cmd == BIO_DELETE) {
                zerosize = ZERO_REGION_SIZE -
                    (ZERO_REGION_SIZE % sc->sectorsize);
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_rw = UIO_WRITE;
                auio.uio_td = td;
                end = bp->bio_offset + bp->bio_length;
                (void) vn_start_write(vp, &mp, V_WAIT);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                error = 0;
                while (auio.uio_offset < end) {
                        aiov.iov_base = __DECONST(void *, zero_region);
                        aiov.iov_len = end - auio.uio_offset;
                        if (aiov.iov_len > zerosize)
                                aiov.iov_len = zerosize;
                        auio.uio_resid = aiov.iov_len;
                        error = VOP_WRITE(vp, &auio,
                            sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
                        if (error != 0)
                                break;
                }
                VOP_UNLOCK(vp, 0);
                vn_finished_write(mp);
                bp->bio_resid = end - auio.uio_offset;
                return (error);
        }

        aiov.iov_base = bp->bio_data;
        aiov.iov_len = bp->bio_length;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
        auio.uio_segflg = UIO_SYSSPACE;
        if (bp->bio_cmd == BIO_READ)
                auio.uio_rw = UIO_READ;
        else if (bp->bio_cmd == BIO_WRITE)
                auio.uio_rw = UIO_WRITE;
        else
                panic("wrong BIO_OP in mdstart_vnode");
        auio.uio_resid = bp->bio_length;
        auio.uio_td = td;
        /*
         * When reading set IO_DIRECT to try to avoid double-caching
         * the data.  When writing IO_DIRECT is not optimal.
         */
        if (bp->bio_cmd == BIO_READ) {
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
                VOP_UNLOCK(vp, 0);
        } else {
                (void) vn_start_write(vp, &mp, V_WAIT);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
                    sc->cred);
                VOP_UNLOCK(vp, 0);
                vn_finished_write(mp);
        }
        bp->bio_resid = auio.uio_resid;
        return (error);
}
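/*
 * I/O path for swap-backed devices.  Each page covered by the request
 * is grabbed from the backing VM object, temporarily mapped through an
 * sf_buf (the thread is pinned while the CPU-private mapping is live),
 * and copied to or from the bio buffer.  BIO_DELETE of a whole page
 * releases the page and its swap space.
 */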
static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
        struct sf_buf *sf;
        int rv, offs, len, lastend;
        vm_pindex_t i, lastp;
        vm_page_t m;
        u_char *p;

        switch (bp->bio_cmd) {
        case BIO_READ:
        case BIO_WRITE:
        case BIO_DELETE:
                break;
        default:
                return (EOPNOTSUPP);
        }

        p = bp->bio_data;

        /*
         * offs is the offset at which to start operating on the
         * next (ie, first) page.  lastp is the last page on
         * which we're going to operate.  lastend is the ending
         * position within that last page (ie, PAGE_SIZE if
         * we're operating on complete aligned pages).
         */
        offs = bp->bio_offset % PAGE_SIZE;
        lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
        lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

        rv = VM_PAGER_OK;
        VM_OBJECT_WLOCK(sc->object);
        vm_object_pip_add(sc->object, 1);
        for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
                len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

                m = vm_page_grab(sc->object, i,
                    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
                VM_OBJECT_WUNLOCK(sc->object);
                sched_pin();
                sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
                VM_OBJECT_WLOCK(sc->object);
                if (bp->bio_cmd == BIO_READ) {
                        if (m->valid != VM_PAGE_BITS_ALL)
                                rv = vm_pager_get_pages(sc->object, &m, 1, 0);
                        if (rv == VM_PAGER_ERROR) {
                                sf_buf_free(sf);
                                sched_unpin();
                                vm_page_wakeup(m);
                                break;
                        } else if (rv == VM_PAGER_FAIL) {
                                /*
                                 * Pager does not have the page.  Zero
                                 * the allocated page, and mark it as
                                 * valid. Do not set dirty, the page
                                 * can be recreated if thrown out.
                                 */
                                bzero((void *)sf_buf_kva(sf), PAGE_SIZE);
                                m->valid = VM_PAGE_BITS_ALL;
                        }
                        bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
                        cpu_flush_dcache(p, len);
                } else if (bp->bio_cmd == BIO_WRITE) {
                        if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
                                rv = vm_pager_get_pages(sc->object, &m, 1, 0);
                        if (rv == VM_PAGER_ERROR) {
                                sf_buf_free(sf);
                                sched_unpin();
                                vm_page_wakeup(m);
                                break;
                        }
                        bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
                        m->valid = VM_PAGE_BITS_ALL;
                } else if (bp->bio_cmd == BIO_DELETE) {
                        if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
                                rv = vm_pager_get_pages(sc->object, &m, 1, 0);
                        if (rv == VM_PAGER_ERROR) {
                                sf_buf_free(sf);
                                sched_unpin();
                                vm_page_wakeup(m);
                                break;
                        }
                        if (len != PAGE_SIZE) {
                                bzero((void *)(sf_buf_kva(sf) + offs), len);
                                vm_page_clear_dirty(m, offs, len);
                                m->valid = VM_PAGE_BITS_ALL;
                        } else
                                vm_pager_page_unswapped(m);
                }
                sf_buf_free(sf);
                sched_unpin();
                vm_page_wakeup(m);
                vm_page_lock(m);
                if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
                        vm_page_free(m);
                else
                        vm_page_activate(m);
                vm_page_unlock(m);
                if (bp->bio_cmd == BIO_WRITE)
                        vm_page_dirty(m);

                /* Actions on further pages start at offset 0 */
                p += PAGE_SIZE - offs;
                offs = 0;
        }
        vm_object_pip_subtract(sc->object, 1);
        VM_OBJECT_WUNLOCK(sc->object);
        return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
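/*
 * Per-device worker thread.  It sleeps on the softc until g_md_start()
 * queues a bio, then dispatches it.  An error value of -1 from the
 * BIO_GETATTR handling means g_handleattr_int() already completed and
 * delivered the bio, so it must not be delivered a second time below.
 */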
static void
md_kthread(void *arg)
{
        struct md_s *sc;
        struct bio *bp;
        int error;

        sc = arg;
        thread_lock(curthread);
        sched_prio(curthread, PRIBIO);
        thread_unlock(curthread);
        if (sc->type == MD_VNODE)
                curthread->td_pflags |= TDP_NORUNNINGBUF;

        for (;;) {
                mtx_lock(&sc->queue_mtx);
                if (sc->flags & MD_SHUTDOWN) {
                        sc->flags |= MD_EXITING;
                        mtx_unlock(&sc->queue_mtx);
                        kproc_exit(0);
                }
                bp = bioq_takefirst(&sc->bio_queue);
                if (!bp) {
                        msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
                        continue;
                }
                mtx_unlock(&sc->queue_mtx);
                if (bp->bio_cmd == BIO_GETATTR) {
                        if ((sc->fwsectors && sc->fwheads &&
                            (g_handleattr_int(bp, "GEOM::fwsectors",
                            sc->fwsectors) ||
                            g_handleattr_int(bp, "GEOM::fwheads",
                            sc->fwheads))) ||
                            g_handleattr_int(bp, "GEOM::candelete", 1))
                                error = -1;
                        else
                                error = EOPNOTSUPP;
                } else {
                        error = sc->start(sc, bp);
                }

                if (error != -1) {
                        bp->bio_completed = bp->bio_length;
                        if ((bp->bio_cmd == BIO_READ) ||
                            (bp->bio_cmd == BIO_WRITE))
                                devstat_end_transaction_bio(sc->devstat, bp);
                        g_io_deliver(bp, error);
                }
        }
}

static struct md_s *
mdfind(int unit)
{
        struct md_s *sc;

        LIST_FOREACH(sc, &md_softc_list, list) {
                if (sc->unit == unit)
                        break;
        }
        return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
        struct md_s *sc;
        int error;

        *errp = 0;
        if (unit == -1)
                unit = alloc_unr(md_uh);
        else
                unit = alloc_unr_specific(md_uh, unit);

        if (unit == -1) {
                *errp = EBUSY;
                return (NULL);
        }

        sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
        sc->type = type;
        bioq_init(&sc->bio_queue);
        mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
        sc->unit = unit;
        sprintf(sc->name, "md%d", unit);
        LIST_INSERT_HEAD(&md_softc_list, sc, list);
        error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
        if (error == 0)
                return (sc);
        LIST_REMOVE(sc, list);
        mtx_destroy(&sc->queue_mtx);
        free_unr(md_uh, sc->unit);
        free(sc, M_MD);
        *errp = error;
        return (NULL);
}

static void
mdinit(struct md_s *sc)
{
        struct g_geom *gp;
        struct g_provider *pp;

        g_topology_lock();
        gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
        gp->softc = sc;
        pp = g_new_providerf(gp, "md%d", sc->unit);
        pp->mediasize = sc->mediasize;
        pp->sectorsize = sc->sectorsize;
        sc->gp = gp;
        sc->pp = pp;
        g_error_provider(pp, 0);
        g_topology_unlock();
        sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
            DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
        uintptr_t sp;
        int error;
        off_t u;

        error = 0;
        if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
                return (EINVAL);
        if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
                return (EINVAL);
        /* Compression doesn't make sense if we have reserved space */
        if (mdio->md_options & MD_RESERVE)
                mdio->md_options &= ~MD_COMPRESS;
        if (mdio->md_fwsectors != 0)
                sc->fwsectors = mdio->md_fwsectors;
        if (mdio->md_fwheads != 0)
                sc->fwheads = mdio->md_fwheads;
        sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
        sc->indir = dimension(sc->mediasize / sc->sectorsize);
        sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
            0x1ff, 0);
        if (mdio->md_options & MD_RESERVE) {
                off_t nsectors;

                nsectors = sc->mediasize / sc->sectorsize;
                for (u = 0; u < nsectors; u++) {
                        sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
                            M_WAITOK : M_NOWAIT) | M_ZERO);
                        if (sp != 0)
                                error = s_write(sc->indir, u, sp);
                        else
                                error = ENOMEM;
                        if (error != 0)
                                break;
                }
        }
        return (error);
}


static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
        char *tmpbuf;
        int error = 0;

        /*
         * Set credentials in our softc
         */

        if (sc->cred)
                crfree(sc->cred);
        sc->cred = crhold(cred);

        /*
         * Horrible kludge to establish credentials for NFS  XXX.
         */

        if (sc->vnode) {
                struct uio auio;
                struct iovec aiov;

                tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
                bzero(&auio, sizeof(auio));

                aiov.iov_base = tmpbuf;
                aiov.iov_len = sc->sectorsize;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = 0;
                auio.uio_rw = UIO_READ;
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_resid = aiov.iov_len;
                vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
                error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
                VOP_UNLOCK(sc->vnode, 0);
                free(tmpbuf, M_TEMP);
        }
        return (error);
}
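/*
 * Configure a vnode-backed device.  If md_file points directly behind
 * the mdio structure, the name was already copied into kernel space by
 * the caller and a strlcpy() suffices; otherwise it is a userland
 * pointer and must be fetched with copyinstr().
 */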
static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
        struct vattr vattr;
        struct nameidata nd;
        char *fname;
        int error, flags;

        /*
         * Kernel-originated requests must have the filename appended
         * to the mdio structure to protect against malicious software.
         */
        fname = mdio->md_file;
        if ((void *)fname != (void *)(mdio + 1)) {
                error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
                if (error != 0)
                        return (error);
        } else
                strlcpy(sc->file, fname, sizeof(sc->file));

        /*
         * If the user specified that this is a read only device, don't
         * set the FWRITE mask before trying to open the backing store.
         */
        flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
        NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
        error = vn_open(&nd, &flags, 0, NULL);
        if (error != 0)
                return (error);
        NDFREE(&nd, NDF_ONLY_PNBUF);
        if (nd.ni_vp->v_type != VREG) {
                error = EINVAL;
                goto bad;
        }
        error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
        if (error != 0)
                goto bad;
        if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
                vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
                if (nd.ni_vp->v_iflag & VI_DOOMED) {
                        /* Forced unmount. */
                        error = EBADF;
                        goto bad;
                }
        }
        nd.ni_vp->v_vflag |= VV_MD;
        VOP_UNLOCK(nd.ni_vp, 0);

        if (mdio->md_fwsectors != 0)
                sc->fwsectors = mdio->md_fwsectors;
        if (mdio->md_fwheads != 0)
                sc->fwheads = mdio->md_fwheads;
        sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
        if (!(flags & FWRITE))
                sc->flags |= MD_READONLY;
        sc->vnode = nd.ni_vp;

        error = mdsetcred(sc, td->td_ucred);
        if (error != 0) {
                sc->vnode = NULL;
                vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
                nd.ni_vp->v_vflag &= ~VV_MD;
                goto bad;
        }
        return (0);
bad:
        VOP_UNLOCK(nd.ni_vp, 0);
        (void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
        return (error);
}
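/*
 * Tear down a device: wither the GEOM geom first so no new requests
 * arrive, then ask the worker thread to exit by setting MD_SHUTDOWN
 * and wait until it acknowledges with MD_EXITING before releasing the
 * backing store and the softc.
 */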
static int
mddestroy(struct md_s *sc, struct thread *td)
{

        if (sc->gp) {
                sc->gp->softc = NULL;
                g_topology_lock();
                g_wither_geom(sc->gp, ENXIO);
                g_topology_unlock();
                sc->gp = NULL;
                sc->pp = NULL;
        }
        if (sc->devstat) {
                devstat_remove_entry(sc->devstat);
                sc->devstat = NULL;
        }
        mtx_lock(&sc->queue_mtx);
        sc->flags |= MD_SHUTDOWN;
        wakeup(sc);
        while (!(sc->flags & MD_EXITING))
                msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
        mtx_unlock(&sc->queue_mtx);
        mtx_destroy(&sc->queue_mtx);
        if (sc->vnode != NULL) {
                vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
                sc->vnode->v_vflag &= ~VV_MD;
                VOP_UNLOCK(sc->vnode, 0);
                (void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
                    FREAD : (FREAD|FWRITE), sc->cred, td);
        }
        if (sc->cred != NULL)
                crfree(sc->cred);
        if (sc->object != NULL)
                vm_object_deallocate(sc->object);
        if (sc->indir)
                destroy_indir(sc, sc->indir);
        if (sc->uma)
                uma_zdestroy(sc->uma);

        LIST_REMOVE(sc, list);
        free_unr(md_uh, sc->unit);
        free(sc, M_MD);
        return (0);
}
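/*
 * Resize an existing device.  Only vnode- and swap-backed devices can
 * be resized: for swap the VM object's size and swap reservation are
 * adjusted here, while for vnode devices only the provider size
 * changes, since the backing file is the real store.
 */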
static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
        int error, res;
        vm_pindex_t oldpages, newpages;

        switch (sc->type) {
        case MD_VNODE:
                break;
        case MD_SWAP:
                if (mdio->md_mediasize <= 0 ||
                    (mdio->md_mediasize % PAGE_SIZE) != 0)
                        return (EDOM);
                oldpages = OFF_TO_IDX(round_page(sc->mediasize));
                newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
                if (newpages < oldpages) {
                        VM_OBJECT_WLOCK(sc->object);
                        vm_object_page_remove(sc->object, newpages, 0, 0);
                        swap_pager_freespace(sc->object, newpages,
                            oldpages - newpages);
                        swap_release_by_cred(IDX_TO_OFF(oldpages -
                            newpages), sc->cred);
                        sc->object->charge = IDX_TO_OFF(newpages);
                        sc->object->size = newpages;
                        VM_OBJECT_WUNLOCK(sc->object);
                } else if (newpages > oldpages) {
                        res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
                            oldpages), sc->cred);
                        if (!res)
                                return (ENOMEM);
                        if ((mdio->md_options & MD_RESERVE) ||
                            (sc->flags & MD_RESERVE)) {
                                error = swap_pager_reserve(sc->object,
                                    oldpages, newpages - oldpages);
                                if (error < 0) {
                                        swap_release_by_cred(
                                            IDX_TO_OFF(newpages - oldpages),
                                            sc->cred);
                                        return (EDOM);
                                }
                        }
                        VM_OBJECT_WLOCK(sc->object);
                        sc->object->charge = IDX_TO_OFF(newpages);
                        sc->object->size = newpages;
                        VM_OBJECT_WUNLOCK(sc->object);
                }
                break;
        default:
                return (EOPNOTSUPP);
        }

        sc->mediasize = mdio->md_mediasize;
        g_topology_lock();
        g_resize_provider(sc->pp, sc->mediasize);
        g_topology_unlock();
        return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
        vm_ooffset_t npage;
        int error;

        /*
         * Range check.  Disallow negative sizes or any size less than the
         * size of a page.  Then round to a page.
         */
        if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
                return (EDOM);

        /*
         * Allocate an OBJT_SWAP object.
         *
         * Note the truncation.
         */

        npage = mdio->md_mediasize / PAGE_SIZE;
        if (mdio->md_fwsectors != 0)
                sc->fwsectors = mdio->md_fwsectors;
        if (mdio->md_fwheads != 0)
                sc->fwheads = mdio->md_fwheads;
        sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
            VM_PROT_DEFAULT, 0, td->td_ucred);
        if (sc->object == NULL)
                return (ENOMEM);
        sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
        if (mdio->md_options & MD_RESERVE) {
                if (swap_pager_reserve(sc->object, 0, npage) < 0) {
                        error = EDOM;
                        goto finish;
                }
        }
        error = mdsetcred(sc, td->td_ucred);
finish:
        if (error != 0) {
                vm_object_deallocate(sc->object);
                sc->object = NULL;
        }
        return (error);
}
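/*
 * Main ioctl handler.  It runs with the md_sx configuration lock held
 * (taken by the mdctlioctl() wrapper below), so attach, detach, resize
 * and query operations are serialized against each other.
 */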
static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
        struct md_ioctl *mdio;
        struct md_s *sc;
        int error, i;
        unsigned sectsize;

        if (md_debug)
                printf("mdctlioctl(%s %lx %p %x %p)\n",
                    devtoname(dev), cmd, addr, flags, td);

        mdio = (struct md_ioctl *)addr;
        if (mdio->md_version != MDIOVERSION)
                return (EINVAL);

        /*
         * The version number is asserted once out here rather than in
         * the individual ioctl handlers, even though (a) a future
         * ioctl might not read an mdio at all, and (b) the correct
         * return value for an unknown ioctl is ENOIOCTL, not EINVAL.
         */
        error = 0;
        switch (cmd) {
        case MDIOCATTACH:
                switch (mdio->md_type) {
                case MD_MALLOC:
                case MD_PRELOAD:
                case MD_VNODE:
                case MD_SWAP:
                        break;
                default:
                        return (EINVAL);
                }
                if (mdio->md_sectorsize == 0)
                        sectsize = DEV_BSIZE;
                else
                        sectsize = mdio->md_sectorsize;
                if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
                        return (EINVAL);
                if (mdio->md_options & MD_AUTOUNIT)
                        sc = mdnew(-1, &error, mdio->md_type);
                else {
                        if (mdio->md_unit > INT_MAX)
                                return (EINVAL);
                        sc = mdnew(mdio->md_unit, &error, mdio->md_type);
                }
                if (sc == NULL)
                        return (error);
                if (mdio->md_options & MD_AUTOUNIT)
                        mdio->md_unit = sc->unit;
                sc->mediasize = mdio->md_mediasize;
                sc->sectorsize = sectsize;
                error = EDOOFUS;
                switch (sc->type) {
                case MD_MALLOC:
                        sc->start = mdstart_malloc;
                        error = mdcreate_malloc(sc, mdio);
                        break;
                case MD_PRELOAD:
                        /*
                         * We disallow attaching preloaded memory disks via
                         * ioctl.  Preloaded memory disks are automatically
                         * attached in g_md_init().
                         */
                        error = EOPNOTSUPP;
                        break;
                case MD_VNODE:
                        sc->start = mdstart_vnode;
                        error = mdcreate_vnode(sc, mdio, td);
                        break;
                case MD_SWAP:
                        sc->start = mdstart_swap;
                        error = mdcreate_swap(sc, mdio, td);
                        break;
                }
                if (error != 0) {
                        mddestroy(sc, td);
                        return (error);
                }

                /* Prune off any residual fractional sector */
                i = sc->mediasize % sc->sectorsize;
                sc->mediasize -= i;

                mdinit(sc);
                return (0);
        case MDIOCDETACH:
                if (mdio->md_mediasize != 0 ||
                    (mdio->md_options & ~MD_FORCE) != 0)
                        return (EINVAL);

                sc = mdfind(mdio->md_unit);
                if (sc == NULL)
                        return (ENOENT);
                if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
                    !(mdio->md_options & MD_FORCE))
                        return (EBUSY);
                return (mddestroy(sc, td));
        case MDIOCRESIZE:
                if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
                        return (EINVAL);

                sc = mdfind(mdio->md_unit);
                if (sc == NULL)
                        return (ENOENT);
                if (mdio->md_mediasize < sc->sectorsize)
                        return (EINVAL);
                if (mdio->md_mediasize < sc->mediasize &&
                    !(sc->flags & MD_FORCE) &&
                    !(mdio->md_options & MD_FORCE))
                        return (EBUSY);
                return (mdresize(sc, mdio));
        case MDIOCQUERY:
                sc = mdfind(mdio->md_unit);
                if (sc == NULL)
                        return (ENOENT);
                mdio->md_type = sc->type;
                mdio->md_options = sc->flags;
                mdio->md_mediasize = sc->mediasize;
                mdio->md_sectorsize = sc->sectorsize;
                if (sc->type == MD_VNODE)
                        error = copyout(sc->file, mdio->md_file,
                            strlen(sc->file) + 1);
                return (error);
        case MDIOCLIST:
                i = 1;
                LIST_FOREACH(sc, &md_softc_list, list) {
                        if (i == MDNPAD - 1)
                                mdio->md_pad[i] = -1;
                        else
                                mdio->md_pad[i++] = sc->unit;
                }
                mdio->md_pad[0] = i - 1;
                return (0);
        default:
                return (ENOIOCTL);
        }
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
        int error;

        sx_xlock(&md_sx);
        error = xmdctlioctl(dev, cmd, addr, flags, td);
        sx_xunlock(&md_sx);
        return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
        struct md_s *sc;
        int error;

        sc = mdnew(-1, &error, MD_PRELOAD);
        if (sc == NULL)
                return;
        sc->mediasize = length;
        sc->sectorsize = DEV_BSIZE;
        sc->pl_ptr = image;
        sc->pl_len = length;
        sc->start = mdstart_preload;
#ifdef MD_ROOT
        if (sc->unit == 0)
                rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
        mdinit(sc);
        if (name != NULL) {
                printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
                    MD_NAME, sc->unit, name, length, image);
        }
}
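/*
 * GEOM class initialization.  This runs with the topology lock held,
 * so the lock is dropped around the pieces that create devices: the
 * statically compiled-in root image (MD_ROOT_SIZE), any images staged
 * by the loader as "md_image" or "mfs_root" modules, and the control
 * device node.
 */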
static void
g_md_init(struct g_class *mp __unused)
{
        caddr_t mod;
        u_char *ptr, *name, *type;
        unsigned len;
        int i;

        /* figure out log2(NINDIR) */
        for (i = NINDIR, nshift = -1; i; nshift++)
                i >>= 1;

        mod = NULL;
        sx_init(&md_sx, "MD config lock");
        g_topology_unlock();
        md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT_SIZE
        sx_xlock(&md_sx);
        md_preloaded(mfs_root.start, sizeof(mfs_root.start), NULL);
        sx_xunlock(&md_sx);
#endif
        /* XXX: are preload_* static or do they need Giant ? */
        while ((mod = preload_search_next_name(mod)) != NULL) {
                name = (char *)preload_search_info(mod, MODINFO_NAME);
                if (name == NULL)
                        continue;
                type = (char *)preload_search_info(mod, MODINFO_TYPE);
                if (type == NULL)
                        continue;
                if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
                        continue;
                ptr = preload_fetch_addr(mod);
                len = preload_fetch_size(mod);
                if (ptr != NULL && len != 0) {
                        sx_xlock(&md_sx);
                        md_preloaded(ptr, len, name);
                        sx_xunlock(&md_sx);
                }
        }
        status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
            0600, MDCTL_NAME);
        g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
        struct md_s *mp;
        char *type;

        mp = gp->softc;
        if (mp == NULL)
                return;

        switch (mp->type) {
        case MD_MALLOC:
                type = "malloc";
                break;
        case MD_PRELOAD:
                type = "preload";
                break;
        case MD_VNODE:
                type = "vnode";
                break;
        case MD_SWAP:
                type = "swap";
                break;
        default:
                type = "unknown";
                break;
        }

        if (pp != NULL) {
                if (indent == NULL) {
                        sbuf_printf(sb, " u %d", mp->unit);
                        sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
                        sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
                        sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
                        sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
                        sbuf_printf(sb, " t %s", type);
                        if (mp->type == MD_VNODE && mp->vnode != NULL)
                                sbuf_printf(sb, " file %s", mp->file);
                } else {
                        sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
                            mp->unit);
                        sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
                            indent, (uintmax_t) mp->sectorsize);
                        sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
                            indent, (uintmax_t) mp->fwheads);
                        sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
                            indent, (uintmax_t) mp->fwsectors);
                        sbuf_printf(sb, "%s<length>%ju</length>\n",
                            indent, (uintmax_t) mp->mediasize);
                        sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
                            (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
                        sbuf_printf(sb, "%s<access>%s</access>\n", indent,
                            (mp->flags & MD_READONLY) == 0 ? "read-write":
                            "read-only");
                        sbuf_printf(sb, "%s<type>%s</type>\n", indent,
                            type);
                        if (mp->type == MD_VNODE && mp->vnode != NULL)
                                sbuf_printf(sb, "%s<file>%s</file>\n",
                                    indent, mp->file);
                }
        }
}

static void
g_md_fini(struct g_class *mp __unused)
{

        sx_destroy(&md_sx);
        if (status_dev != NULL)
                destroy_dev(status_dev);
        delete_unrhdr(md_uh);
}