/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based in the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
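 *
 * (Inferred from the marker strings, not stated in the original: an
 * external tool locates the two markers in the kernel object and
 * overwrites the space between them with the file system image, using
 * the end marker to confirm the image fits within MD_ROOT_SIZE
 * kilobytes.)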
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static int mdunits;
static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
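 *
 * A worked example (hypothetical numbers, not part of the original
 * comment): with 4 KB pages and 64-bit pointers, NINDIR is 512 and
 * nshift is 9.  A 1 GB device with 512-byte sectors holds 2^21 sectors,
 * so the loop below divides twice (layer = 2) and the top node gets
 * shift 18: lookups then walk a three-level tree of 512-way nodes.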
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
		devstat_start_transaction_bio(sc->devstat, bp);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}
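
/*
 * How a sector is encoded in the indir tree (a summary inferred from the
 * code below, not a comment carried over from the original): a slot value
 * of 0 means the sector has never been written and reads back as zeroes;
 * a value in 1..255 means the whole sector is filled with that byte (the
 * MD_COMPRESS case); anything larger is a pointer to a uma-allocated
 * buffer holding the sector data.  Hence the recurring "osp <= 255" tests
 * before a value is dereferenced as a pointer.
 */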
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				memset(dst, osp, sc->sectorsize);
			else {
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}
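
/*
 * Preloaded images live in wired kernel memory, so I/O against them is a
 * straight bcopy() to or from the image.  Note (an observation about the
 * code, not an original comment) that commands other than read and write
 * fall through the switch and are reported as success.
 */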
static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;
	off_t end, zerosize;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	/*
	 * Special case for BIO_DELETE.  On the surface, this is very
	 * similar to BIO_WRITE, except that we write from our own
	 * fixed-length buffer, so we have to loop.  The net result is
	 * that the two cases end up having very little in common.
	 */
	if (bp->bio_cmd == BIO_DELETE) {
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;
		end = bp->bio_offset + bp->bio_length;
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		while (auio.uio_offset < end) {
			aiov.iov_base = __DECONST(void *, zero_region);
			aiov.iov_len = end - auio.uio_offset;
			if (aiov.iov_len > zerosize)
				aiov.iov_len = zerosize;
			auio.uio_resid = aiov.iov_len;
			error = VOP_WRITE(vp, &auio,
			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
			if (error != 0)
				break;
		}
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		bp->bio_resid = end - auio.uio_offset;
		return (error);
	}

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	bp->bio_resid = auio.uio_resid;
	return (error);
}
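
/*
 * Swap-backed devices store their data in anonymous (OBJT_SWAP) VM pages.
 * Each page in the affected range is grabbed, temporarily mapped through
 * an sf_buf, and copied to or from; partially covered pages are first
 * validated via the pager.  A small worked example (hypothetical numbers):
 * a 1024-byte read at offset 512 with 4 KB pages gives offs = 512,
 * lastp = 0 and lastend = 1536, so a single page is touched for
 * len = 1024 bytes.
 */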
static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
			cpu_flush_dcache(p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			if (len != PAGE_SIZE) {
				bzero((void *)(sf_buf_kva(sf) + offs), len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		sf_buf_free(sf);
		sched_unpin();
		vm_page_wakeup(m);
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
	}
	vm_object_pip_subtract(sc->object, 1);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
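
/*
 * Each md(4) unit has a dedicated worker thread that drains the bio queue
 * filled by g_md_start().  One convention worth spelling out (visible in
 * the code, not documented in the original): an error value of -1 means
 * the request was already completed, e.g. by g_handleattr_int(), and must
 * not be delivered a second time.
 */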
static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) ||
			    (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
		return (EINVAL);
	if (mdio->md_base == 0)
		return (EINVAL);
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
	return (0);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */
	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */
	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}
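
/*
 * Teardown order below (as the code is written): wither the GEOM geom
 * first so no new requests arrive, then tell the worker thread to exit
 * and wait for it, and only then release the vnode, credentials, VM
 * object or indir tree backing the device.
 */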
static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}
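
/*
 * Resizing (a summary of the code below): shrinking a swap-backed device
 * removes the trailing pages and returns their swap reservation; growing
 * reserves swap up front and, if MD_RESERVE is in effect, pre-allocates
 * the new pages.  From userland this is typically driven by something
 * like "mdconfig -r -u 0 -s 2g" (an illustrative invocation, not taken
 * from the original source).
 */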
static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_LOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_UNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_LOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_UNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes or any size less than the
	 * size of a page.  Then round to a page.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}
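
/*
 * The control ioctls below are what mdconfig(8) speaks.  Roughly (an
 * illustrative mapping, not part of the original source): "mdconfig -a
 * -t swap -s 64m" issues MDIOCATTACH, "mdconfig -d -u 0" issues
 * MDIOCDETACH, "mdconfig -r -u 0 -s 2g" issues MDIOCRESIZE, and
 * "mdconfig -l" relies on MDIOCLIST/MDIOCQUERY.
 */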
static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * The version number is checked up front because every ioctl
	 * currently defined reads an mdio.  Should an ioctl that does
	 * not take an mdio ever be added, the check must move into the
	 * individual handlers; note that the correct return value for
	 * an unknown ioctl is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			sc->start = mdstart_preload;
			error = mdcreate_preload(sc, mdio);
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}
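
/*
 * g_md_init() below also scans images preloaded by loader(8); a module of
 * type "md_image" or "mfs_root" becomes an md device at boot.  A typical
 * loader.conf fragment (illustrative, not from the original source):
 *
 *	mfsroot_load="YES"
 *	mfsroot_type="mfs_root"
 *	mfsroot_name="/boot/mfsroot"
 */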
static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
			    MD_NAME, mdunits, name, len, ptr);
			sx_xlock(&md_sx);
			md_preloaded(ptr, len);
			sx_xunlock(&md_sx);
		}
	}
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n",
			    indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}