/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
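
/*
 * Worked example (illustrative only; the real values depend on the
 * platform's PAGE_SIZE and pointer width): with 4 KB pages and 8-byte
 * pointers NINDIR is 512 and nshift is 9, so one leaf layer covers 512
 * sectors and two layers cover 512 * 512 = 262144 sectors.  A 1 GB
 * device with 512-byte sectors (2097152 sectors) therefore gets a
 * three-layer tree whose top node uses shift 18.
 */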

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */
static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */
static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
		devstat_start_transaction_bio(sc->devstat, bp);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}
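
/*
 * The malloc backend keeps one indir slot per sector.  A slot value of
 * 0 means the sector has never been written (reads return zeroes), a
 * value in the range 1..255 means the whole sector is filled with that
 * byte (the MD_COMPRESS case), and any larger value is a pointer to a
 * uma-allocated sector buffer.
 */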
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				memset(dst, osp, sc->sectorsize);
			else {
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;
	off_t end, zerosize;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	/*
	 * Special case for BIO_DELETE.  On the surface, this is very
	 * similar to BIO_WRITE, except that we write from our own
	 * fixed-length buffer, so we have to loop.  The net result is
	 * that the two cases end up having very little in common.
	 */
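	/*
	 * For instance (illustrative numbers only, ZERO_REGION_SIZE is
	 * platform-dependent): with a 64 KB zero_region and 512-byte
	 * sectors, zerosize below is 65536, so each VOP_WRITE pass
	 * zeroes at most 64 KB of the file and the loop repeats until
	 * the whole deleted range has been covered.
	 */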
	if (bp->bio_cmd == BIO_DELETE) {
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;
		end = bp->bio_offset + bp->bio_length;
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		while (auio.uio_offset < end) {
			aiov.iov_base = __DECONST(void *, zero_region);
			aiov.iov_len = end - auio.uio_offset;
			if (aiov.iov_len > zerosize)
				aiov.iov_len = zerosize;
			auio.uio_resid = aiov.iov_len;
			error = VOP_WRITE(vp, &auio,
			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
			if (error != 0)
				break;
		}
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		bp->bio_resid = end - auio.uio_offset;
		return (error);
	}

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	bp->bio_resid = auio.uio_resid;
	return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
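	/*
	 * Worked example (assuming 4 KB pages): bio_offset 5120 and
	 * bio_length 8192 give offs = 1024, a first page index of 1,
	 * lastp = 3 and lastend = 1024, so the loop touches the last
	 * 3072 bytes of page 1, all of page 2 and the first 1024 bytes
	 * of page 3.
	 */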
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				bzero((void *)sf_buf_kva(sf), PAGE_SIZE);
				m->valid = VM_PAGE_BITS_ALL;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
			cpu_flush_dcache(p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			if (len != PAGE_SIZE) {
				bzero((void *)(sf_buf_kva(sf) + offs), len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		sf_buf_free(sf);
		sched_unpin();
		vm_page_wakeup(m);
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
	}
	vm_object_pip_subtract(sc->object, 1);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read-only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_LOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_UNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_LOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_UNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl.  Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zu bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	}
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start), NULL);
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
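	/*
	 * Preloaded images normally come from the boot loader; for
	 * example (illustrative loader.conf entries, the "mfsroot"
	 * prefix is arbitrary):
	 *
	 *	mfsroot_load="YES"
	 *	mfsroot_type="mfs_root"
	 *	mfsroot_name="/boot/mfsroot"
	 *
	 * would produce a preloaded module of type "mfs_root" that the
	 * loop below turns into an md(4) device.
	 */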
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}