/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static int mdunits;
static struct cdev *status_dev = NULL;
static struct sx md_sx;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};
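
/*
 * Illustrative numbers (not normative): with 4 kB pages and 8-byte
 * pointers, NINDIR is 4096 / 8 = 512 and nshift works out to 9, so
 * every level of the "indir" radix tree fans out 512 ways and
 * consumes 9 bits of the sector number.
 */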

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_NOWAIT | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int i, layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}
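
/*
 * Worked example for dimension(), assuming NINDIR == 512 (nshift 9):
 * a 1 GB malloc disk with 512-byte sectors spans 2097152 sectors.
 * 2097152 / 512 = 4096 and 4096 / 512 = 8, so the loop stops after two
 * divisions with layer == 2 and the root node gets shift = 2 * 9 = 18.
 * s_read() and s_write() then index the three levels of the tree with
 * bits 18-26, 9-17 and 0-8 of the sector number.
 */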

/*
 * Read a given sector.
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0.
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
		devstat_start_transaction_bio(sc->devstat, bp);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}
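
/*
 * Leaf values in the indir tree use a small encoding trick: 0 means the
 * sector was never written (reads return zeroes), values 1..255 mean
 * "sector filled with this byte" (the MD_COMPRESS case), and anything
 * larger is a pointer to a full sector allocated from sc->uma.
 */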
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				memset(dst, osp, sc->sectorsize);
			else {
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}
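
/*
 * The vnode backend turns each bio into a VOP_READ()/VOP_WRITE() on the
 * backing file through a kernel-space uio, so requests take the same
 * path through the file system as an ordinary read(2) or write(2).
 */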
static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error, vfslocked;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;

	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	bp->bio_resid = auio.uio_resid;
	return (error);
}
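
/*
 * The swap backend operates on whole VM pages backed by an OBJT_SWAP
 * object.  Requests that start or end partway into a page only adjust
 * "offs" and "len" for the first and last iteration; sf_buf_alloc()
 * supplies a temporary kernel mapping for each page while it is copied.
 */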
static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (i.e., first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (i.e., PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
			cpu_flush_dcache(p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
#if 0
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bzero((void *)(sf_buf_kva(sf) + offs), len);
			vm_page_dirty(m);
			m->valid = VM_PAGE_BITS_ALL;
#endif
		}
		sf_buf_free(sf);
		sched_unpin();
		vm_page_wakeup(m);
		vm_page_lock_queues();
		vm_page_activate(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);
		vm_page_unlock_queues();

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
#if 0
		if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
			printf("wire_count %d busy %d flags %x hold_count %d "
			    "act_count %d queue %d valid %d dirty %d @ %d\n",
			    m->wire_count, m->busy, m->flags, m->hold_count,
			    m->act_count, m->queue, m->valid, m->dirty, i);
#endif
	}
	vm_object_pip_subtract(sc->object, 1);
	vm_object_set_writeable_dirty(sc->object);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP,
			    "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if (sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads)))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		/*
		 * An error of -1 means the bio was already completed and
		 * delivered by g_handleattr_int(); everything else still
		 * needs to be delivered here.
		 */
		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
			if ((bp->bio_cmd == BIO_READ) ||
			    (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc, *sc2;
	int error, max = -1;

	*errp = 0;
	LIST_FOREACH(sc2, &md_softc_list, list) {
		if (unit == sc2->unit) {
			*errp = EBUSY;
			return (NULL);
		}
		if (unit == -1 && sc2->unit > max)
			max = sc2->unit;
	}
	if (unit == -1)
		unit = max + 1;
	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->queue_mtx);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}
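
/*
 * The mdcreate_*() functions below validate an MDIOCATTACH request for
 * one particular backend and fill in the backend-specific softc fields;
 * xmdctlioctl() dispatches to them based on mdio->md_type.
 */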

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
		return (EINVAL);
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
	return (0);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	/*
	 * The 0x1ff alignment keeps zone addresses 512-byte aligned, so
	 * a sector pointer can never collide with the 0..255 fill-byte
	 * values used by the compression encoding above.
	 */
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc.
	 */
	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */
	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}
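
/*
 * The vnode attach path below: copy in the file path, vn_open() it
 * (dropping FWRITE first when MD_READONLY is requested), insist on a
 * regular file, tag the vnode with VV_MD (cleared again in mddestroy()),
 * and prime the credentials via mdsetcred().
 */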
static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	int error, flags, vfslocked;

	error = copyinstr(mdio->md_file, sc->file, sizeof(sc->file), NULL);
	if (error != 0)
		return (error);
	flags = FREAD|FWRITE;
	/*
	 * If the user specified that this is a read-only device, unset the
	 * FWRITE mask before trying to open the backing store.
	 */
	if ((mdio->md_options & MD_READONLY) != 0)
		flags &= ~FWRITE;
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{
	int vfslocked;

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free(sc, M_MD);
	return (0);
}
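
/*
 * Note the symmetry with the attach path: mddestroy() unwinds everything
 * mdnew(), mdcreate_*() and mdinit() set up: the GEOM geom and provider,
 * the devstat entry, the worker thread, the backing store, and finally
 * the softc itself.
 */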

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow zero sizes or any size that is not a
	 * multiple of the page size.
	 */
	if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}
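
/*
 * For reference, attaching a device from userland (what mdconfig(8)
 * does) looks roughly like the sketch below; the request structure and
 * the MDIOC* commands come from <sys/mdioctl.h> and the control device
 * is /dev/mdctl.  Illustrative only:
 *
 *	struct md_ioctl mdio;
 *	int fd = open("/dev/mdctl", O_RDWR);
 *
 *	memset(&mdio, 0, sizeof(mdio));
 *	mdio.md_version = MDIOVERSION;
 *	mdio.md_type = MD_SWAP;
 *	mdio.md_mediasize = 64 * 1024 * 1024;
 *	mdio.md_options = MD_AUTOUNIT;
 *	if (ioctl(fd, MDIOCATTACH, &mdio) == 0)
 *		printf("attached md%d\n", mdio.md_unit);
 */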
static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		if (mdio->md_sectorsize == 0)
			sc->sectorsize = DEV_BSIZE;
		else
			sc->sectorsize = mdio->md_sectorsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			sc->start = mdstart_preload;
			error = mdcreate_preload(sc, mdio);
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	caddr_t c;
	u_char *ptr;
	char *name, *type;
	unsigned len;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		c = preload_search_info(mod, MODINFO_ADDR);
		ptr = *(u_char **)c;
		c = preload_search_info(mod, MODINFO_SIZE);
		len = *(size_t *)c;
		printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
		    MD_NAME, mdunits, name, len, ptr);
		sx_xlock(&md_sx);
		md_preloaded(ptr, len);
		sx_xunlock(&md_sx);
	}
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}
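
/*
 * g_md_dumpconf() serves both output styles GEOM asks for: the flat
 * kern.geom.conftxt format (indent == NULL) and the kern.geom.confxml
 * XML tree; the same fields are emitted in both cases.
 */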
static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
}