/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER	1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT	(10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
        u_char start[MD_ROOT_SIZE*1024];
        u_char end[128];
} mfs_root = {
        .start = "MFS Filesystem goes here",
        .end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static int mdunits;
static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
        .d_version =    D_VERSION,
        .d_ioctl =      mdctlioctl,
        .d_name =       MD_NAME,
};

struct g_class g_md_class = {
        .name = "MD",
        .version = G_VERSION,
        .init = g_md_init,
        .fini = g_md_fini,
        .start = g_md_start,
        .access = g_md_access,
        .dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
        uintptr_t       *array;
        u_int           total;
        u_int           used;
        u_int           shift;
};

struct md_s {
        int unit;
        LIST_ENTRY(md_s) list;
        struct bio_queue_head bio_queue;
        struct mtx queue_mtx;
        struct cdev *dev;
        enum md_types type;
        off_t mediasize;
        unsigned sectorsize;
        unsigned opencount;
        unsigned fwheads;
        unsigned fwsectors;
        unsigned flags;
        char name[20];
        struct proc *procp;
        struct g_geom *gp;
        struct g_provider *pp;
        int (*start)(struct md_s *sc, struct bio *bp);
        struct devstat *devstat;

        /* MD_MALLOC related fields */
        struct indir *indir;
        uma_zone_t uma;

        /* MD_PRELOAD related fields */
        u_char *pl_ptr;
        size_t pl_len;

        /* MD_VNODE related fields */
        struct vnode *vnode;
        char file[PATH_MAX];
        struct ucred *cred;

        /* MD_SWAP related fields */
        vm_object_t object;
};
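
/*
 * For MD_MALLOC devices the sector map is a radix tree of "indir"
 * nodes with NINDIR slots per level.  Each leaf entry encodes one
 * sector in a single uintptr_t (an encoding implied by the users of
 * s_read()/s_write() below):
 *
 *	0	- sector never written; reads back as zeroes
 *	1..255	- sector is filled with that single byte value
 *	> 255	- pointer to an sc->uma allocation holding the data
 *
 * This is why mdstart_malloc() and destroy_indir() compare entries
 * against 255 before treating them as pointers.
 */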

static struct indir *
new_indir(u_int shift)
{
        struct indir *ip;

        ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
        if (ip == NULL)
                return (NULL);
        ip->array = malloc(sizeof(uintptr_t) * NINDIR,
            M_MDSECT, M_NOWAIT | M_ZERO);
        if (ip->array == NULL) {
                free(ip, M_MD);
                return (NULL);
        }
        ip->total = NINDIR;
        ip->shift = shift;
        return (ip);
}

static void
del_indir(struct indir *ip)
{

        free(ip->array, M_MDSECT);
        free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
        int i;

        for (i = 0; i < NINDIR; i++) {
                if (!ip->array[i])
                        continue;
                if (ip->shift)
                        destroy_indir(sc, (struct indir*)(ip->array[i]));
                else if (ip->array[i] > 255)
                        uma_zfree(sc->uma, (void *)(ip->array[i]));
        }
        del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
        off_t rcnt;
        struct indir *ip;
        int layer;

        rcnt = size;
        layer = 0;
        while (rcnt > NINDIR) {
                rcnt /= NINDIR;
                layer++;
        }

        /*
         * XXX: the top layer is probably not fully populated, so we allocate
         * too much space for ip->array in here.
         */
        ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
        ip->array = malloc(sizeof(uintptr_t) * NINDIR,
            M_MDSECT, M_WAITOK | M_ZERO);
        ip->total = NINDIR;
        ip->shift = layer * nshift;
        return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
        struct indir *cip;
        int idx;
        uintptr_t up;

        if (md_debug > 1)
                printf("s_read(%jd)\n", (intmax_t)offset);
        up = 0;
        for (cip = ip; cip != NULL;) {
                if (cip->shift) {
                        idx = (offset >> cip->shift) & NMASK;
                        up = cip->array[idx];
                        cip = (struct indir *)up;
                        continue;
                }
                idx = offset & NMASK;
                return (cip->array[idx]);
        }
        return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
        struct indir *cip, *lip[10];
        int idx, li;
        uintptr_t up;

        if (md_debug > 1)
                printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
        up = 0;
        li = 0;
        cip = ip;
        for (;;) {
                lip[li++] = cip;
                if (cip->shift) {
                        idx = (offset >> cip->shift) & NMASK;
                        up = cip->array[idx];
                        if (up != 0) {
                                cip = (struct indir *)up;
                                continue;
                        }
                        /* Allocate branch */
                        cip->array[idx] =
                            (uintptr_t)new_indir(cip->shift - nshift);
                        if (cip->array[idx] == 0)
                                return (ENOSPC);
                        cip->used++;
                        up = cip->array[idx];
                        cip = (struct indir *)up;
                        continue;
                }
                /* leafnode */
                idx = offset & NMASK;
                up = cip->array[idx];
                if (up != 0)
                        cip->used--;
                cip->array[idx] = ptr;
                if (ptr != 0)
                        cip->used++;
                break;
        }
        if (cip->used != 0 || li == 1)
                return (0);
        li--;
        while (cip->used == 0 && cip != ip) {
                li--;
                idx = (offset >> lip[li]->shift) & NMASK;
                up = lip[li]->array[idx];
                KASSERT(up == (uintptr_t)cip, ("md screwed up"));
                del_indir(cip);
                lip[li]->array[idx] = 0;
                lip[li]->used--;
                cip = lip[li];
        }
        return (0);
}
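
/*
 * Worked example (assuming 4 KB pages and 64-bit pointers): NINDIR is
 * 512 and nshift is 9, so a device of up to 512 * 512 sectors gets a
 * two-layer tree; s_read() and s_write() then index the top node with
 * sector-number bits 17..9 and the leaf node with bits 8..0.
 */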

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
        struct md_s *sc;

        sc = pp->geom->softc;
        if (sc == NULL) {
                if (r <= 0 && w <= 0 && e <= 0)
                        return (0);
                return (ENXIO);
        }
        r += pp->acr;
        w += pp->acw;
        e += pp->ace;
        if ((sc->flags & MD_READONLY) != 0 && w > 0)
                return (EROFS);
        if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
                sc->opencount = 1;
        } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
                sc->opencount = 0;
        }
        return (0);
}

static void
g_md_start(struct bio *bp)
{
        struct md_s *sc;

        sc = bp->bio_to->geom->softc;
        if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
                devstat_start_transaction_bio(sc->devstat, bp);
        mtx_lock(&sc->queue_mtx);
        bioq_disksort(&sc->bio_queue, bp);
        mtx_unlock(&sc->queue_mtx);
        wakeup(sc);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
        int i, error;
        u_char *dst;
        off_t secno, nsec, uc;
        uintptr_t sp, osp;

        switch (bp->bio_cmd) {
        case BIO_READ:
        case BIO_WRITE:
        case BIO_DELETE:
                break;
        default:
                return (EOPNOTSUPP);
        }

        nsec = bp->bio_length / sc->sectorsize;
        secno = bp->bio_offset / sc->sectorsize;
        dst = bp->bio_data;
        error = 0;
        while (nsec--) {
                osp = s_read(sc->indir, secno);
                if (bp->bio_cmd == BIO_DELETE) {
                        if (osp != 0)
                                error = s_write(sc->indir, secno, 0);
                } else if (bp->bio_cmd == BIO_READ) {
                        if (osp == 0)
                                bzero(dst, sc->sectorsize);
                        else if (osp <= 255)
                                memset(dst, osp, sc->sectorsize);
                        else {
                                bcopy((void *)osp, dst, sc->sectorsize);
                                cpu_flush_dcache(dst, sc->sectorsize);
                        }
                        osp = 0;
                } else if (bp->bio_cmd == BIO_WRITE) {
                        if (sc->flags & MD_COMPRESS) {
                                uc = dst[0];
                                for (i = 1; i < sc->sectorsize; i++)
                                        if (dst[i] != uc)
                                                break;
                        } else {
                                i = 0;
                                uc = 0;
                        }
                        if (i == sc->sectorsize) {
                                if (osp != uc)
                                        error = s_write(sc->indir, secno, uc);
                        } else {
                                if (osp <= 255) {
                                        sp = (uintptr_t)uma_zalloc(sc->uma,
                                            M_NOWAIT);
                                        if (sp == 0) {
                                                error = ENOSPC;
                                                break;
                                        }
                                        bcopy(dst, (void *)sp, sc->sectorsize);
                                        error = s_write(sc->indir, secno, sp);
                                } else {
                                        bcopy(dst, (void *)osp, sc->sectorsize);
                                        osp = 0;
                                }
                        }
                } else {
                        error = EOPNOTSUPP;
                }
                if (osp > 255)
                        uma_zfree(sc->uma, (void*)osp);
                if (error != 0)
                        break;
                secno++;
                dst += sc->sectorsize;
        }
        bp->bio_resid = 0;
        return (error);
}
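
/*
 * With MD_COMPRESS set, mdstart_malloc() above never stores a sector
 * whose bytes are all identical: the byte value itself (0..255) goes
 * into the leaf entry via s_write() (a value of 0 simply prunes the
 * entry), and any data block previously allocated for that sector is
 * returned to the uma zone.
 */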

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

        switch (bp->bio_cmd) {
        case BIO_READ:
                bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
                    bp->bio_length);
                cpu_flush_dcache(bp->bio_data, bp->bio_length);
                break;
        case BIO_WRITE:
                bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
                    bp->bio_length);
                break;
        }
        bp->bio_resid = 0;
        return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
        int error, vfslocked;
        struct uio auio;
        struct iovec aiov;
        struct mount *mp;
        struct vnode *vp;
        struct thread *td;

        switch (bp->bio_cmd) {
        case BIO_READ:
        case BIO_WRITE:
        case BIO_FLUSH:
                break;
        default:
                return (EOPNOTSUPP);
        }

        td = curthread;
        vp = sc->vnode;

        /*
         * VNODE I/O
         *
         * If an error occurs, we set BIO_ERROR but we do not set
         * B_INVAL because (for a write anyway), the buffer is
         * still valid.
         */

        if (bp->bio_cmd == BIO_FLUSH) {
                vfslocked = VFS_LOCK_GIANT(vp->v_mount);
                (void) vn_start_write(vp, &mp, V_WAIT);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                error = VOP_FSYNC(vp, MNT_WAIT, td);
                VOP_UNLOCK(vp, 0);
                vn_finished_write(mp);
                VFS_UNLOCK_GIANT(vfslocked);
                return (error);
        }

        bzero(&auio, sizeof(auio));

        aiov.iov_base = bp->bio_data;
        aiov.iov_len = bp->bio_length;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
        auio.uio_segflg = UIO_SYSSPACE;
        if (bp->bio_cmd == BIO_READ)
                auio.uio_rw = UIO_READ;
        else if (bp->bio_cmd == BIO_WRITE)
                auio.uio_rw = UIO_WRITE;
        else
                panic("wrong BIO_OP in mdstart_vnode");
        auio.uio_resid = bp->bio_length;
        auio.uio_td = td;
        /*
         * When reading set IO_DIRECT to try to avoid double-caching
         * the data.  When writing IO_DIRECT is not optimal.
         */
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        if (bp->bio_cmd == BIO_READ) {
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
                VOP_UNLOCK(vp, 0);
        } else {
                (void) vn_start_write(vp, &mp, V_WAIT);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
                    sc->cred);
                VOP_UNLOCK(vp, 0);
                vn_finished_write(mp);
        }
        VFS_UNLOCK_GIANT(vfslocked);
        bp->bio_resid = auio.uio_resid;
        return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
        struct sf_buf *sf;
        int rv, offs, len, lastend;
        vm_pindex_t i, lastp;
        vm_page_t m;
        u_char *p;

        switch (bp->bio_cmd) {
        case BIO_READ:
        case BIO_WRITE:
        case BIO_DELETE:
                break;
        default:
                return (EOPNOTSUPP);
        }

        p = bp->bio_data;

        /*
         * offs is the offset at which to start operating on the
         * next (ie, first) page.  lastp is the last page on
         * which we're going to operate.  lastend is the ending
         * position within that last page (ie, PAGE_SIZE if
         * we're operating on complete aligned pages).
         */
        offs = bp->bio_offset % PAGE_SIZE;
        lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
        lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

        rv = VM_PAGER_OK;
        VM_OBJECT_LOCK(sc->object);
        vm_object_pip_add(sc->object, 1);
        for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
                len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

                m = vm_page_grab(sc->object, i,
                    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
                VM_OBJECT_UNLOCK(sc->object);
                sched_pin();
                sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
                VM_OBJECT_LOCK(sc->object);
                if (bp->bio_cmd == BIO_READ) {
                        if (m->valid != VM_PAGE_BITS_ALL)
                                rv = vm_pager_get_pages(sc->object, &m, 1, 0);
                        if (rv == VM_PAGER_ERROR) {
                                sf_buf_free(sf);
                                sched_unpin();
                                vm_page_wakeup(m);
                                break;
                        }
                        bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
                        cpu_flush_dcache(p, len);
                } else if (bp->bio_cmd == BIO_WRITE) {
                        if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
                                rv = vm_pager_get_pages(sc->object, &m, 1, 0);
                        if (rv == VM_PAGER_ERROR) {
                                sf_buf_free(sf);
                                sched_unpin();
                                vm_page_wakeup(m);
                                break;
                        }
                        bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
                        m->valid = VM_PAGE_BITS_ALL;
#if 0
                } else if (bp->bio_cmd == BIO_DELETE) {
                        if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
                                rv = vm_pager_get_pages(sc->object, &m, 1, 0);
                        if (rv == VM_PAGER_ERROR) {
                                sf_buf_free(sf);
                                sched_unpin();
                                vm_page_wakeup(m);
                                break;
                        }
                        bzero((void *)(sf_buf_kva(sf) + offs), len);
                        vm_page_dirty(m);
                        m->valid = VM_PAGE_BITS_ALL;
#endif
                }
                sf_buf_free(sf);
                sched_unpin();
                vm_page_wakeup(m);
                vm_page_lock(m);
                vm_page_activate(m);
                vm_page_unlock(m);
                if (bp->bio_cmd == BIO_WRITE)
                        vm_page_dirty(m);

                /* Actions on further pages start at offset 0 */
                p += PAGE_SIZE - offs;
                offs = 0;
#if 0
                if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
                        printf("wire_count %d busy %d flags %x hold_count %d "
                            "act_count %d queue %d valid %d dirty %d @ %d\n",
                            m->wire_count, m->busy, m->flags, m->hold_count,
                            m->act_count, m->queue, m->valid, m->dirty, i);
#endif
        }
        vm_object_pip_subtract(sc->object, 1);
        vm_object_set_writeable_dirty(sc->object);
        VM_OBJECT_UNLOCK(sc->object);
        return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
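
/*
 * Swap-backed devices keep their sectors in the pages of the OBJT_SWAP
 * VM object created by mdcreate_swap(), so memory is allocated lazily
 * and cold pages may be paged out to swap.  The sf_buf mapping in
 * mdstart_swap() gives the copy loop a kernel virtual address one page
 * at a time instead of mapping the whole device.
 */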

static void
md_kthread(void *arg)
{
        struct md_s *sc;
        struct bio *bp;
        int error;

        sc = arg;
        thread_lock(curthread);
        sched_prio(curthread, PRIBIO);
        thread_unlock(curthread);
        if (sc->type == MD_VNODE)
                curthread->td_pflags |= TDP_NORUNNINGBUF;

        for (;;) {
                mtx_lock(&sc->queue_mtx);
                if (sc->flags & MD_SHUTDOWN) {
                        sc->flags |= MD_EXITING;
                        mtx_unlock(&sc->queue_mtx);
                        kproc_exit(0);
                }
                bp = bioq_takefirst(&sc->bio_queue);
                if (!bp) {
                        msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
                        continue;
                }
                mtx_unlock(&sc->queue_mtx);
                if (bp->bio_cmd == BIO_GETATTR) {
                        if (sc->fwsectors && sc->fwheads &&
                            (g_handleattr_int(bp, "GEOM::fwsectors",
                            sc->fwsectors) ||
                            g_handleattr_int(bp, "GEOM::fwheads",
                            sc->fwheads)))
                                error = -1;
                        else
                                error = EOPNOTSUPP;
                } else {
                        error = sc->start(sc, bp);
                }

                if (error != -1) {
                        bp->bio_completed = bp->bio_length;
                        g_io_deliver(bp, error);
                        if ((bp->bio_cmd == BIO_READ) ||
                            (bp->bio_cmd == BIO_WRITE))
                                devstat_end_transaction_bio(sc->devstat, bp);
                }
        }
}

static struct md_s *
mdfind(int unit)
{
        struct md_s *sc;

        LIST_FOREACH(sc, &md_softc_list, list) {
                if (sc->unit == unit)
                        break;
        }
        return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
        struct md_s *sc;
        int error;

        *errp = 0;
        if (unit == -1)
                unit = alloc_unr(md_uh);
        else
                unit = alloc_unr_specific(md_uh, unit);

        if (unit == -1) {
                *errp = EBUSY;
                return (NULL);
        }

        sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
        sc->type = type;
        bioq_init(&sc->bio_queue);
        mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
        sc->unit = unit;
        sprintf(sc->name, "md%d", unit);
        LIST_INSERT_HEAD(&md_softc_list, sc, list);
        error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
        if (error == 0)
                return (sc);
        LIST_REMOVE(sc, list);
        mtx_destroy(&sc->queue_mtx);
        free_unr(md_uh, sc->unit);
        free(sc, M_MD);
        *errp = error;
        return (NULL);
}

static void
mdinit(struct md_s *sc)
{
        struct g_geom *gp;
        struct g_provider *pp;

        g_topology_lock();
        gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
        gp->softc = sc;
        pp = g_new_providerf(gp, "md%d", sc->unit);
        pp->mediasize = sc->mediasize;
        pp->sectorsize = sc->sectorsize;
        sc->gp = gp;
        sc->pp = pp;
        g_error_provider(pp, 0);
        g_topology_unlock();
        sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
            DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

        if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
                return (EINVAL);
        if (mdio->md_base == 0)
                return (EINVAL);
        sc->flags = mdio->md_options & MD_FORCE;
        /* Cast to pointer size, then to pointer to avoid warning */
        sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
        sc->pl_len = (size_t)sc->mediasize;
        return (0);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
        uintptr_t sp;
        int error;
        off_t u;

        error = 0;
        if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
                return (EINVAL);
        if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
                return (EINVAL);
        /* Compression doesn't make sense if we have reserved space */
        if (mdio->md_options & MD_RESERVE)
                mdio->md_options &= ~MD_COMPRESS;
        if (mdio->md_fwsectors != 0)
                sc->fwsectors = mdio->md_fwsectors;
        if (mdio->md_fwheads != 0)
                sc->fwheads = mdio->md_fwheads;
        sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
        sc->indir = dimension(sc->mediasize / sc->sectorsize);
        sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
            0x1ff, 0);
        if (mdio->md_options & MD_RESERVE) {
                off_t nsectors;

                nsectors = sc->mediasize / sc->sectorsize;
                for (u = 0; u < nsectors; u++) {
                        sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
                        if (sp != 0)
                                error = s_write(sc->indir, u, sp);
                        else
                                error = ENOMEM;
                        if (error != 0)
                                break;
                }
        }
        return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
        char *tmpbuf;
        int error = 0;

        /*
         * Set credentials in our softc
         */

        if (sc->cred)
                crfree(sc->cred);
        sc->cred = crhold(cred);

        /*
         * Horrible kludge to establish credentials for NFS  XXX.
         */

        if (sc->vnode) {
                struct uio auio;
                struct iovec aiov;

                tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
                bzero(&auio, sizeof(auio));

                aiov.iov_base = tmpbuf;
                aiov.iov_len = sc->sectorsize;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = 0;
                auio.uio_rw = UIO_READ;
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_resid = aiov.iov_len;
                vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
                error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
                VOP_UNLOCK(sc->vnode, 0);
                free(tmpbuf, M_TEMP);
        }
        return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
        struct vattr vattr;
        struct nameidata nd;
        char *fname;
        int error, flags, vfslocked;

        /*
         * Kernel-originated requests must have the filename appended
         * to the mdio structure to protect against malicious software.
         */
        fname = mdio->md_file;
        if ((void *)fname != (void *)(mdio + 1)) {
                error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
                if (error != 0)
                        return (error);
        } else
                strlcpy(sc->file, fname, sizeof(sc->file));

        /*
         * If the user specified that this is a read only device, don't
         * set the FWRITE mask before trying to open the backing store.
         */
        flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
        NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
        error = vn_open(&nd, &flags, 0, NULL);
        if (error != 0)
                return (error);
        vfslocked = NDHASGIANT(&nd);
        NDFREE(&nd, NDF_ONLY_PNBUF);
        if (nd.ni_vp->v_type != VREG) {
                error = EINVAL;
                goto bad;
        }
        error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
        if (error != 0)
                goto bad;
        if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
                vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
                if (nd.ni_vp->v_iflag & VI_DOOMED) {
                        /* Forced unmount. */
                        error = EBADF;
                        goto bad;
                }
        }
        nd.ni_vp->v_vflag |= VV_MD;
        VOP_UNLOCK(nd.ni_vp, 0);

        if (mdio->md_fwsectors != 0)
                sc->fwsectors = mdio->md_fwsectors;
        if (mdio->md_fwheads != 0)
                sc->fwheads = mdio->md_fwheads;
        sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
        if (!(flags & FWRITE))
                sc->flags |= MD_READONLY;
        sc->vnode = nd.ni_vp;

        error = mdsetcred(sc, td->td_ucred);
        if (error != 0) {
                sc->vnode = NULL;
                vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
                nd.ni_vp->v_vflag &= ~VV_MD;
                goto bad;
        }
        VFS_UNLOCK_GIANT(vfslocked);
        return (0);
bad:
        VOP_UNLOCK(nd.ni_vp, 0);
        (void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
        VFS_UNLOCK_GIANT(vfslocked);
        return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{
        int vfslocked;

        if (sc->gp) {
                sc->gp->softc = NULL;
                g_topology_lock();
                g_wither_geom(sc->gp, ENXIO);
                g_topology_unlock();
                sc->gp = NULL;
                sc->pp = NULL;
        }
        if (sc->devstat) {
                devstat_remove_entry(sc->devstat);
                sc->devstat = NULL;
        }
        mtx_lock(&sc->queue_mtx);
        sc->flags |= MD_SHUTDOWN;
        wakeup(sc);
        while (!(sc->flags & MD_EXITING))
                msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
        mtx_unlock(&sc->queue_mtx);
        mtx_destroy(&sc->queue_mtx);
        if (sc->vnode != NULL) {
                vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
                vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
                sc->vnode->v_vflag &= ~VV_MD;
                VOP_UNLOCK(sc->vnode, 0);
                (void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
                    FREAD : (FREAD|FWRITE), sc->cred, td);
                VFS_UNLOCK_GIANT(vfslocked);
        }
        if (sc->cred != NULL)
                crfree(sc->cred);
        if (sc->object != NULL)
                vm_object_deallocate(sc->object);
        if (sc->indir)
                destroy_indir(sc, sc->indir);
        if (sc->uma)
                uma_zdestroy(sc->uma);

        LIST_REMOVE(sc, list);
        free_unr(md_uh, sc->unit);
        free(sc, M_MD);
        return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
        vm_ooffset_t npage;
        int error;

        /*
         * Range check.  The size must be a non-zero multiple of
         * the page size.
         */
        if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
                return (EDOM);

        /*
         * Allocate an OBJT_SWAP object.
         *
         * Note the truncation.
         */

        npage = mdio->md_mediasize / PAGE_SIZE;
        if (mdio->md_fwsectors != 0)
                sc->fwsectors = mdio->md_fwsectors;
        if (mdio->md_fwheads != 0)
                sc->fwheads = mdio->md_fwheads;
        sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
            VM_PROT_DEFAULT, 0, td->td_ucred);
        if (sc->object == NULL)
                return (ENOMEM);
        sc->flags = mdio->md_options & MD_FORCE;
        if (mdio->md_options & MD_RESERVE) {
                if (swap_pager_reserve(sc->object, 0, npage) < 0) {
                        error = EDOM;
                        goto finish;
                }
        }
        error = mdsetcred(sc, td->td_ucred);
finish:
        if (error != 0) {
                vm_object_deallocate(sc->object);
                sc->object = NULL;
        }
        return (error);
}
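
/*
 * Configuration requests arrive as MDIO* ioctls on /dev/mdctl (the
 * status_dev created in g_md_init() below).  A minimal userland attach
 * sketch, normally the job of mdconfig(8); the option mix here is only
 * illustrative:
 *
 *	struct md_ioctl mdio;
 *
 *	bzero(&mdio, sizeof(mdio));
 *	mdio.md_version = MDIOVERSION;
 *	mdio.md_type = MD_SWAP;
 *	mdio.md_mediasize = 64 * 1024 * 1024;
 *	mdio.md_options = MD_AUTOUNIT;
 *	ioctl(mdctl_fd, MDIOCATTACH, &mdio);
 *
 * Leaving md_sectorsize at 0 defaults it to DEV_BSIZE, and MD_AUTOUNIT
 * returns the allocated unit number in mdio.md_unit.
 */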

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
        struct md_ioctl *mdio;
        struct md_s *sc;
        int error, i;

        if (md_debug)
                printf("mdctlioctl(%s %lx %p %x %p)\n",
                    devtoname(dev), cmd, addr, flags, td);

        mdio = (struct md_ioctl *)addr;
        if (mdio->md_version != MDIOVERSION)
                return (EINVAL);

        /*
         * The version number is asserted out here, before the switch,
         * even though (a) it is possible we may add another ioctl in
         * the future which doesn't read an mdio, and (b) the correct
         * return value for an unknown ioctl is ENOIOCTL, not EINVAL;
         * the check would then have to move into the individual
         * handlers.
         */
        error = 0;
        switch (cmd) {
        case MDIOCATTACH:
                switch (mdio->md_type) {
                case MD_MALLOC:
                case MD_PRELOAD:
                case MD_VNODE:
                case MD_SWAP:
                        break;
                default:
                        return (EINVAL);
                }
                if (mdio->md_options & MD_AUTOUNIT)
                        sc = mdnew(-1, &error, mdio->md_type);
                else {
                        if (mdio->md_unit > INT_MAX)
                                return (EINVAL);
                        sc = mdnew(mdio->md_unit, &error, mdio->md_type);
                }
                if (sc == NULL)
                        return (error);
                if (mdio->md_options & MD_AUTOUNIT)
                        mdio->md_unit = sc->unit;
                sc->mediasize = mdio->md_mediasize;
                if (mdio->md_sectorsize == 0)
                        sc->sectorsize = DEV_BSIZE;
                else
                        sc->sectorsize = mdio->md_sectorsize;
                error = EDOOFUS;
                switch (sc->type) {
                case MD_MALLOC:
                        sc->start = mdstart_malloc;
                        error = mdcreate_malloc(sc, mdio);
                        break;
                case MD_PRELOAD:
                        sc->start = mdstart_preload;
                        error = mdcreate_preload(sc, mdio);
                        break;
                case MD_VNODE:
                        sc->start = mdstart_vnode;
                        error = mdcreate_vnode(sc, mdio, td);
                        break;
                case MD_SWAP:
                        sc->start = mdstart_swap;
                        error = mdcreate_swap(sc, mdio, td);
                        break;
                }
                if (error != 0) {
                        mddestroy(sc, td);
                        return (error);
                }

                /* Prune off any residual fractional sector */
                i = sc->mediasize % sc->sectorsize;
                sc->mediasize -= i;

                mdinit(sc);
                return (0);
        case MDIOCDETACH:
                if (mdio->md_mediasize != 0 ||
                    (mdio->md_options & ~MD_FORCE) != 0)
                        return (EINVAL);

                sc = mdfind(mdio->md_unit);
                if (sc == NULL)
                        return (ENOENT);
                if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
                    !(mdio->md_options & MD_FORCE))
                        return (EBUSY);
                return (mddestroy(sc, td));
        case MDIOCQUERY:
                sc = mdfind(mdio->md_unit);
                if (sc == NULL)
                        return (ENOENT);
                mdio->md_type = sc->type;
                mdio->md_options = sc->flags;
                mdio->md_mediasize = sc->mediasize;
                mdio->md_sectorsize = sc->sectorsize;
                if (sc->type == MD_VNODE)
                        error = copyout(sc->file, mdio->md_file,
                            strlen(sc->file) + 1);
                return (error);
        case MDIOCLIST:
                i = 1;
                LIST_FOREACH(sc, &md_softc_list, list) {
                        if (i == MDNPAD - 1)
                                mdio->md_pad[i] = -1;
                        else
                                mdio->md_pad[i++] = sc->unit;
                }
                mdio->md_pad[0] = i - 1;
                return (0);
        default:
                return (ENOIOCTL);
        }
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
        int error;

        sx_xlock(&md_sx);
        error = xmdctlioctl(dev, cmd, addr, flags, td);
        sx_xunlock(&md_sx);
        return (error);
}

static void
md_preloaded(u_char *image, size_t length)
{
        struct md_s *sc;
        int error;

        sc = mdnew(-1, &error, MD_PRELOAD);
        if (sc == NULL)
                return;
        sc->mediasize = length;
        sc->sectorsize = DEV_BSIZE;
        sc->pl_ptr = image;
        sc->pl_len = length;
        sc->start = mdstart_preload;
#ifdef MD_ROOT
        if (sc->unit == 0)
                rootdevnames[0] = "ufs:/dev/md0";
#endif
        mdinit(sc);
}

static void
g_md_init(struct g_class *mp __unused)
{
        caddr_t mod;
        caddr_t c;
        u_char *ptr;
        char *name, *type;
        unsigned len;
        int i;

        /* figure out log2(NINDIR) */
        for (i = NINDIR, nshift = -1; i; nshift++)
                i >>= 1;

        mod = NULL;
        sx_init(&md_sx, "MD config lock");
        g_topology_unlock();
        md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT_SIZE
        sx_xlock(&md_sx);
        md_preloaded(mfs_root.start, sizeof(mfs_root.start));
        sx_xunlock(&md_sx);
#endif
        /* XXX: are preload_* static or do they need Giant ? */
        while ((mod = preload_search_next_name(mod)) != NULL) {
                name = (char *)preload_search_info(mod, MODINFO_NAME);
                if (name == NULL)
                        continue;
                type = (char *)preload_search_info(mod, MODINFO_TYPE);
                if (type == NULL)
                        continue;
                if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
                        continue;
                c = preload_search_info(mod, MODINFO_ADDR);
                ptr = *(u_char **)c;
                c = preload_search_info(mod, MODINFO_SIZE);
                len = *(size_t *)c;
                printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
                    MD_NAME, mdunits, name, len, ptr);
                sx_xlock(&md_sx);
                md_preloaded(ptr, len);
                sx_xunlock(&md_sx);
        }
        status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
            0600, MDCTL_NAME);
        g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
        struct md_s *mp;
        char *type;

        mp = gp->softc;
        if (mp == NULL)
                return;

        switch (mp->type) {
        case MD_MALLOC:
                type = "malloc";
                break;
        case MD_PRELOAD:
                type = "preload";
                break;
        case MD_VNODE:
                type = "vnode";
                break;
        case MD_SWAP:
                type = "swap";
                break;
        default:
                type = "unknown";
                break;
        }

        if (pp != NULL) {
                if (indent == NULL) {
                        sbuf_printf(sb, " u %d", mp->unit);
                        sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
                        sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
                        sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
                        sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
                        sbuf_printf(sb, " t %s", type);
                        if (mp->type == MD_VNODE && mp->vnode != NULL)
                                sbuf_printf(sb, " file %s", mp->file);
                } else {
                        sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
                            mp->unit);
                        sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
                            indent, (uintmax_t) mp->sectorsize);
                        sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
                            indent, (uintmax_t) mp->fwheads);
                        sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
                            indent, (uintmax_t) mp->fwsectors);
                        sbuf_printf(sb, "%s<length>%ju</length>\n",
                            indent, (uintmax_t) mp->mediasize);
                        sbuf_printf(sb, "%s<type>%s</type>\n", indent,
                            type);
                        if (mp->type == MD_VNODE && mp->vnode != NULL)
                                sbuf_printf(sb, "%s<file>%s</file>\n",
                                    indent, mp->file);
                }
        }
}

static void
g_md_fini(struct g_class *mp __unused)
{

        sx_destroy(&md_sx);
        if (status_dev != NULL)
                destroy_dev(status_dev);
        delete_unrhdr(md_uh);
}