/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif
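
/*
 * Illustrative sketch (userland, never compiled into the driver): a tool
 * that patches an image into the kernel object can locate the reserved
 * region by scanning for the two marker strings above.  The file handling
 * below is an assumption for illustration only.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long
find_marker(const char *buf, long len, const char *marker)
{
	long mlen, i;

	mlen = (long)strlen(marker);
	for (i = 0; i + mlen <= len; i++)
		if (memcmp(buf + i, marker, mlen) == 0)
			return (i);
	return (-1);
}

int
main(int argc, char **argv)
{
	FILE *fp;
	char *buf;
	long size, s, e;

	if (argc != 2 || (fp = fopen(argv[1], "rb")) == NULL)
		return (1);
	fseek(fp, 0, SEEK_END);
	size = ftell(fp);
	rewind(fp);
	if ((buf = malloc(size)) == NULL ||
	    fread(buf, 1, size, fp) != (size_t)size)
		return (1);
	s = find_marker(buf, size, "MFS Filesystem goes here");
	e = find_marker(buf, size, "MFS Filesystem had better STOP here");
	if (s >= 0 && e > s)
		printf("image region: offset %ld, max %ld bytes\n", s, e - s);
	fclose(fp);
	free(buf);
	return (0);
}
#endif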

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static int mdunits;
static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;		/* sub-indirs, or sector entries at leaves */
	u_int		total;		/* slots in array (always NINDIR) */
	u_int		used;		/* number of non-zero slots */
	u_int		shift;		/* bit position of this level's digit */
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */
static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */
static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}
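
/*
 * Illustrative sketch (userland, never compiled into the driver): the
 * indir tree is a radix tree with NINDIR slots per node, so s_read() and
 * s_write() decompose a sector number into nshift-bit digits, most
 * significant digit first.  The constants below assume 4K pages and
 * 8-byte pointers (NINDIR = 512, nshift = 9); a three-level tree then
 * covers 512 * 512 * 512 sectors.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned long long secno = 123456789ULL;	/* example sector */
	int nshift = 9, nmask = (1 << 9) - 1;
	int shift;

	/* Same digit extraction as "(offset >> cip->shift) & NMASK". */
	for (shift = 2 * nshift; shift >= 0; shift -= nshift)
		printf("shift %2d: slot %llu\n", shift,
		    (secno >> shift) & nmask);
	/* Prints slots 470, 486 and 277: the path to sector 123456789. */
	return (0);
}
#endif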

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
		devstat_start_transaction_bio(sc->devstat, bp);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				memset(dst, osp, sc->sectorsize);
			else {
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}
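
/*
 * Illustrative sketch (userland, never compiled into the driver): with
 * MD_COMPRESS, a sector whose bytes all carry the same value is stored
 * directly in the leaf word as that value (0..255, where 0 doubles as
 * "unallocated, reads back as zeros"); any leaf above 255 is a real
 * pointer to sector storage.  A minimal analogue of the encode/decode,
 * using plain malloc() in place of the uma zone:
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SECSIZE	512

/* Encode: the fill byte itself if uniform, else a heap copy. */
static uintptr_t
encode_sector(const unsigned char *src)
{
	void *p;
	int i;

	for (i = 1; i < SECSIZE; i++)
		if (src[i] != src[0])
			break;
	if (i == SECSIZE)
		return ((uintptr_t)src[0]);
	if ((p = malloc(SECSIZE)) == NULL)
		abort();
	memcpy(p, src, SECSIZE);
	return ((uintptr_t)p);
}

/* Decode: mirrors the BIO_READ cases in mdstart_malloc(). */
static void
decode_sector(uintptr_t sp, unsigned char *dst)
{
	if (sp <= 255)
		memset(dst, (int)sp, SECSIZE);
	else
		memcpy(dst, (const void *)sp, SECSIZE);
}

int
main(void)
{
	unsigned char a[SECSIZE], b[SECSIZE];

	memset(a, 0xA5, SECSIZE);
	decode_sector(encode_sector(a), b);
	printf("uniform sector round-trips: %d\n",
	    memcmp(a, b, SECSIZE) == 0);
	return (0);
}
#endif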

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	default:
		/* Don't claim success for commands we cannot handle. */
		return (EOPNOTSUPP);
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error, vfslocked;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	bp->bio_resid = auio.uio_resid;
	return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
			cpu_flush_dcache(p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			if (len != PAGE_SIZE) {
				bzero((void *)(sf_buf_kva(sf) + offs), len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		sf_buf_free(sf);
		sched_unpin();
		vm_page_wakeup(m);
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
#if 0
		if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
			printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
			    m->wire_count, m->busy,
			    m->flags, m->hold_count, m->act_count, m->queue,
			    m->valid, m->dirty, i);
#endif
	}
	vm_object_pip_subtract(sc->object, 1);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
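
/*
 * Illustrative sketch (userland, never compiled into the driver): the
 * page-walking arithmetic above, shown for an unaligned request.  With
 * a 4096-byte page, a 6000-byte transfer at offset 5000 touches pages
 * 1 and 2: first 3192 bytes at in-page offset 904, then 2808 bytes at
 * offset 0 (lastend = 2808).
 */
#if 0
#include <stdio.h>

#define PGSZ	4096

int
main(void)
{
	long long off = 5000, length = 6000, i, lastp;
	int offs, lastend, len;

	offs = off % PGSZ;
	lastp = (off + length - 1) / PGSZ;
	lastend = (off + length - 1) % PGSZ + 1;
	for (i = off / PGSZ; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PGSZ) - offs;
		printf("page %lld: in-page offset %d, %d bytes\n",
		    i, offs, len);
		offs = 0;	/* further pages start at offset 0 */
	}
	return (0);
}
#endif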

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		/*
		 * An error of -1 means g_handleattr_int() already
		 * delivered the bio; everything else completes here.
		 */
		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) ||
			    (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */
static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
		return (EINVAL);
	if (mdio->md_base == 0)
		return (EINVAL);
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
	return (0);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags, vfslocked;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{
	int vfslocked;

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes or any size less than the
	 * size of a page.  Then round to a page.
	 */
	if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * Strictly speaking, the version number belongs in the
	 * individual ioctl handlers rather than out here, because
	 * (a) it is possible we may add another ioctl in the future
	 * which doesn't read an mdio, and (b) the correct return
	 * value for an unknown ioctl is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		if (mdio->md_sectorsize == 0)
			sc->sectorsize = DEV_BSIZE;
		else
			sc->sectorsize = mdio->md_sectorsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			sc->start = mdstart_preload;
			error = mdcreate_preload(sc, mdio);
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}
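
/*
 * Illustrative sketch (userland, never compiled into the driver): the
 * configuration path above is driven through the control device, the
 * same way mdconfig(8) does it.  This attaches a swap-backed unit and
 * lets the kernel pick the unit number; error handling is abbreviated
 * for illustration.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mdioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct md_ioctl mdio;
	int fd;

	if ((fd = open("/dev/" MDCTL_NAME, O_RDWR)) < 0)
		return (1);
	memset(&mdio, 0, sizeof(mdio));
	mdio.md_version = MDIOVERSION;
	mdio.md_type = MD_SWAP;
	mdio.md_mediasize = 64 * 1024 * 1024;	/* 64 MB */
	mdio.md_options = MD_AUTOUNIT;		/* kernel picks the unit */
	if (ioctl(fd, MDIOCATTACH, &mdio) < 0)
		return (1);
	printf("attached md%u\n", mdio.md_unit);
	return (0);
}
#endif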

static void
md_preloaded(u_char *image, size_t length)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr;
	char *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
			    MD_NAME, mdunits, name, len, ptr);
			sx_xlock(&md_sx);
			md_preloaded(ptr, len);
			sx_xunlock(&md_sx);
		}
	}
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}