1 /* 2 * ---------------------------------------------------------------------------- 3 * "THE BEER-WARE LICENSE" (Revision 42): 4 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you 5 * can do whatever you want with this stuff. If we meet some day, and you think 6 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 7 * ---------------------------------------------------------------------------- 8 * 9 * $FreeBSD$ 10 * 11 */ 12 13 /* 14 * The following functions are based in the vn(4) driver: mdstart_swap(), 15 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(), 16 * and as such under the following copyright: 17 * 18 * Copyright (c) 1988 University of Utah. 19 * Copyright (c) 1990, 1993 20 * The Regents of the University of California. All rights reserved. 21 * 22 * This code is derived from software contributed to Berkeley by 23 * the Systems Programming Group of the University of Utah Computer 24 * Science Department. 25 * 26 * Redistribution and use in source and binary forms, with or without 27 * modification, are permitted provided that the following conditions 28 * are met: 29 * 1. Redistributions of source code must retain the above copyright 30 * notice, this list of conditions and the following disclaimer. 31 * 2. Redistributions in binary form must reproduce the above copyright 32 * notice, this list of conditions and the following disclaimer in the 33 * documentation and/or other materials provided with the distribution. 34 * 3. All advertising materials mentioning features or use of this software 35 * must display the following acknowledgement: 36 * This product includes software developed by the University of 37 * California, Berkeley and its contributors. 38 * 4. Neither the name of the University nor the names of its contributors 39 * may be used to endorse or promote products derived from this software 40 * without specific prior written permission. 41 * 42 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 45 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 52 * SUCH DAMAGE. 53 * 54 * from: Utah Hdr: vn.c 1.13 94/04/02 55 * 56 * from: @(#)vn.c 8.6 (Berkeley) 4/1/94 57 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03 58 */ 59 60 #include "opt_geom.h" 61 #include "opt_md.h" 62 63 #include <sys/param.h> 64 #include <sys/systm.h> 65 #include <sys/bio.h> 66 #include <sys/conf.h> 67 #include <sys/fcntl.h> 68 #include <sys/kernel.h> 69 #include <sys/kthread.h> 70 #include <sys/linker.h> 71 #include <sys/lock.h> 72 #include <sys/malloc.h> 73 #include <sys/mdioctl.h> 74 #include <sys/mutex.h> 75 #include <sys/namei.h> 76 #include <sys/proc.h> 77 #include <sys/queue.h> 78 #include <sys/sysctl.h> 79 #include <sys/vnode.h> 80 81 #include <geom/geom.h> 82 83 #include <vm/vm.h> 84 #include <vm/vm_object.h> 85 #include <vm/vm_page.h> 86 #include <vm/vm_pager.h> 87 #include <vm/swap_pager.h> 88 #include <vm/uma.h> 89 90 #define MD_MODVER 1 91 92 #define MD_SHUTDOWN 0x10000 /* Tell worker thread to terminate. */ 93 94 #ifndef MD_NSECT 95 #define MD_NSECT (10000 * 2) 96 #endif 97 98 static MALLOC_DEFINE(M_MD, "MD disk", "Memory Disk"); 99 static MALLOC_DEFINE(M_MDSECT, "MD sectors", "Memory Disk Sectors"); 100 101 static int md_debug; 102 SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, ""); 103 104 #if defined(MD_ROOT) && defined(MD_ROOT_SIZE) 105 /* Image gets put here: */ 106 static u_char mfs_root[MD_ROOT_SIZE*1024] = "MFS Filesystem goes here"; 107 static u_char end_mfs_root[] __unused = "MFS Filesystem had better STOP here"; 108 #endif 109 110 static int mdrootready; 111 static int mdunits; 112 static dev_t status_dev = 0; 113 114 #define CDEV_MAJOR 95 115 116 static d_ioctl_t mdctlioctl; 117 118 static struct cdevsw mdctl_cdevsw = { 119 .d_open = nullopen, 120 .d_close = nullclose, 121 .d_ioctl = mdctlioctl, 122 .d_name = MD_NAME, 123 .d_maj = CDEV_MAJOR 124 }; 125 126 127 static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list); 128 129 #define NINDIR (PAGE_SIZE / sizeof(uintptr_t)) 130 #define NMASK (NINDIR-1) 131 static int nshift; 132 133 struct indir { 134 uintptr_t *array; 135 uint total; 136 uint used; 137 uint shift; 138 }; 139 140 struct md_s { 141 int unit; 142 LIST_ENTRY(md_s) list; 143 struct bio_queue_head bio_queue; 144 struct mtx queue_mtx; 145 dev_t dev; 146 enum md_types type; 147 unsigned nsect; 148 unsigned opencount; 149 unsigned secsize; 150 unsigned fwheads; 151 unsigned fwsectors; 152 unsigned flags; 153 char name[20]; 154 struct proc *procp; 155 struct g_geom *gp; 156 struct g_provider *pp; 157 158 /* MD_MALLOC related fields */ 159 struct indir *indir; 160 uma_zone_t uma; 161 162 /* MD_PRELOAD related fields */ 163 u_char *pl_ptr; 164 unsigned pl_len; 165 166 /* MD_VNODE related fields */ 167 struct vnode *vnode; 168 struct ucred *cred; 169 170 /* MD_SWAP related fields */ 171 vm_object_t object; 172 }; 173 174 static int mddestroy(struct md_s *sc, struct thread *td); 175 176 static struct indir * 177 new_indir(uint shift) 178 { 179 struct indir *ip; 180 181 ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO); 182 if (ip == NULL) 183 return (NULL); 184 ip->array = malloc(sizeof(uintptr_t) * NINDIR, 185 M_MDSECT, M_NOWAIT | M_ZERO); 186 if (ip->array == NULL) { 187 free(ip, M_MD); 188 return (NULL); 189 } 190 ip->total = NINDIR; 191 ip->shift = shift; 192 return (ip); 193 } 194 195 static void 196 del_indir(struct indir *ip) 197 { 198 199 free(ip->array, M_MDSECT); 200 free(ip, M_MD); 201 } 202 203 static void 204 destroy_indir(struct md_s *sc, struct indir *ip) 205 { 206 int i; 207 208 for (i = 0; i < NINDIR; i++) { 209 if (!ip->array[i]) 210 continue; 211 if (ip->shift) 212 destroy_indir(sc, (struct indir*)(ip->array[i])); 213 else if (ip->array[i] > 255) 214 uma_zfree(sc->uma, (void *)(ip->array[i])); 215 } 216 del_indir(ip); 217 } 218 219 /* 220 * This function does the math and alloctes the top level "indir" structure 221 * for a device of "size" sectors. 222 */ 223 224 static struct indir * 225 dimension(off_t size) 226 { 227 off_t rcnt; 228 struct indir *ip; 229 int i, layer; 230 231 rcnt = size; 232 layer = 0; 233 while (rcnt > NINDIR) { 234 rcnt /= NINDIR; 235 layer++; 236 } 237 /* figure out log2(NINDIR) */ 238 for (i = NINDIR, nshift = -1; i; nshift++) 239 i >>= 1; 240 241 /* 242 * XXX: the top layer is probably not fully populated, so we allocate 243 * too much space for ip->array in here. 244 */ 245 ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO); 246 ip->array = malloc(sizeof(uintptr_t) * NINDIR, 247 M_MDSECT, M_WAITOK | M_ZERO); 248 ip->total = NINDIR; 249 ip->shift = layer * nshift; 250 return (ip); 251 } 252 253 /* 254 * Read a given sector 255 */ 256 257 static uintptr_t 258 s_read(struct indir *ip, off_t offset) 259 { 260 struct indir *cip; 261 int idx; 262 uintptr_t up; 263 264 if (md_debug > 1) 265 printf("s_read(%jd)\n", (intmax_t)offset); 266 up = 0; 267 for (cip = ip; cip != NULL;) { 268 if (cip->shift) { 269 idx = (offset >> cip->shift) & NMASK; 270 up = cip->array[idx]; 271 cip = (struct indir *)up; 272 continue; 273 } 274 idx = offset & NMASK; 275 return (cip->array[idx]); 276 } 277 return (0); 278 } 279 280 /* 281 * Write a given sector, prune the tree if the value is 0 282 */ 283 284 static int 285 s_write(struct indir *ip, off_t offset, uintptr_t ptr) 286 { 287 struct indir *cip, *lip[10]; 288 int idx, li; 289 uintptr_t up; 290 291 if (md_debug > 1) 292 printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr); 293 up = 0; 294 li = 0; 295 cip = ip; 296 for (;;) { 297 lip[li++] = cip; 298 if (cip->shift) { 299 idx = (offset >> cip->shift) & NMASK; 300 up = cip->array[idx]; 301 if (up != 0) { 302 cip = (struct indir *)up; 303 continue; 304 } 305 /* Allocate branch */ 306 cip->array[idx] = 307 (uintptr_t)new_indir(cip->shift - nshift); 308 if (cip->array[idx] == 0) 309 return (ENOSPC); 310 cip->used++; 311 up = cip->array[idx]; 312 cip = (struct indir *)up; 313 continue; 314 } 315 /* leafnode */ 316 idx = offset & NMASK; 317 up = cip->array[idx]; 318 if (up != 0) 319 cip->used--; 320 cip->array[idx] = ptr; 321 if (ptr != 0) 322 cip->used++; 323 break; 324 } 325 if (cip->used != 0 || li == 1) 326 return (0); 327 li--; 328 while (cip->used == 0 && cip != ip) { 329 li--; 330 idx = (offset >> lip[li]->shift) & NMASK; 331 up = lip[li]->array[idx]; 332 KASSERT(up == (uintptr_t)cip, ("md screwed up")); 333 del_indir(cip); 334 lip[li]->array[idx] = 0; 335 lip[li]->used--; 336 cip = lip[li]; 337 } 338 return (0); 339 } 340 341 342 struct g_class g_md_class = { 343 .name = "MD", 344 G_CLASS_INITIALIZER 345 }; 346 347 static int 348 g_md_access(struct g_provider *pp, int r, int w, int e) 349 { 350 struct md_s *sc; 351 352 sc = pp->geom->softc; 353 if (sc == NULL) 354 return (ENXIO); 355 r += pp->acr; 356 w += pp->acw; 357 e += pp->ace; 358 if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) { 359 sc->opencount = 1; 360 } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) { 361 sc->opencount = 0; 362 } 363 return (0); 364 } 365 366 static void 367 g_md_start(struct bio *bp) 368 { 369 struct md_s *sc; 370 371 sc = bp->bio_to->geom->softc; 372 373 bp->bio_blkno = bp->bio_offset >> DEV_BSHIFT; 374 bp->bio_pblkno = bp->bio_offset / sc->secsize; 375 bp->bio_bcount = bp->bio_length; 376 mtx_lock(&sc->queue_mtx); 377 bioq_disksort(&sc->bio_queue, bp); 378 mtx_unlock(&sc->queue_mtx); 379 380 wakeup(sc); 381 } 382 383 DECLARE_GEOM_CLASS(g_md_class, g_md); 384 385 386 static int 387 mdstart_malloc(struct md_s *sc, struct bio *bp) 388 { 389 int i, error; 390 u_char *dst; 391 unsigned secno, nsec, uc; 392 uintptr_t sp, osp; 393 394 nsec = bp->bio_bcount / sc->secsize; 395 secno = bp->bio_pblkno; 396 dst = bp->bio_data; 397 error = 0; 398 while (nsec--) { 399 osp = s_read(sc->indir, secno); 400 if (bp->bio_cmd == BIO_DELETE) { 401 if (osp != 0) 402 error = s_write(sc->indir, secno, 0); 403 } else if (bp->bio_cmd == BIO_READ) { 404 if (osp == 0) 405 bzero(dst, sc->secsize); 406 else if (osp <= 255) 407 for (i = 0; i < sc->secsize; i++) 408 dst[i] = osp; 409 else 410 bcopy((void *)osp, dst, sc->secsize); 411 osp = 0; 412 } else if (bp->bio_cmd == BIO_WRITE) { 413 if (sc->flags & MD_COMPRESS) { 414 uc = dst[0]; 415 for (i = 1; i < sc->secsize; i++) 416 if (dst[i] != uc) 417 break; 418 } else { 419 i = 0; 420 uc = 0; 421 } 422 if (i == sc->secsize) { 423 if (osp != uc) 424 error = s_write(sc->indir, secno, uc); 425 } else { 426 if (osp <= 255) { 427 sp = (uintptr_t) uma_zalloc( 428 sc->uma, M_NOWAIT); 429 if (sp == 0) { 430 error = ENOSPC; 431 break; 432 } 433 bcopy(dst, (void *)sp, sc->secsize); 434 error = s_write(sc->indir, secno, sp); 435 } else { 436 bcopy(dst, (void *)osp, sc->secsize); 437 osp = 0; 438 } 439 } 440 } else { 441 error = EOPNOTSUPP; 442 } 443 if (osp > 255) 444 uma_zfree(sc->uma, (void*)osp); 445 if (error) 446 break; 447 secno++; 448 dst += sc->secsize; 449 } 450 bp->bio_resid = 0; 451 return (error); 452 } 453 454 static int 455 mdstart_preload(struct md_s *sc, struct bio *bp) 456 { 457 458 if (bp->bio_cmd == BIO_DELETE) { 459 } else if (bp->bio_cmd == BIO_READ) { 460 bcopy(sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT), bp->bio_data, bp->bio_bcount); 461 } else { 462 bcopy(bp->bio_data, sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT), bp->bio_bcount); 463 } 464 bp->bio_resid = 0; 465 return (0); 466 } 467 468 static int 469 mdstart_vnode(struct md_s *sc, struct bio *bp) 470 { 471 int error; 472 struct uio auio; 473 struct iovec aiov; 474 struct mount *mp; 475 476 /* 477 * VNODE I/O 478 * 479 * If an error occurs, we set BIO_ERROR but we do not set 480 * B_INVAL because (for a write anyway), the buffer is 481 * still valid. 482 */ 483 484 bzero(&auio, sizeof(auio)); 485 486 aiov.iov_base = bp->bio_data; 487 aiov.iov_len = bp->bio_bcount; 488 auio.uio_iov = &aiov; 489 auio.uio_iovcnt = 1; 490 auio.uio_offset = (vm_ooffset_t)bp->bio_pblkno * sc->secsize; 491 auio.uio_segflg = UIO_SYSSPACE; 492 if(bp->bio_cmd == BIO_READ) 493 auio.uio_rw = UIO_READ; 494 else 495 auio.uio_rw = UIO_WRITE; 496 auio.uio_resid = bp->bio_bcount; 497 auio.uio_td = curthread; 498 /* 499 * When reading set IO_DIRECT to try to avoid double-caching 500 * the data. When writing IO_DIRECT is not optimal, but we 501 * must set IO_NOWDRAIN to avoid a wdrain deadlock. 502 */ 503 if (bp->bio_cmd == BIO_READ) { 504 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread); 505 error = VOP_READ(sc->vnode, &auio, IO_DIRECT, sc->cred); 506 } else { 507 (void) vn_start_write(sc->vnode, &mp, V_WAIT); 508 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread); 509 error = VOP_WRITE(sc->vnode, &auio, IO_NOWDRAIN, sc->cred); 510 vn_finished_write(mp); 511 } 512 VOP_UNLOCK(sc->vnode, 0, curthread); 513 bp->bio_resid = auio.uio_resid; 514 return (error); 515 } 516 517 static void 518 mddone_swap(struct bio *bp) 519 { 520 521 bp->bio_completed = bp->bio_length - bp->bio_resid; 522 g_std_done(bp); 523 } 524 525 static int 526 mdstart_swap(struct md_s *sc, struct bio *bp) 527 { 528 { 529 struct bio *bp2; 530 531 bp2 = g_clone_bio(bp); 532 bp2->bio_done = mddone_swap; 533 bp2->bio_blkno = bp2->bio_offset >> DEV_BSHIFT; 534 bp2->bio_pblkno = bp2->bio_offset / sc->secsize; 535 bp2->bio_bcount = bp2->bio_length; 536 bp = bp2; 537 } 538 539 bp->bio_resid = 0; 540 if ((bp->bio_cmd == BIO_DELETE) && (sc->flags & MD_RESERVE)) 541 biodone(bp); 542 else 543 vm_pager_strategy(sc->object, bp); 544 return (-1); 545 } 546 547 static void 548 md_kthread(void *arg) 549 { 550 struct md_s *sc; 551 struct bio *bp; 552 int error, hasgiant; 553 554 sc = arg; 555 curthread->td_base_pri = PRIBIO; 556 557 switch (sc->type) { 558 case MD_SWAP: 559 case MD_VNODE: 560 mtx_lock(&Giant); 561 hasgiant = 1; 562 break; 563 case MD_MALLOC: 564 case MD_PRELOAD: 565 default: 566 hasgiant = 0; 567 break; 568 } 569 570 for (;;) { 571 mtx_lock(&sc->queue_mtx); 572 bp = bioq_first(&sc->bio_queue); 573 if (bp) 574 bioq_remove(&sc->bio_queue, bp); 575 if (!bp) { 576 if (sc->flags & MD_SHUTDOWN) { 577 mtx_unlock(&sc->queue_mtx); 578 sc->procp = NULL; 579 wakeup(&sc->procp); 580 if (!hasgiant) 581 mtx_lock(&Giant); 582 kthread_exit(0); 583 } 584 msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0); 585 continue; 586 } 587 mtx_unlock(&sc->queue_mtx); 588 if (bp->bio_cmd == BIO_GETATTR) { 589 if (sc->fwsectors && sc->fwheads && 590 (g_handleattr_int(bp, "GEOM::fwsectors", 591 sc->fwsectors) || 592 g_handleattr_int(bp, "GEOM::fwheads", 593 sc->fwheads))) 594 error = -1; 595 else 596 error = EOPNOTSUPP; 597 } else { 598 switch (sc->type) { 599 case MD_MALLOC: 600 error = mdstart_malloc(sc, bp); 601 break; 602 case MD_PRELOAD: 603 error = mdstart_preload(sc, bp); 604 break; 605 case MD_VNODE: 606 error = mdstart_vnode(sc, bp); 607 break; 608 case MD_SWAP: 609 error = mdstart_swap(sc, bp); 610 break; 611 default: 612 panic("Impossible md(type)"); 613 break; 614 } 615 } 616 617 if (error != -1) { 618 bp->bio_completed = bp->bio_length; 619 g_io_deliver(bp, error); 620 } 621 } 622 } 623 624 static struct md_s * 625 mdfind(int unit) 626 { 627 struct md_s *sc; 628 629 /* XXX: LOCK(unique unit numbers) */ 630 LIST_FOREACH(sc, &md_softc_list, list) { 631 if (sc->unit == unit) 632 break; 633 } 634 /* XXX: UNLOCK(unique unit numbers) */ 635 return (sc); 636 } 637 638 static struct md_s * 639 mdnew(int unit) 640 { 641 struct md_s *sc; 642 int error, max = -1; 643 644 /* XXX: LOCK(unique unit numbers) */ 645 LIST_FOREACH(sc, &md_softc_list, list) { 646 if (sc->unit == unit) { 647 /* XXX: UNLOCK(unique unit numbers) */ 648 return (NULL); 649 } 650 if (sc->unit > max) 651 max = sc->unit; 652 } 653 if (unit == -1) 654 unit = max + 1; 655 if (unit > 255) 656 return (NULL); 657 sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO); 658 sc->unit = unit; 659 bioq_init(&sc->bio_queue); 660 mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF); 661 sprintf(sc->name, "md%d", unit); 662 error = kthread_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name); 663 if (error) { 664 free(sc, M_MD); 665 return (NULL); 666 } 667 LIST_INSERT_HEAD(&md_softc_list, sc, list); 668 /* XXX: UNLOCK(unique unit numbers) */ 669 return (sc); 670 } 671 672 static void 673 mdinit(struct md_s *sc) 674 { 675 676 struct g_geom *gp; 677 struct g_provider *pp; 678 679 DROP_GIANT(); 680 g_topology_lock(); 681 gp = g_new_geomf(&g_md_class, "md%d", sc->unit); 682 gp->start = g_md_start; 683 gp->access = g_md_access; 684 gp->softc = sc; 685 pp = g_new_providerf(gp, "md%d", sc->unit); 686 pp->mediasize = (off_t)sc->nsect * sc->secsize; 687 pp->sectorsize = sc->secsize; 688 sc->gp = gp; 689 sc->pp = pp; 690 g_error_provider(pp, 0); 691 g_topology_unlock(); 692 PICKUP_GIANT(); 693 } 694 695 /* 696 * XXX: we should check that the range they feed us is mapped. 697 * XXX: we should implement read-only. 698 */ 699 700 static int 701 mdcreate_preload(struct md_ioctl *mdio) 702 { 703 struct md_s *sc; 704 705 if (mdio->md_size == 0) 706 return (EINVAL); 707 if (mdio->md_options & ~(MD_AUTOUNIT)) 708 return (EINVAL); 709 if (mdio->md_options & MD_AUTOUNIT) { 710 sc = mdnew(-1); 711 if (sc == NULL) 712 return (ENOMEM); 713 mdio->md_unit = sc->unit; 714 } else { 715 sc = mdnew(mdio->md_unit); 716 if (sc == NULL) 717 return (EBUSY); 718 } 719 sc->type = MD_PRELOAD; 720 sc->secsize = DEV_BSIZE; 721 sc->nsect = mdio->md_size; 722 sc->flags = mdio->md_options & MD_FORCE; 723 /* Cast to pointer size, then to pointer to avoid warning */ 724 sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base; 725 sc->pl_len = (mdio->md_size << DEV_BSHIFT); 726 mdinit(sc); 727 return (0); 728 } 729 730 731 static int 732 mdcreate_malloc(struct md_ioctl *mdio) 733 { 734 struct md_s *sc; 735 off_t u; 736 uintptr_t sp; 737 int error; 738 739 error = 0; 740 if (mdio->md_size == 0) 741 return (EINVAL); 742 if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE)) 743 return (EINVAL); 744 if (mdio->md_secsize != 0 && !powerof2(mdio->md_secsize)) 745 return (EINVAL); 746 /* Compression doesn't make sense if we have reserved space */ 747 if (mdio->md_options & MD_RESERVE) 748 mdio->md_options &= ~MD_COMPRESS; 749 if (mdio->md_options & MD_AUTOUNIT) { 750 sc = mdnew(-1); 751 if (sc == NULL) 752 return (ENOMEM); 753 mdio->md_unit = sc->unit; 754 } else { 755 sc = mdnew(mdio->md_unit); 756 if (sc == NULL) 757 return (EBUSY); 758 } 759 sc->type = MD_MALLOC; 760 if (mdio->md_secsize != 0) 761 sc->secsize = mdio->md_secsize; 762 else 763 sc->secsize = DEV_BSIZE; 764 if (mdio->md_fwsectors != 0) 765 sc->fwsectors = mdio->md_fwsectors; 766 if (mdio->md_fwheads != 0) 767 sc->fwheads = mdio->md_fwheads; 768 sc->nsect = mdio->md_size; 769 sc->nsect /= (sc->secsize / DEV_BSIZE); 770 sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE); 771 sc->indir = dimension(sc->nsect); 772 sc->uma = uma_zcreate(sc->name, sc->secsize, 773 NULL, NULL, NULL, NULL, 0x1ff, 0); 774 if (mdio->md_options & MD_RESERVE) { 775 for (u = 0; u < sc->nsect; u++) { 776 sp = (uintptr_t) uma_zalloc(sc->uma, M_NOWAIT | M_ZERO); 777 if (sp != 0) 778 error = s_write(sc->indir, u, sp); 779 else 780 error = ENOMEM; 781 if (error) 782 break; 783 } 784 } 785 if (error) { 786 mddestroy(sc, NULL); 787 return (error); 788 } 789 mdinit(sc); 790 if (!(mdio->md_options & MD_RESERVE)) 791 sc->pp->flags |= G_PF_CANDELETE; 792 return (0); 793 } 794 795 796 static int 797 mdsetcred(struct md_s *sc, struct ucred *cred) 798 { 799 char *tmpbuf; 800 int error = 0; 801 802 /* 803 * Set credits in our softc 804 */ 805 806 if (sc->cred) 807 crfree(sc->cred); 808 sc->cred = crhold(cred); 809 810 /* 811 * Horrible kludge to establish credentials for NFS XXX. 812 */ 813 814 if (sc->vnode) { 815 struct uio auio; 816 struct iovec aiov; 817 818 tmpbuf = malloc(sc->secsize, M_TEMP, M_WAITOK); 819 bzero(&auio, sizeof(auio)); 820 821 aiov.iov_base = tmpbuf; 822 aiov.iov_len = sc->secsize; 823 auio.uio_iov = &aiov; 824 auio.uio_iovcnt = 1; 825 auio.uio_offset = 0; 826 auio.uio_rw = UIO_READ; 827 auio.uio_segflg = UIO_SYSSPACE; 828 auio.uio_resid = aiov.iov_len; 829 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread); 830 error = VOP_READ(sc->vnode, &auio, 0, sc->cred); 831 VOP_UNLOCK(sc->vnode, 0, curthread); 832 free(tmpbuf, M_TEMP); 833 } 834 return (error); 835 } 836 837 static int 838 mdcreate_vnode(struct md_ioctl *mdio, struct thread *td) 839 { 840 struct md_s *sc; 841 struct vattr vattr; 842 struct nameidata nd; 843 int error, flags; 844 845 flags = FREAD|FWRITE; 846 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td); 847 error = vn_open(&nd, &flags, 0); 848 if (error) { 849 if (error != EACCES && error != EPERM && error != EROFS) 850 return (error); 851 flags &= ~FWRITE; 852 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td); 853 error = vn_open(&nd, &flags, 0); 854 if (error) 855 return (error); 856 } 857 NDFREE(&nd, NDF_ONLY_PNBUF); 858 if (nd.ni_vp->v_type != VREG || 859 (error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td))) { 860 VOP_UNLOCK(nd.ni_vp, 0, td); 861 (void) vn_close(nd.ni_vp, flags, td->td_ucred, td); 862 return (error ? error : EINVAL); 863 } 864 VOP_UNLOCK(nd.ni_vp, 0, td); 865 866 if (mdio->md_options & MD_AUTOUNIT) { 867 sc = mdnew(-1); 868 mdio->md_unit = sc->unit; 869 } else { 870 sc = mdnew(mdio->md_unit); 871 } 872 if (sc == NULL) { 873 (void) vn_close(nd.ni_vp, flags, td->td_ucred, td); 874 return (EBUSY); 875 } 876 877 sc->type = MD_VNODE; 878 sc->flags = mdio->md_options & MD_FORCE; 879 if (!(flags & FWRITE)) 880 sc->flags |= MD_READONLY; 881 sc->secsize = DEV_BSIZE; 882 sc->vnode = nd.ni_vp; 883 884 /* 885 * If the size is specified, override the file attributes. 886 */ 887 if (mdio->md_size) 888 sc->nsect = mdio->md_size; 889 else 890 sc->nsect = vattr.va_size / sc->secsize; /* XXX: round up ? */ 891 if (sc->nsect == 0) { 892 mddestroy(sc, td); 893 return (EINVAL); 894 } 895 error = mdsetcred(sc, td->td_ucred); 896 if (error) { 897 mddestroy(sc, td); 898 return (error); 899 } 900 mdinit(sc); 901 return (0); 902 } 903 904 static void 905 md_zapit(void *p, int cancel) 906 { 907 if (cancel) 908 return; 909 g_wither_geom(p, ENXIO); 910 } 911 912 static int 913 mddestroy(struct md_s *sc, struct thread *td) 914 { 915 916 GIANT_REQUIRED; 917 918 mtx_destroy(&sc->queue_mtx); 919 if (sc->gp) { 920 sc->gp->softc = NULL; 921 g_waitfor_event(md_zapit, sc->gp, M_WAITOK, sc->gp, NULL); 922 sc->gp = NULL; 923 sc->pp = NULL; 924 } 925 sc->flags |= MD_SHUTDOWN; 926 wakeup(sc); 927 while (sc->procp != NULL) 928 tsleep(&sc->procp, PRIBIO, "mddestroy", hz / 10); 929 if (sc->vnode != NULL) 930 (void)vn_close(sc->vnode, sc->flags & MD_READONLY ? 931 FREAD : (FREAD|FWRITE), sc->cred, td); 932 if (sc->cred != NULL) 933 crfree(sc->cred); 934 if (sc->object != NULL) { 935 vm_pager_deallocate(sc->object); 936 } 937 if (sc->indir) 938 destroy_indir(sc, sc->indir); 939 if (sc->uma) 940 uma_zdestroy(sc->uma); 941 942 /* XXX: LOCK(unique unit numbers) */ 943 LIST_REMOVE(sc, list); 944 /* XXX: UNLOCK(unique unit numbers) */ 945 free(sc, M_MD); 946 return (0); 947 } 948 949 static int 950 mdcreate_swap(struct md_ioctl *mdio, struct thread *td) 951 { 952 int error; 953 struct md_s *sc; 954 955 GIANT_REQUIRED; 956 957 if (mdio->md_options & MD_AUTOUNIT) { 958 sc = mdnew(-1); 959 mdio->md_unit = sc->unit; 960 } else { 961 sc = mdnew(mdio->md_unit); 962 } 963 if (sc == NULL) 964 return (EBUSY); 965 966 sc->type = MD_SWAP; 967 968 /* 969 * Range check. Disallow negative sizes or any size less then the 970 * size of a page. Then round to a page. 971 */ 972 973 if (mdio->md_size == 0) { 974 mddestroy(sc, td); 975 return (EDOM); 976 } 977 978 /* 979 * Allocate an OBJT_SWAP object. 980 * 981 * sc_secsize is PAGE_SIZE'd 982 * 983 * mdio->size is in DEV_BSIZE'd chunks. 984 * Note the truncation. 985 */ 986 987 sc->secsize = PAGE_SIZE; 988 sc->nsect = mdio->md_size / (PAGE_SIZE / DEV_BSIZE); 989 sc->object = vm_pager_allocate(OBJT_SWAP, NULL, sc->secsize * (vm_offset_t)sc->nsect, VM_PROT_DEFAULT, 0); 990 sc->flags = mdio->md_options & MD_FORCE; 991 if (mdio->md_options & MD_RESERVE) { 992 if (swap_pager_reserve(sc->object, 0, sc->nsect) < 0) { 993 vm_pager_deallocate(sc->object); 994 sc->object = NULL; 995 mddestroy(sc, td); 996 return (EDOM); 997 } 998 } 999 error = mdsetcred(sc, td->td_ucred); 1000 if (error) { 1001 mddestroy(sc, td); 1002 return (error); 1003 } 1004 mdinit(sc); 1005 if (!(mdio->md_options & MD_RESERVE)) 1006 sc->pp->flags |= G_PF_CANDELETE; 1007 return (0); 1008 } 1009 1010 static int 1011 mddetach(int unit, struct thread *td) 1012 { 1013 struct md_s *sc; 1014 1015 sc = mdfind(unit); 1016 if (sc == NULL) 1017 return (ENOENT); 1018 if (sc->opencount != 0 && !(sc->flags & MD_FORCE)) 1019 return (EBUSY); 1020 switch(sc->type) { 1021 case MD_VNODE: 1022 case MD_SWAP: 1023 case MD_MALLOC: 1024 case MD_PRELOAD: 1025 return (mddestroy(sc, td)); 1026 default: 1027 return (EOPNOTSUPP); 1028 } 1029 } 1030 1031 static int 1032 mdctlioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 1033 { 1034 struct md_ioctl *mdio; 1035 struct md_s *sc; 1036 int i; 1037 1038 if (md_debug) 1039 printf("mdctlioctl(%s %lx %p %x %p)\n", 1040 devtoname(dev), cmd, addr, flags, td); 1041 1042 /* 1043 * We assert the version number in the individual ioctl 1044 * handlers instead of out here because (a) it is possible we 1045 * may add another ioctl in the future which doesn't read an 1046 * mdio, and (b) the correct return value for an unknown ioctl 1047 * is ENOIOCTL, not EINVAL. 1048 */ 1049 mdio = (struct md_ioctl *)addr; 1050 switch (cmd) { 1051 case MDIOCATTACH: 1052 if (mdio->md_version != MDIOVERSION) 1053 return (EINVAL); 1054 switch (mdio->md_type) { 1055 case MD_MALLOC: 1056 return (mdcreate_malloc(mdio)); 1057 case MD_PRELOAD: 1058 return (mdcreate_preload(mdio)); 1059 case MD_VNODE: 1060 return (mdcreate_vnode(mdio, td)); 1061 case MD_SWAP: 1062 return (mdcreate_swap(mdio, td)); 1063 default: 1064 return (EINVAL); 1065 } 1066 case MDIOCDETACH: 1067 if (mdio->md_version != MDIOVERSION) 1068 return (EINVAL); 1069 if (mdio->md_file != NULL || mdio->md_size != 0 || 1070 mdio->md_options != 0) 1071 return (EINVAL); 1072 return (mddetach(mdio->md_unit, td)); 1073 case MDIOCQUERY: 1074 if (mdio->md_version != MDIOVERSION) 1075 return (EINVAL); 1076 sc = mdfind(mdio->md_unit); 1077 if (sc == NULL) 1078 return (ENOENT); 1079 mdio->md_type = sc->type; 1080 mdio->md_options = sc->flags; 1081 switch (sc->type) { 1082 case MD_MALLOC: 1083 mdio->md_size = sc->nsect; 1084 break; 1085 case MD_PRELOAD: 1086 mdio->md_size = sc->nsect; 1087 mdio->md_base = (uint64_t)(intptr_t)sc->pl_ptr; 1088 break; 1089 case MD_SWAP: 1090 mdio->md_size = sc->nsect * (PAGE_SIZE / DEV_BSIZE); 1091 break; 1092 case MD_VNODE: 1093 mdio->md_size = sc->nsect; 1094 /* XXX fill this in */ 1095 mdio->md_file = NULL; 1096 break; 1097 } 1098 return (0); 1099 case MDIOCLIST: 1100 i = 1; 1101 LIST_FOREACH(sc, &md_softc_list, list) { 1102 if (i == MDNPAD - 1) 1103 mdio->md_pad[i] = -1; 1104 else 1105 mdio->md_pad[i++] = sc->unit; 1106 } 1107 mdio->md_pad[0] = i - 1; 1108 return (0); 1109 default: 1110 return (ENOIOCTL); 1111 }; 1112 return (ENOIOCTL); 1113 } 1114 1115 static void 1116 md_preloaded(u_char *image, unsigned length) 1117 { 1118 struct md_s *sc; 1119 1120 sc = mdnew(-1); 1121 if (sc == NULL) 1122 return; 1123 sc->type = MD_PRELOAD; 1124 sc->secsize = DEV_BSIZE; 1125 sc->nsect = length / DEV_BSIZE; 1126 sc->pl_ptr = image; 1127 sc->pl_len = length; 1128 if (sc->unit == 0) 1129 mdrootready = 1; 1130 mdinit(sc); 1131 } 1132 1133 static void 1134 md_drvinit(void *unused) 1135 { 1136 1137 caddr_t mod; 1138 caddr_t c; 1139 u_char *ptr, *name, *type; 1140 unsigned len; 1141 1142 #ifdef MD_ROOT_SIZE 1143 md_preloaded(mfs_root, MD_ROOT_SIZE*1024); 1144 #endif 1145 mod = NULL; 1146 while ((mod = preload_search_next_name(mod)) != NULL) { 1147 name = (char *)preload_search_info(mod, MODINFO_NAME); 1148 type = (char *)preload_search_info(mod, MODINFO_TYPE); 1149 if (name == NULL) 1150 continue; 1151 if (type == NULL) 1152 continue; 1153 if (strcmp(type, "md_image") && strcmp(type, "mfs_root")) 1154 continue; 1155 c = preload_search_info(mod, MODINFO_ADDR); 1156 ptr = *(u_char **)c; 1157 c = preload_search_info(mod, MODINFO_SIZE); 1158 len = *(size_t *)c; 1159 printf("%s%d: Preloaded image <%s> %d bytes at %p\n", 1160 MD_NAME, mdunits, name, len, ptr); 1161 md_preloaded(ptr, len); 1162 } 1163 status_dev = make_dev(&mdctl_cdevsw, 0xffff00ff, UID_ROOT, GID_WHEEL, 1164 0600, MDCTL_NAME); 1165 } 1166 1167 static int 1168 md_modevent(module_t mod, int type, void *data) 1169 { 1170 int error; 1171 struct md_s *sc; 1172 1173 switch (type) { 1174 case MOD_LOAD: 1175 md_drvinit(NULL); 1176 break; 1177 case MOD_UNLOAD: 1178 LIST_FOREACH(sc, &md_softc_list, list) { 1179 error = mddetach(sc->unit, curthread); 1180 if (error != 0) 1181 return (error); 1182 } 1183 if (status_dev) 1184 destroy_dev(status_dev); 1185 status_dev = 0; 1186 break; 1187 default: 1188 break; 1189 } 1190 return (0); 1191 } 1192 1193 static moduledata_t md_mod = { 1194 MD_NAME, 1195 md_modevent, 1196 NULL 1197 }; 1198 DECLARE_MODULE(md, md_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE+CDEV_MAJOR); 1199 MODULE_VERSION(md, MD_MODVER); 1200 1201 1202 #ifdef MD_ROOT 1203 static void 1204 md_takeroot(void *junk) 1205 { 1206 if (mdrootready) 1207 rootdevnames[0] = "ufs:/dev/md0"; 1208 } 1209 1210 SYSINIT(md_root, SI_SUB_MOUNT_ROOT, SI_ORDER_FIRST, md_takeroot, NULL); 1211 #endif /* MD_ROOT */ 1212