/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN 0x10000	/* Tell worker thread to terminate. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "MD disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "MD sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/* Image gets put here: */
static u_char mfs_root[MD_ROOT_SIZE*1024] = "MFS Filesystem goes here";
static u_char end_mfs_root[] __unused = "MFS Filesystem had better STOP here";
#endif

static int mdrootready;
static int mdunits;
static dev_t status_dev = 0;

#define CDEV_MAJOR	95

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	/* open */	nullopen,
	/* close */	nullclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	mdctlioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	MD_NAME,
	/* maj */	CDEV_MAJOR
};

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	uint		total;
	uint		used;
	uint		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct devstat stats;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct disk disk;
	dev_t dev;
	enum md_types type;
	unsigned nsect;
	unsigned opencount;
	unsigned secsize;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	unsigned pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static int mddestroy(struct md_s *sc, struct thread *td);

static struct indir *
new_indir(uint shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_NOWAIT | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}
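/*
 * The sector map is a radix tree of "indir" nodes.  Interior nodes
 * (shift != 0) hold pointers to child nodes; leaf nodes (shift == 0)
 * hold one entry per sector: 0 means an unallocated (all-zero) sector,
 * a value in 1..255 means a sector filled entirely with that byte (see
 * the MD_COMPRESS handling in mdstart_malloc()), and anything larger
 * is a pointer to a sector buffer from the per-device UMA zone.
 *
 * Illustrative numbers, assuming 4 KB pages and 8-byte pointers (both
 * machine-dependent): NINDIR is 512, nshift computes to 9, and a 1 GB
 * device of 512-byte sectors (2^21 sectors) gets a three-level tree
 * whose top node has shift 18.
 */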
static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir *)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int i, layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}
	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in new_indir() here.
	 */
	ip = new_indir(layer * nshift);
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

struct g_class g_md_class = {
	"MD",
	NULL,
	NULL,
	G_CLASS_INITIALIZER
};

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

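	/* mdinit() hung our softc on the geom that owns this provider. */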
	sc = bp->bio_to->geom->softc;

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
	case BIO_SETATTR:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	bp->bio_blkno = bp->bio_offset >> DEV_BSHIFT;
	bp->bio_pblkno = bp->bio_offset / sc->secsize;
	bp->bio_bcount = bp->bio_length;
	mtx_lock(&sc->queue_mtx);
	bioqdisksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);

	wakeup(sc);
}

DECLARE_GEOM_CLASS(g_md_class, g_md);

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	unsigned secno, nsec, uc;
	uintptr_t sp, osp;

	nsec = bp->bio_bcount / sc->secsize;
	secno = bp->bio_pblkno;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->secsize);
			else if (osp <= 255)
				for (i = 0; i < sc->secsize; i++)
					dst[i] = osp;
			else
				bcopy((void *)osp, dst, sc->secsize);
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->secsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->secsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(
					    sc->uma, M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->secsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->secsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void *)osp);
		if (error)
			break;
		secno++;
		dst += sc->secsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	if (bp->bio_cmd == BIO_DELETE) {
		/* BIO_DELETE is a no-op for a preloaded, fixed image. */
	} else if (bp->bio_cmd == BIO_READ) {
		bcopy(sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT),
		    bp->bio_data, bp->bio_bcount);
	} else {
		bcopy(bp->bio_data,
		    sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT),
		    bp->bio_bcount);
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	bzero(&auio, sizeof(auio));

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_bcount;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_pblkno * sc->secsize;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else
		auio.uio_rw = UIO_WRITE;
	auio.uio_resid = bp->bio_bcount;
	auio.uio_td = curthread;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal, but we
	 * must set IO_NOWDRAIN to avoid a wdrain deadlock.
	 */
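	/*
	 * Writes are additionally bracketed by vn_start_write() and
	 * vn_finished_write() so that filesystem suspension can account
	 * for the I/O in flight.
	 */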
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_READ(sc->vnode, &auio, IO_DIRECT, sc->cred);
	} else {
		(void) vn_start_write(sc->vnode, &mp, V_WAIT);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_WRITE(sc->vnode, &auio, IO_NOWDRAIN, sc->cred);
		vn_finished_write(mp);
	}
	VOP_UNLOCK(sc->vnode, 0, curthread);
	bp->bio_resid = auio.uio_resid;
	return (error);
}

static void
mddone_swap(struct bio *bp)
{

	bp->bio_completed = bp->bio_length - bp->bio_resid;
	g_std_done(bp);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	{
		struct bio *bp2;

		bp2 = g_clone_bio(bp);
		bp2->bio_done = mddone_swap;
		bp2->bio_blkno = bp2->bio_offset >> DEV_BSHIFT;
		bp2->bio_pblkno = bp2->bio_offset / sc->secsize;
		bp2->bio_bcount = bp2->bio_length;
		bp = bp2;
	}

	bp->bio_resid = 0;
	if ((bp->bio_cmd == BIO_DELETE) && (sc->flags & MD_RESERVE))
		biodone(bp);
	else
		vm_pager_strategy(sc->object, bp);
	return (-1);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error, hasgiant;

	sc = arg;
	curthread->td_base_pri = PRIBIO;

	switch (sc->type) {
	case MD_SWAP:
	case MD_VNODE:
		mtx_lock(&Giant);
		hasgiant = 1;
		break;
	case MD_MALLOC:
	case MD_PRELOAD:
	default:
		hasgiant = 0;
		break;
	}

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		bp = bioq_first(&sc->bio_queue);
		if (bp)
			bioq_remove(&sc->bio_queue, bp);
		if (!bp) {
			if (sc->flags & MD_SHUTDOWN) {
				mtx_unlock(&sc->queue_mtx);
				sc->procp = NULL;
				wakeup(&sc->procp);
				if (!hasgiant)
					mtx_lock(&Giant);
				kthread_exit(0);
			}
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);

		switch (sc->type) {
		case MD_MALLOC:
			devstat_start_transaction(&sc->stats);
			error = mdstart_malloc(sc, bp);
			break;
		case MD_PRELOAD:
			devstat_start_transaction(&sc->stats);
			error = mdstart_preload(sc, bp);
			break;
		case MD_VNODE:
			devstat_start_transaction(&sc->stats);
			error = mdstart_vnode(sc, bp);
			break;
		case MD_SWAP:
			error = mdstart_swap(sc, bp);
			break;
		default:
			panic("Impossible md(type)");
			break;
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	/* XXX: LOCK(unique unit numbers) */
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	/* XXX: UNLOCK(unique unit numbers) */
	return (sc);
}

static struct md_s *
mdnew(int unit)
{
	struct md_s *sc;
	int error, max = -1;

	/* XXX: LOCK(unique unit numbers) */
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit) {
			/* XXX: UNLOCK(unique unit numbers) */
			return (NULL);
		}
		if (sc->unit > max)
			max = sc->unit;
	}
	if (unit == -1)
		unit = max + 1;
	if (unit > 255)
		return (NULL);
	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_ZERO);
	sc->unit = unit;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sprintf(sc->name, "md%d", unit);
	error = kthread_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
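	/*
	 * If the worker thread could not be created, undo the allocation;
	 * the softc has not been linked into md_softc_list yet.
	 */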
	if (error) {
		mtx_destroy(&sc->queue_mtx);
		free(sc, M_MD);
		return (NULL);
	}
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	/* XXX: UNLOCK(unique unit numbers) */
	return (sc);
}

static void
mdinit(struct md_s *sc)
{

	devstat_add_entry(&sc->stats, MD_NAME, sc->unit, sc->secsize,
	    DEVSTAT_NO_ORDERED_TAGS,
	    DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_OTHER,
	    DEVSTAT_PRIORITY_OTHER);
	{
		struct g_geom *gp;
		struct g_provider *pp;

		DROP_GIANT();
		g_topology_lock();
		gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
		gp->start = g_md_start;
		gp->access = g_md_access;
		gp->softc = sc;
		pp = g_new_providerf(gp, "md%d", sc->unit);
		pp->mediasize = (off_t)sc->nsect * sc->secsize;
		pp->sectorsize = sc->secsize;
		sc->gp = gp;
		sc->pp = pp;
		g_error_provider(pp, 0);
		g_topology_unlock();
		PICKUP_GIANT();
	}
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_ioctl *mdio)
{
	struct md_s *sc;

	if (mdio->md_size == 0)
		return (EINVAL);
	if (mdio->md_options & ~(MD_AUTOUNIT))
		return (EINVAL);
	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		if (sc == NULL)
			return (ENOMEM);
		mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
		if (sc == NULL)
			return (EBUSY);
	}
	sc->type = MD_PRELOAD;
	sc->secsize = DEV_BSIZE;
	sc->nsect = mdio->md_size;
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (mdio->md_size << DEV_BSHIFT);
	mdinit(sc);
	return (0);
}

static int
mdcreate_malloc(struct md_ioctl *mdio)
{
	struct md_s *sc;
	off_t u;
	uintptr_t sp;
	int error;

	error = 0;
	if (mdio->md_size == 0)
		return (EINVAL);
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		if (sc == NULL)
			return (ENOMEM);
		mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
		if (sc == NULL)
			return (EBUSY);
	}
	sc->type = MD_MALLOC;
	sc->secsize = DEV_BSIZE;
	sc->nsect = mdio->md_size;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->nsect);
	sc->uma = uma_zcreate(sc->name, sc->secsize,
	    NULL, NULL, NULL, NULL, 0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		for (u = 0; u < sc->nsect; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error)
				break;
		}
	}
	if (!error)
		mdinit(sc);
	else
		mddestroy(sc, NULL);
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */
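	/*
	 * Issue one dummy read under the new credential so that
	 * credential-checking filesystems (notably NFS) have seen it
	 * before real I/O starts from the kernel thread.
	 */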
	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->secsize, M_TEMP, 0);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->secsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_ioctl *mdio, struct thread *td)
{
	struct md_s *sc;
	struct vattr vattr;
	struct nameidata nd;
	int error, flags;

	flags = FREAD|FWRITE;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
	error = vn_open(&nd, &flags, 0);
	if (error) {
		if (error != EACCES && error != EPERM && error != EROFS)
			return (error);
		flags &= ~FWRITE;
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
		error = vn_open(&nd, &flags, 0);
		if (error)
			return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG ||
	    (error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td))) {
		VOP_UNLOCK(nd.ni_vp, 0, td);
		(void) vn_close(nd.ni_vp, flags, td->td_ucred, td);
		return (error ? error : EINVAL);
	}
	VOP_UNLOCK(nd.ni_vp, 0, td);

	/* mdnew() can fail either way; check before touching sc. */
	if (mdio->md_options & MD_AUTOUNIT)
		sc = mdnew(-1);
	else
		sc = mdnew(mdio->md_unit);
	if (sc == NULL) {
		(void) vn_close(nd.ni_vp, flags, td->td_ucred, td);
		return (EBUSY);
	}
	mdio->md_unit = sc->unit;

	sc->type = MD_VNODE;
	sc->flags = mdio->md_options & MD_FORCE;
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->secsize = DEV_BSIZE;
	sc->vnode = nd.ni_vp;

	/*
	 * If the size is specified, override the file attributes.
	 */
	if (mdio->md_size)
		sc->nsect = mdio->md_size;
	else
		sc->nsect = vattr.va_size / sc->secsize; /* XXX: round up ? */
	if (sc->nsect == 0) {
		mddestroy(sc, td);
		return (EINVAL);
	}
	error = mdsetcred(sc, td->td_ucred);
	if (error) {
		mddestroy(sc, td);
		return (error);
	}
	mdinit(sc);
	return (0);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	GIANT_REQUIRED;

	devstat_remove_entry(&sc->stats);
	if (sc->gp) {
		sc->gp->flags |= G_GEOM_WITHER;
		sc->gp->softc = NULL;
	}
	if (sc->pp)
		g_orphan_provider(sc->pp, ENXIO);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (sc->procp != NULL)
		tsleep(&sc->procp, PRIBIO, "mddestroy", hz / 10);
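	/*
	 * The worker thread has exited, so the bio queue is quiescent;
	 * only now is it safe to destroy the queue mutex and release
	 * the type-specific resources.
	 */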
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL)
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_pager_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	/* XXX: LOCK(unique unit numbers) */
	LIST_REMOVE(sc, list);
	/* XXX: UNLOCK(unique unit numbers) */
	free(sc, M_MD);
	return (0);
}

static int
mdcreate_swap(struct md_ioctl *mdio, struct thread *td)
{
	int error;
	struct md_s *sc;

	GIANT_REQUIRED;

	/* mdnew() can fail either way; check before touching sc. */
	if (mdio->md_options & MD_AUTOUNIT)
		sc = mdnew(-1);
	else
		sc = mdnew(mdio->md_unit);
	if (sc == NULL)
		return (EBUSY);
	mdio->md_unit = sc->unit;

	sc->type = MD_SWAP;

	/*
	 * Range check.  Disallow negative sizes or any size less than the
	 * size of a page.  Then round to a page.
	 */

	if (mdio->md_size == 0) {
		mddestroy(sc, td);
		return (EDOM);
	}

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * sc_secsize is PAGE_SIZE'd
	 *
	 * mdio->size is in DEV_BSIZE'd chunks.
	 * Note the truncation: e.g. with 4 KB pages, an md_size of 2049
	 * 512-byte blocks yields 256 pages, i.e. exactly 1 MB.
	 */

	sc->secsize = PAGE_SIZE;
	sc->nsect = mdio->md_size / (PAGE_SIZE / DEV_BSIZE);
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL,
	    sc->secsize * (vm_offset_t)sc->nsect, VM_PROT_DEFAULT, 0);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, sc->nsect) < 0) {
			vm_pager_deallocate(sc->object);
			sc->object = NULL;
			mddestroy(sc, td);
			return (EDOM);
		}
	}
	error = mdsetcred(sc, td->td_ucred);
	if (error)
		mddestroy(sc, td);
	else
		mdinit(sc);
	return (error);
}

static int
mddetach(int unit, struct thread *td)
{
	struct md_s *sc;

	sc = mdfind(unit);
	if (sc == NULL)
		return (ENOENT);
	if (sc->opencount != 0 && !(sc->flags & MD_FORCE))
		return (EBUSY);
	switch (sc->type) {
	case MD_VNODE:
	case MD_SWAP:
	case MD_MALLOC:
	case MD_PRELOAD:
		return (mddestroy(sc, td));
	default:
		return (EOPNOTSUPP);
	}
}

static int
mdctlioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	mdio = (struct md_ioctl *)addr;
	switch (cmd) {
	case MDIOCATTACH:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		switch (mdio->md_type) {
		case MD_MALLOC:
			return (mdcreate_malloc(mdio));
		case MD_PRELOAD:
			return (mdcreate_preload(mdio));
		case MD_VNODE:
			return (mdcreate_vnode(mdio, td));
		case MD_SWAP:
			return (mdcreate_swap(mdio, td));
		default:
			return (EINVAL);
		}
	case MDIOCDETACH:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		if (mdio->md_file != NULL || mdio->md_size != 0 ||
		    mdio->md_options != 0)
			return (EINVAL);
		return (mddetach(mdio->md_unit, td));
	case MDIOCQUERY:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		switch (sc->type) {
		case MD_MALLOC:
			mdio->md_size = sc->nsect;
			break;
		case MD_PRELOAD:
			mdio->md_size = sc->nsect;
			mdio->md_base = (uint64_t)(intptr_t)sc->pl_ptr;
			break;
		case MD_SWAP:
			mdio->md_size = sc->nsect * (PAGE_SIZE / DEV_BSIZE);
			break;
		case MD_VNODE:
			mdio->md_size = sc->nsect;
			/* XXX fill this in */
			mdio->md_file = NULL;
			break;
		}
		return (0);
	case MDIOCLIST:
		/*
		 * md_pad[0] receives the number of units reported,
		 * md_pad[1..] their unit numbers; -1 in the final slot
		 * marks a truncated list.
		 */
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static void
md_preloaded(u_char *image, unsigned length)
{
	struct md_s *sc;

	sc = mdnew(-1);
	if (sc == NULL)
		return;
	sc->type = MD_PRELOAD;
	sc->secsize = DEV_BSIZE;
	sc->nsect = length / DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	if (sc->unit == 0)
		mdrootready = 1;
	mdinit(sc);
}

static void
md_drvinit(void *unused)
{
	caddr_t mod;
	caddr_t c;
	u_char *ptr;
	char *name, *type;
	unsigned len;

#ifdef MD_ROOT_SIZE
	md_preloaded(mfs_root, MD_ROOT_SIZE*1024);
#endif
	mod = NULL;
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (name == NULL)
			continue;
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		c = preload_search_info(mod, MODINFO_ADDR);
		ptr = *(u_char **)c;
		c = preload_search_info(mod, MODINFO_SIZE);
		len = *(size_t *)c;
		printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
		    MD_NAME, mdunits, name, len, ptr);
		md_preloaded(ptr, len);
	}
	status_dev = make_dev(&mdctl_cdevsw, 0xffff00ff, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
}

static int
md_modevent(module_t mod, int type, void *data)
{
	int error;
	struct md_s *sc;

	switch (type) {
	case MOD_LOAD:
		md_drvinit(NULL);
		break;
	case MOD_UNLOAD:
		/*
		 * mddetach() frees the softc, so walk the list by
		 * repeatedly taking its head rather than LIST_FOREACH.
		 */
		while ((sc = LIST_FIRST(&md_softc_list)) != NULL) {
			error = mddetach(sc->unit, curthread);
			if (error != 0)
				return (error);
		}
		if (status_dev)
			destroy_dev(status_dev);
		status_dev = 0;
		break;
	default:
		break;
	}
	return (0);
}
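/*
 * Module glue: MOD_LOAD sets up any preloaded images and the control
 * device; MOD_UNLOAD detaches every unit and removes the control device.
 */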
static moduledata_t md_mod = {
	MD_NAME,
	md_modevent,
	NULL
};
DECLARE_MODULE(md, md_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE+CDEV_MAJOR);
MODULE_VERSION(md, MD_MODVER);

#ifdef MD_ROOT
static void
md_takeroot(void *junk)
{

	if (mdrootready)
		rootdevnames[0] = "ufs:/dev/md0";
}

SYSINIT(md_root, SI_SUB_MOUNT_ROOT, SI_ORDER_FIRST, md_takeroot, NULL);
#endif /* MD_ROOT */