1 /*- 2 * ---------------------------------------------------------------------------- 3 * "THE BEER-WARE LICENSE" (Revision 42): 4 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you 5 * can do whatever you want with this stuff. If we meet some day, and you think 6 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 7 * ---------------------------------------------------------------------------- 8 * 9 * $FreeBSD$ 10 * 11 */ 12 13 /*- 14 * The following functions are based in the vn(4) driver: mdstart_swap(), 15 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(), 16 * and as such under the following copyright: 17 * 18 * Copyright (c) 1988 University of Utah. 19 * Copyright (c) 1990, 1993 20 * The Regents of the University of California. All rights reserved. 21 * 22 * This code is derived from software contributed to Berkeley by 23 * the Systems Programming Group of the University of Utah Computer 24 * Science Department. 25 * 26 * Redistribution and use in source and binary forms, with or without 27 * modification, are permitted provided that the following conditions 28 * are met: 29 * 1. Redistributions of source code must retain the above copyright 30 * notice, this list of conditions and the following disclaimer. 31 * 2. Redistributions in binary form must reproduce the above copyright 32 * notice, this list of conditions and the following disclaimer in the 33 * documentation and/or other materials provided with the distribution. 34 * 4. Neither the name of the University nor the names of its contributors 35 * may be used to endorse or promote products derived from this software 36 * without specific prior written permission. 37 * 38 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 39 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 40 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 41 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 48 * SUCH DAMAGE. 49 * 50 * from: Utah Hdr: vn.c 1.13 94/04/02 51 * 52 * from: @(#)vn.c 8.6 (Berkeley) 4/1/94 53 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03 54 */ 55 56 #include "opt_geom.h" 57 #include "opt_md.h" 58 59 #include <sys/param.h> 60 #include <sys/systm.h> 61 #include <sys/bio.h> 62 #include <sys/conf.h> 63 #include <sys/fcntl.h> 64 #include <sys/kernel.h> 65 #include <sys/kthread.h> 66 #include <sys/linker.h> 67 #include <sys/lock.h> 68 #include <sys/malloc.h> 69 #include <sys/mdioctl.h> 70 #include <sys/mutex.h> 71 #include <sys/namei.h> 72 #include <sys/proc.h> 73 #include <sys/queue.h> 74 #include <sys/sched.h> 75 #include <sys/sf_buf.h> 76 #include <sys/sysctl.h> 77 #include <sys/vnode.h> 78 79 #include <geom/geom.h> 80 81 #include <vm/vm.h> 82 #include <vm/vm_object.h> 83 #include <vm/vm_page.h> 84 #include <vm/vm_pager.h> 85 #include <vm/swap_pager.h> 86 #include <vm/uma.h> 87 88 #define MD_MODVER 1 89 90 #define MD_SHUTDOWN 0x10000 /* Tell worker thread to terminate. */ 91 92 #ifndef MD_NSECT 93 #define MD_NSECT (10000 * 2) 94 #endif 95 96 static MALLOC_DEFINE(M_MD, "MD disk", "Memory Disk"); 97 static MALLOC_DEFINE(M_MDSECT, "MD sectors", "Memory Disk Sectors"); 98 99 static int md_debug; 100 SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, ""); 101 102 #if defined(MD_ROOT) && defined(MD_ROOT_SIZE) 103 /* Image gets put here: */ 104 static u_char mfs_root[MD_ROOT_SIZE*1024] = "MFS Filesystem goes here"; 105 static u_char end_mfs_root[] __unused = "MFS Filesystem had better STOP here"; 106 #endif 107 108 static g_init_t g_md_init; 109 static g_fini_t g_md_fini; 110 static g_start_t g_md_start; 111 static g_access_t g_md_access; 112 113 static int mdunits; 114 static struct cdev *status_dev = 0; 115 116 static d_ioctl_t mdctlioctl; 117 118 static struct cdevsw mdctl_cdevsw = { 119 .d_version = D_VERSION, 120 .d_flags = D_NEEDGIANT, 121 .d_ioctl = mdctlioctl, 122 .d_name = MD_NAME, 123 }; 124 125 struct g_class g_md_class = { 126 .name = "MD", 127 .version = G_VERSION, 128 .init = g_md_init, 129 .fini = g_md_fini, 130 .start = g_md_start, 131 .access = g_md_access, 132 }; 133 134 DECLARE_GEOM_CLASS(g_md_class, g_md); 135 136 137 static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list); 138 139 #define NINDIR (PAGE_SIZE / sizeof(uintptr_t)) 140 #define NMASK (NINDIR-1) 141 static int nshift; 142 143 struct indir { 144 uintptr_t *array; 145 u_int total; 146 u_int used; 147 u_int shift; 148 }; 149 150 struct md_s { 151 int unit; 152 LIST_ENTRY(md_s) list; 153 struct bio_queue_head bio_queue; 154 struct mtx queue_mtx; 155 struct cdev *dev; 156 enum md_types type; 157 off_t mediasize; 158 unsigned sectorsize; 159 unsigned opencount; 160 unsigned fwheads; 161 unsigned fwsectors; 162 unsigned flags; 163 char name[20]; 164 struct proc *procp; 165 struct g_geom *gp; 166 struct g_provider *pp; 167 168 /* MD_MALLOC related fields */ 169 struct indir *indir; 170 uma_zone_t uma; 171 172 /* MD_PRELOAD related fields */ 173 u_char *pl_ptr; 174 size_t pl_len; 175 176 /* MD_VNODE related fields */ 177 struct vnode *vnode; 178 char file[PATH_MAX]; 179 struct ucred *cred; 180 181 /* MD_SWAP related fields */ 182 vm_object_t object; 183 }; 184 185 static int mddestroy(struct md_s *sc, struct thread *td); 186 187 static struct indir * 188 new_indir(u_int shift) 189 { 190 struct indir *ip; 191 192 ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO); 193 if (ip == NULL) 194 return (NULL); 195 ip->array = malloc(sizeof(uintptr_t) * NINDIR, 196 M_MDSECT, M_NOWAIT | M_ZERO); 197 if (ip->array == NULL) { 198 free(ip, M_MD); 199 return (NULL); 200 } 201 ip->total = NINDIR; 202 ip->shift = shift; 203 return (ip); 204 } 205 206 static void 207 del_indir(struct indir *ip) 208 { 209 210 free(ip->array, M_MDSECT); 211 free(ip, M_MD); 212 } 213 214 static void 215 destroy_indir(struct md_s *sc, struct indir *ip) 216 { 217 int i; 218 219 for (i = 0; i < NINDIR; i++) { 220 if (!ip->array[i]) 221 continue; 222 if (ip->shift) 223 destroy_indir(sc, (struct indir*)(ip->array[i])); 224 else if (ip->array[i] > 255) 225 uma_zfree(sc->uma, (void *)(ip->array[i])); 226 } 227 del_indir(ip); 228 } 229 230 /* 231 * This function does the math and alloctes the top level "indir" structure 232 * for a device of "size" sectors. 233 */ 234 235 static struct indir * 236 dimension(off_t size) 237 { 238 off_t rcnt; 239 struct indir *ip; 240 int i, layer; 241 242 rcnt = size; 243 layer = 0; 244 while (rcnt > NINDIR) { 245 rcnt /= NINDIR; 246 layer++; 247 } 248 /* figure out log2(NINDIR) */ 249 for (i = NINDIR, nshift = -1; i; nshift++) 250 i >>= 1; 251 252 /* 253 * XXX: the top layer is probably not fully populated, so we allocate 254 * too much space for ip->array in here. 255 */ 256 ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO); 257 ip->array = malloc(sizeof(uintptr_t) * NINDIR, 258 M_MDSECT, M_WAITOK | M_ZERO); 259 ip->total = NINDIR; 260 ip->shift = layer * nshift; 261 return (ip); 262 } 263 264 /* 265 * Read a given sector 266 */ 267 268 static uintptr_t 269 s_read(struct indir *ip, off_t offset) 270 { 271 struct indir *cip; 272 int idx; 273 uintptr_t up; 274 275 if (md_debug > 1) 276 printf("s_read(%jd)\n", (intmax_t)offset); 277 up = 0; 278 for (cip = ip; cip != NULL;) { 279 if (cip->shift) { 280 idx = (offset >> cip->shift) & NMASK; 281 up = cip->array[idx]; 282 cip = (struct indir *)up; 283 continue; 284 } 285 idx = offset & NMASK; 286 return (cip->array[idx]); 287 } 288 return (0); 289 } 290 291 /* 292 * Write a given sector, prune the tree if the value is 0 293 */ 294 295 static int 296 s_write(struct indir *ip, off_t offset, uintptr_t ptr) 297 { 298 struct indir *cip, *lip[10]; 299 int idx, li; 300 uintptr_t up; 301 302 if (md_debug > 1) 303 printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr); 304 up = 0; 305 li = 0; 306 cip = ip; 307 for (;;) { 308 lip[li++] = cip; 309 if (cip->shift) { 310 idx = (offset >> cip->shift) & NMASK; 311 up = cip->array[idx]; 312 if (up != 0) { 313 cip = (struct indir *)up; 314 continue; 315 } 316 /* Allocate branch */ 317 cip->array[idx] = 318 (uintptr_t)new_indir(cip->shift - nshift); 319 if (cip->array[idx] == 0) 320 return (ENOSPC); 321 cip->used++; 322 up = cip->array[idx]; 323 cip = (struct indir *)up; 324 continue; 325 } 326 /* leafnode */ 327 idx = offset & NMASK; 328 up = cip->array[idx]; 329 if (up != 0) 330 cip->used--; 331 cip->array[idx] = ptr; 332 if (ptr != 0) 333 cip->used++; 334 break; 335 } 336 if (cip->used != 0 || li == 1) 337 return (0); 338 li--; 339 while (cip->used == 0 && cip != ip) { 340 li--; 341 idx = (offset >> lip[li]->shift) & NMASK; 342 up = lip[li]->array[idx]; 343 KASSERT(up == (uintptr_t)cip, ("md screwed up")); 344 del_indir(cip); 345 lip[li]->array[idx] = 0; 346 lip[li]->used--; 347 cip = lip[li]; 348 } 349 return (0); 350 } 351 352 353 static int 354 g_md_access(struct g_provider *pp, int r, int w, int e) 355 { 356 struct md_s *sc; 357 358 sc = pp->geom->softc; 359 if (sc == NULL) 360 return (ENXIO); 361 r += pp->acr; 362 w += pp->acw; 363 e += pp->ace; 364 if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) { 365 sc->opencount = 1; 366 } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) { 367 sc->opencount = 0; 368 } 369 return (0); 370 } 371 372 static void 373 g_md_start(struct bio *bp) 374 { 375 struct md_s *sc; 376 377 sc = bp->bio_to->geom->softc; 378 mtx_lock(&sc->queue_mtx); 379 bioq_disksort(&sc->bio_queue, bp); 380 mtx_unlock(&sc->queue_mtx); 381 wakeup(sc); 382 } 383 384 385 386 static int 387 mdstart_malloc(struct md_s *sc, struct bio *bp) 388 { 389 int i, error; 390 u_char *dst; 391 off_t secno, nsec, uc; 392 uintptr_t sp, osp; 393 394 nsec = bp->bio_length / sc->sectorsize; 395 secno = bp->bio_offset / sc->sectorsize; 396 dst = bp->bio_data; 397 error = 0; 398 while (nsec--) { 399 osp = s_read(sc->indir, secno); 400 if (bp->bio_cmd == BIO_DELETE) { 401 if (osp != 0) 402 error = s_write(sc->indir, secno, 0); 403 } else if (bp->bio_cmd == BIO_READ) { 404 if (osp == 0) 405 bzero(dst, sc->sectorsize); 406 else if (osp <= 255) 407 for (i = 0; i < sc->sectorsize; i++) 408 dst[i] = osp; 409 else 410 bcopy((void *)osp, dst, sc->sectorsize); 411 osp = 0; 412 } else if (bp->bio_cmd == BIO_WRITE) { 413 if (sc->flags & MD_COMPRESS) { 414 uc = dst[0]; 415 for (i = 1; i < sc->sectorsize; i++) 416 if (dst[i] != uc) 417 break; 418 } else { 419 i = 0; 420 uc = 0; 421 } 422 if (i == sc->sectorsize) { 423 if (osp != uc) 424 error = s_write(sc->indir, secno, uc); 425 } else { 426 if (osp <= 255) { 427 sp = (uintptr_t)uma_zalloc(sc->uma, 428 M_NOWAIT); 429 if (sp == 0) { 430 error = ENOSPC; 431 break; 432 } 433 bcopy(dst, (void *)sp, sc->sectorsize); 434 error = s_write(sc->indir, secno, sp); 435 } else { 436 bcopy(dst, (void *)osp, sc->sectorsize); 437 osp = 0; 438 } 439 } 440 } else { 441 error = EOPNOTSUPP; 442 } 443 if (osp > 255) 444 uma_zfree(sc->uma, (void*)osp); 445 if (error != 0) 446 break; 447 secno++; 448 dst += sc->sectorsize; 449 } 450 bp->bio_resid = 0; 451 return (error); 452 } 453 454 static int 455 mdstart_preload(struct md_s *sc, struct bio *bp) 456 { 457 458 switch (bp->bio_cmd) { 459 case BIO_READ: 460 bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data, 461 bp->bio_length); 462 break; 463 case BIO_WRITE: 464 bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset, 465 bp->bio_length); 466 break; 467 } 468 bp->bio_resid = 0; 469 return (0); 470 } 471 472 static int 473 mdstart_vnode(struct md_s *sc, struct bio *bp) 474 { 475 int error; 476 struct uio auio; 477 struct iovec aiov; 478 struct mount *mp; 479 480 /* 481 * VNODE I/O 482 * 483 * If an error occurs, we set BIO_ERROR but we do not set 484 * B_INVAL because (for a write anyway), the buffer is 485 * still valid. 486 */ 487 488 bzero(&auio, sizeof(auio)); 489 490 aiov.iov_base = bp->bio_data; 491 aiov.iov_len = bp->bio_length; 492 auio.uio_iov = &aiov; 493 auio.uio_iovcnt = 1; 494 auio.uio_offset = (vm_ooffset_t)bp->bio_offset; 495 auio.uio_segflg = UIO_SYSSPACE; 496 if(bp->bio_cmd == BIO_READ) 497 auio.uio_rw = UIO_READ; 498 else if(bp->bio_cmd == BIO_WRITE) 499 auio.uio_rw = UIO_WRITE; 500 else 501 panic("wrong BIO_OP in mdstart_vnode"); 502 auio.uio_resid = bp->bio_length; 503 auio.uio_td = curthread; 504 /* 505 * When reading set IO_DIRECT to try to avoid double-caching 506 * the data. When writing IO_DIRECT is not optimal. 507 */ 508 if (bp->bio_cmd == BIO_READ) { 509 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread); 510 error = VOP_READ(sc->vnode, &auio, IO_DIRECT, sc->cred); 511 VOP_UNLOCK(sc->vnode, 0, curthread); 512 } else { 513 (void) vn_start_write(sc->vnode, &mp, V_WAIT); 514 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread); 515 error = VOP_WRITE(sc->vnode, &auio, 516 sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred); 517 VOP_UNLOCK(sc->vnode, 0, curthread); 518 vn_finished_write(mp); 519 } 520 bp->bio_resid = auio.uio_resid; 521 return (error); 522 } 523 524 static int 525 mdstart_swap(struct md_s *sc, struct bio *bp) 526 { 527 struct sf_buf *sf; 528 int rv, offs, len, lastend; 529 vm_pindex_t i, lastp; 530 vm_page_t m; 531 u_char *p; 532 533 p = bp->bio_data; 534 535 /* 536 * offs is the ofset at whih to start operating on the 537 * next (ie, first) page. lastp is the last page on 538 * which we're going to operate. lastend is the ending 539 * position within that last page (ie, PAGE_SIZE if 540 * we're operating on complete aligned pages). 541 */ 542 offs = bp->bio_offset % PAGE_SIZE; 543 lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE; 544 lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1; 545 546 rv = VM_PAGER_OK; 547 VM_OBJECT_LOCK(sc->object); 548 vm_object_pip_add(sc->object, 1); 549 for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) { 550 len = ((i == lastp) ? lastend : PAGE_SIZE) - offs; 551 552 m = vm_page_grab(sc->object, i, 553 VM_ALLOC_NORMAL|VM_ALLOC_RETRY); 554 VM_OBJECT_UNLOCK(sc->object); 555 sched_pin(); 556 sf = sf_buf_alloc(m, SFB_CPUPRIVATE); 557 VM_OBJECT_LOCK(sc->object); 558 if (bp->bio_cmd == BIO_READ) { 559 if (m->valid != VM_PAGE_BITS_ALL) 560 rv = vm_pager_get_pages(sc->object, &m, 1, 0); 561 if (rv == VM_PAGER_ERROR) { 562 sf_buf_free(sf); 563 sched_unpin(); 564 vm_page_lock_queues(); 565 vm_page_wakeup(m); 566 vm_page_unlock_queues(); 567 break; 568 } 569 bcopy((void *)(sf_buf_kva(sf) + offs), p, len); 570 } else if (bp->bio_cmd == BIO_WRITE) { 571 if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL) 572 rv = vm_pager_get_pages(sc->object, &m, 1, 0); 573 if (rv == VM_PAGER_ERROR) { 574 sf_buf_free(sf); 575 sched_unpin(); 576 vm_page_lock_queues(); 577 vm_page_wakeup(m); 578 vm_page_unlock_queues(); 579 break; 580 } 581 bcopy(p, (void *)(sf_buf_kva(sf) + offs), len); 582 m->valid = VM_PAGE_BITS_ALL; 583 #if 0 584 } else if (bp->bio_cmd == BIO_DELETE) { 585 if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL) 586 rv = vm_pager_get_pages(sc->object, &m, 1, 0); 587 if (rv == VM_PAGER_ERROR) { 588 sf_buf_free(sf); 589 sched_unpin(); 590 vm_page_lock_queues(); 591 vm_page_wakeup(m); 592 vm_page_unlock_queues(); 593 break; 594 } 595 bzero((void *)(sf_buf_kva(sf) + offs), len); 596 vm_page_dirty(m); 597 m->valid = VM_PAGE_BITS_ALL; 598 #endif 599 } 600 sf_buf_free(sf); 601 sched_unpin(); 602 vm_page_lock_queues(); 603 vm_page_wakeup(m); 604 vm_page_activate(m); 605 if (bp->bio_cmd == BIO_WRITE) 606 vm_page_dirty(m); 607 vm_page_unlock_queues(); 608 609 /* Actions on further pages start at offset 0 */ 610 p += PAGE_SIZE - offs; 611 offs = 0; 612 #if 0 613 if (bootverbose || bp->bio_offset / PAGE_SIZE < 17) 614 printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n", 615 m->wire_count, m->busy, 616 m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i); 617 #endif 618 } 619 vm_object_pip_subtract(sc->object, 1); 620 vm_object_set_writeable_dirty(sc->object); 621 VM_OBJECT_UNLOCK(sc->object); 622 return (rv != VM_PAGER_ERROR ? 0 : ENOSPC); 623 } 624 625 static void 626 md_kthread(void *arg) 627 { 628 struct md_s *sc; 629 struct bio *bp; 630 int error, hasgiant; 631 632 sc = arg; 633 mtx_lock_spin(&sched_lock); 634 sched_prio(curthread, PRIBIO); 635 mtx_unlock_spin(&sched_lock); 636 637 switch (sc->type) { 638 case MD_VNODE: 639 mtx_lock(&Giant); 640 hasgiant = 1; 641 break; 642 case MD_MALLOC: 643 case MD_PRELOAD: 644 case MD_SWAP: 645 default: 646 hasgiant = 0; 647 break; 648 } 649 650 for (;;) { 651 mtx_lock(&sc->queue_mtx); 652 bp = bioq_takefirst(&sc->bio_queue); 653 if (!bp) { 654 if (sc->flags & MD_SHUTDOWN) { 655 mtx_unlock(&sc->queue_mtx); 656 sc->procp = NULL; 657 wakeup(&sc->procp); 658 if (hasgiant) 659 mtx_unlock(&Giant); 660 kthread_exit(0); 661 } 662 msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0); 663 continue; 664 } 665 mtx_unlock(&sc->queue_mtx); 666 if (bp->bio_cmd == BIO_GETATTR) { 667 if (sc->fwsectors && sc->fwheads && 668 (g_handleattr_int(bp, "GEOM::fwsectors", 669 sc->fwsectors) || 670 g_handleattr_int(bp, "GEOM::fwheads", 671 sc->fwheads))) 672 error = -1; 673 else 674 error = EOPNOTSUPP; 675 } else { 676 switch (sc->type) { 677 case MD_MALLOC: 678 error = mdstart_malloc(sc, bp); 679 break; 680 case MD_PRELOAD: 681 error = mdstart_preload(sc, bp); 682 break; 683 case MD_VNODE: 684 mtx_assert(&Giant, MA_OWNED); 685 error = mdstart_vnode(sc, bp); 686 mtx_assert(&Giant, MA_OWNED); 687 break; 688 case MD_SWAP: 689 error = mdstart_swap(sc, bp); 690 break; 691 default: 692 panic("Impossible md(type)"); 693 break; 694 } 695 } 696 697 if (error != -1) { 698 bp->bio_completed = bp->bio_length; 699 g_io_deliver(bp, error); 700 } 701 } 702 } 703 704 static struct md_s * 705 mdfind(int unit) 706 { 707 struct md_s *sc; 708 709 /* XXX: LOCK(unique unit numbers) */ 710 LIST_FOREACH(sc, &md_softc_list, list) { 711 if (sc->unit == unit) 712 break; 713 } 714 /* XXX: UNLOCK(unique unit numbers) */ 715 return (sc); 716 } 717 718 static struct md_s * 719 mdnew(int unit) 720 { 721 struct md_s *sc; 722 int error, max = -1; 723 724 /* XXX: LOCK(unique unit numbers) */ 725 LIST_FOREACH(sc, &md_softc_list, list) { 726 if (sc->unit == unit) { 727 /* XXX: UNLOCK(unique unit numbers) */ 728 return (NULL); 729 } 730 if (sc->unit > max) 731 max = sc->unit; 732 } 733 if (unit == -1) 734 unit = max + 1; 735 sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO); 736 sc->unit = unit; 737 bioq_init(&sc->bio_queue); 738 mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF); 739 sprintf(sc->name, "md%d", unit); 740 error = kthread_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name); 741 if (error != 0) { 742 free(sc, M_MD); 743 return (NULL); 744 } 745 LIST_INSERT_HEAD(&md_softc_list, sc, list); 746 /* XXX: UNLOCK(unique unit numbers) */ 747 return (sc); 748 } 749 750 static void 751 mdinit(struct md_s *sc) 752 { 753 754 struct g_geom *gp; 755 struct g_provider *pp; 756 757 DROP_GIANT(); 758 g_topology_lock(); 759 gp = g_new_geomf(&g_md_class, "md%d", sc->unit); 760 gp->softc = sc; 761 pp = g_new_providerf(gp, "md%d", sc->unit); 762 pp->mediasize = sc->mediasize; 763 pp->sectorsize = sc->sectorsize; 764 sc->gp = gp; 765 sc->pp = pp; 766 g_error_provider(pp, 0); 767 g_topology_unlock(); 768 PICKUP_GIANT(); 769 } 770 771 /* 772 * XXX: we should check that the range they feed us is mapped. 773 * XXX: we should implement read-only. 774 */ 775 776 static int 777 mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio) 778 { 779 780 if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE)) 781 return (EINVAL); 782 sc->flags = mdio->md_options & MD_FORCE; 783 /* Cast to pointer size, then to pointer to avoid warning */ 784 sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base; 785 sc->pl_len = (size_t)sc->mediasize; 786 return (0); 787 } 788 789 790 static int 791 mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio) 792 { 793 uintptr_t sp; 794 int error; 795 off_t u; 796 797 error = 0; 798 if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE)) 799 return (EINVAL); 800 if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize)) 801 return (EINVAL); 802 /* Compression doesn't make sense if we have reserved space */ 803 if (mdio->md_options & MD_RESERVE) 804 mdio->md_options &= ~MD_COMPRESS; 805 if (mdio->md_fwsectors != 0) 806 sc->fwsectors = mdio->md_fwsectors; 807 if (mdio->md_fwheads != 0) 808 sc->fwheads = mdio->md_fwheads; 809 sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE); 810 sc->indir = dimension(sc->mediasize / sc->sectorsize); 811 sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL, 812 0x1ff, 0); 813 if (mdio->md_options & MD_RESERVE) { 814 off_t nsectors; 815 816 nsectors = sc->mediasize / sc->sectorsize; 817 for (u = 0; u < nsectors; u++) { 818 sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO); 819 if (sp != 0) 820 error = s_write(sc->indir, u, sp); 821 else 822 error = ENOMEM; 823 if (error != 0) 824 break; 825 } 826 } 827 return (error); 828 } 829 830 831 static int 832 mdsetcred(struct md_s *sc, struct ucred *cred) 833 { 834 char *tmpbuf; 835 int error = 0; 836 837 /* 838 * Set credits in our softc 839 */ 840 841 if (sc->cred) 842 crfree(sc->cred); 843 sc->cred = crhold(cred); 844 845 /* 846 * Horrible kludge to establish credentials for NFS XXX. 847 */ 848 849 if (sc->vnode) { 850 struct uio auio; 851 struct iovec aiov; 852 853 tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK); 854 bzero(&auio, sizeof(auio)); 855 856 aiov.iov_base = tmpbuf; 857 aiov.iov_len = sc->sectorsize; 858 auio.uio_iov = &aiov; 859 auio.uio_iovcnt = 1; 860 auio.uio_offset = 0; 861 auio.uio_rw = UIO_READ; 862 auio.uio_segflg = UIO_SYSSPACE; 863 auio.uio_resid = aiov.iov_len; 864 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread); 865 error = VOP_READ(sc->vnode, &auio, 0, sc->cred); 866 VOP_UNLOCK(sc->vnode, 0, curthread); 867 free(tmpbuf, M_TEMP); 868 } 869 return (error); 870 } 871 872 static int 873 mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td) 874 { 875 struct vattr vattr; 876 struct nameidata nd; 877 int error, flags; 878 879 error = copyinstr(mdio->md_file, sc->file, sizeof(sc->file), NULL); 880 if (error != 0) 881 return (error); 882 flags = FREAD|FWRITE; 883 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td); 884 error = vn_open(&nd, &flags, 0, -1); 885 if (error != 0) { 886 NDFREE(&nd, NDF_ONLY_PNBUF); 887 if (error != EACCES && error != EPERM && error != EROFS) 888 return (error); 889 flags &= ~FWRITE; 890 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td); 891 error = vn_open(&nd, &flags, 0, -1); 892 } 893 NDFREE(&nd, NDF_ONLY_PNBUF); 894 if (error != 0) 895 return (error); 896 if (nd.ni_vp->v_type != VREG || 897 (error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td))) { 898 VOP_UNLOCK(nd.ni_vp, 0, td); 899 (void)vn_close(nd.ni_vp, flags, td->td_ucred, td); 900 return (error ? error : EINVAL); 901 } 902 VOP_UNLOCK(nd.ni_vp, 0, td); 903 904 if (mdio->md_fwsectors != 0) 905 sc->fwsectors = mdio->md_fwsectors; 906 if (mdio->md_fwheads != 0) 907 sc->fwheads = mdio->md_fwheads; 908 sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC); 909 if (!(flags & FWRITE)) 910 sc->flags |= MD_READONLY; 911 sc->vnode = nd.ni_vp; 912 913 error = mdsetcred(sc, td->td_ucred); 914 if (error != 0) { 915 (void)vn_close(nd.ni_vp, flags, td->td_ucred, td); 916 return (error); 917 } 918 return (0); 919 } 920 921 static void 922 md_zapit(void *p, int cancel) 923 { 924 if (cancel) 925 return; 926 g_wither_geom(p, ENXIO); 927 } 928 929 static int 930 mddestroy(struct md_s *sc, struct thread *td) 931 { 932 933 GIANT_REQUIRED; 934 935 mtx_destroy(&sc->queue_mtx); 936 if (sc->gp) { 937 sc->gp->softc = NULL; 938 g_waitfor_event(md_zapit, sc->gp, M_WAITOK, sc->gp, NULL); 939 sc->gp = NULL; 940 sc->pp = NULL; 941 } 942 sc->flags |= MD_SHUTDOWN; 943 wakeup(sc); 944 while (sc->procp != NULL) 945 tsleep(&sc->procp, PRIBIO, "mddestroy", hz / 10); 946 if (sc->vnode != NULL) 947 (void)vn_close(sc->vnode, sc->flags & MD_READONLY ? 948 FREAD : (FREAD|FWRITE), sc->cred, td); 949 if (sc->cred != NULL) 950 crfree(sc->cred); 951 if (sc->object != NULL) 952 vm_object_deallocate(sc->object); 953 if (sc->indir) 954 destroy_indir(sc, sc->indir); 955 if (sc->uma) 956 uma_zdestroy(sc->uma); 957 958 /* XXX: LOCK(unique unit numbers) */ 959 LIST_REMOVE(sc, list); 960 /* XXX: UNLOCK(unique unit numbers) */ 961 free(sc, M_MD); 962 return (0); 963 } 964 965 static int 966 mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td) 967 { 968 vm_ooffset_t npage; 969 int error; 970 971 GIANT_REQUIRED; 972 973 /* 974 * Range check. Disallow negative sizes or any size less then the 975 * size of a page. Then round to a page. 976 */ 977 if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0) 978 return (EDOM); 979 980 /* 981 * Allocate an OBJT_SWAP object. 982 * 983 * Note the truncation. 984 */ 985 986 npage = mdio->md_mediasize / PAGE_SIZE; 987 if (mdio->md_fwsectors != 0) 988 sc->fwsectors = mdio->md_fwsectors; 989 if (mdio->md_fwheads != 0) 990 sc->fwheads = mdio->md_fwheads; 991 sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage, 992 VM_PROT_DEFAULT, 0); 993 if (sc->object == NULL) 994 return (ENOMEM); 995 sc->flags = mdio->md_options & MD_FORCE; 996 if (mdio->md_options & MD_RESERVE) { 997 if (swap_pager_reserve(sc->object, 0, npage) < 0) { 998 vm_object_deallocate(sc->object); 999 sc->object = NULL; 1000 return (EDOM); 1001 } 1002 } 1003 error = mdsetcred(sc, td->td_ucred); 1004 if (error != 0) { 1005 vm_object_deallocate(sc->object); 1006 sc->object = NULL; 1007 } 1008 return (error); 1009 } 1010 1011 static int 1012 mddetach(int unit, struct thread *td) 1013 { 1014 struct md_s *sc; 1015 1016 sc = mdfind(unit); 1017 if (sc == NULL) 1018 return (ENOENT); 1019 if (sc->opencount != 0 && !(sc->flags & MD_FORCE)) 1020 return (EBUSY); 1021 switch(sc->type) { 1022 case MD_VNODE: 1023 case MD_SWAP: 1024 case MD_MALLOC: 1025 case MD_PRELOAD: 1026 return (mddestroy(sc, td)); 1027 default: 1028 return (EOPNOTSUPP); 1029 } 1030 } 1031 1032 static int 1033 mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 1034 { 1035 struct md_ioctl *mdio; 1036 struct md_s *sc; 1037 int error, i; 1038 1039 if (md_debug) 1040 printf("mdctlioctl(%s %lx %p %x %p)\n", 1041 devtoname(dev), cmd, addr, flags, td); 1042 1043 /* 1044 * We assert the version number in the individual ioctl 1045 * handlers instead of out here because (a) it is possible we 1046 * may add another ioctl in the future which doesn't read an 1047 * mdio, and (b) the correct return value for an unknown ioctl 1048 * is ENOIOCTL, not EINVAL. 1049 */ 1050 mdio = (struct md_ioctl *)addr; 1051 switch (cmd) { 1052 case MDIOCATTACH: 1053 if (mdio->md_version != MDIOVERSION) 1054 return (EINVAL); 1055 switch (mdio->md_type) { 1056 case MD_MALLOC: 1057 case MD_PRELOAD: 1058 case MD_VNODE: 1059 case MD_SWAP: 1060 break; 1061 default: 1062 return (EINVAL); 1063 } 1064 if (mdio->md_options & MD_AUTOUNIT) { 1065 sc = mdnew(-1); 1066 mdio->md_unit = sc->unit; 1067 } else { 1068 sc = mdnew(mdio->md_unit); 1069 if (sc == NULL) 1070 return (EBUSY); 1071 } 1072 sc->type = mdio->md_type; 1073 sc->mediasize = mdio->md_mediasize; 1074 if (mdio->md_sectorsize == 0) 1075 sc->sectorsize = DEV_BSIZE; 1076 else 1077 sc->sectorsize = mdio->md_sectorsize; 1078 error = EDOOFUS; 1079 switch (sc->type) { 1080 case MD_MALLOC: 1081 error = mdcreate_malloc(sc, mdio); 1082 break; 1083 case MD_PRELOAD: 1084 error = mdcreate_preload(sc, mdio); 1085 break; 1086 case MD_VNODE: 1087 error = mdcreate_vnode(sc, mdio, td); 1088 break; 1089 case MD_SWAP: 1090 error = mdcreate_swap(sc, mdio, td); 1091 break; 1092 } 1093 if (error != 0) { 1094 mddestroy(sc, td); 1095 return (error); 1096 } 1097 mdinit(sc); 1098 return (0); 1099 case MDIOCDETACH: 1100 if (mdio->md_version != MDIOVERSION) 1101 return (EINVAL); 1102 if (mdio->md_mediasize != 0 || mdio->md_options != 0) 1103 return (EINVAL); 1104 return (mddetach(mdio->md_unit, td)); 1105 case MDIOCQUERY: 1106 if (mdio->md_version != MDIOVERSION) 1107 return (EINVAL); 1108 sc = mdfind(mdio->md_unit); 1109 if (sc == NULL) 1110 return (ENOENT); 1111 mdio->md_type = sc->type; 1112 mdio->md_options = sc->flags; 1113 mdio->md_mediasize = sc->mediasize; 1114 mdio->md_sectorsize = sc->sectorsize; 1115 if (sc->type == MD_VNODE) { 1116 error = copyout(sc->file, mdio->md_file, 1117 strlen(sc->file) + 1); 1118 if (error != 0) 1119 return (error); 1120 } 1121 return (0); 1122 case MDIOCLIST: 1123 if (mdio->md_version != MDIOVERSION) 1124 return (EINVAL); 1125 i = 1; 1126 LIST_FOREACH(sc, &md_softc_list, list) { 1127 if (i == MDNPAD - 1) 1128 mdio->md_pad[i] = -1; 1129 else 1130 mdio->md_pad[i++] = sc->unit; 1131 } 1132 mdio->md_pad[0] = i - 1; 1133 return (0); 1134 default: 1135 return (ENOIOCTL); 1136 }; 1137 return (ENOIOCTL); 1138 } 1139 1140 static void 1141 md_preloaded(u_char *image, size_t length) 1142 { 1143 struct md_s *sc; 1144 1145 sc = mdnew(-1); 1146 if (sc == NULL) 1147 return; 1148 sc->type = MD_PRELOAD; 1149 sc->mediasize = length; 1150 sc->sectorsize = DEV_BSIZE; 1151 sc->pl_ptr = image; 1152 sc->pl_len = length; 1153 #ifdef MD_ROOT 1154 if (sc->unit == 0) 1155 rootdevnames[0] = "ufs:/dev/md0"; 1156 #endif 1157 mdinit(sc); 1158 } 1159 1160 static void 1161 g_md_init(struct g_class *mp __unused) 1162 { 1163 1164 caddr_t mod; 1165 caddr_t c; 1166 u_char *ptr, *name, *type; 1167 unsigned len; 1168 1169 mod = NULL; 1170 g_topology_unlock(); 1171 #ifdef MD_ROOT_SIZE 1172 md_preloaded(mfs_root, MD_ROOT_SIZE*1024); 1173 #endif 1174 while ((mod = preload_search_next_name(mod)) != NULL) { 1175 name = (char *)preload_search_info(mod, MODINFO_NAME); 1176 type = (char *)preload_search_info(mod, MODINFO_TYPE); 1177 if (name == NULL) 1178 continue; 1179 if (type == NULL) 1180 continue; 1181 if (strcmp(type, "md_image") && strcmp(type, "mfs_root")) 1182 continue; 1183 c = preload_search_info(mod, MODINFO_ADDR); 1184 ptr = *(u_char **)c; 1185 c = preload_search_info(mod, MODINFO_SIZE); 1186 len = *(size_t *)c; 1187 printf("%s%d: Preloaded image <%s> %d bytes at %p\n", 1188 MD_NAME, mdunits, name, len, ptr); 1189 md_preloaded(ptr, len); 1190 } 1191 status_dev = make_dev(&mdctl_cdevsw, MAXMINOR, UID_ROOT, GID_WHEEL, 1192 0600, MDCTL_NAME); 1193 g_topology_lock(); 1194 } 1195 1196 static void 1197 g_md_fini(struct g_class *mp __unused) 1198 { 1199 1200 if (status_dev != NULL) 1201 destroy_dev(status_dev); 1202 } 1203