1 /*- 2 * ---------------------------------------------------------------------------- 3 * "THE BEER-WARE LICENSE" (Revision 42): 4 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you 5 * can do whatever you want with this stuff. If we meet some day, and you think 6 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 7 * ---------------------------------------------------------------------------- 8 * 9 * $FreeBSD$ 10 * 11 */ 12 13 /*- 14 * The following functions are based in the vn(4) driver: mdstart_swap(), 15 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(), 16 * and as such under the following copyright: 17 * 18 * Copyright (c) 1988 University of Utah. 19 * Copyright (c) 1990, 1993 20 * The Regents of the University of California. All rights reserved. 21 * 22 * This code is derived from software contributed to Berkeley by 23 * the Systems Programming Group of the University of Utah Computer 24 * Science Department. 25 * 26 * Redistribution and use in source and binary forms, with or without 27 * modification, are permitted provided that the following conditions 28 * are met: 29 * 1. Redistributions of source code must retain the above copyright 30 * notice, this list of conditions and the following disclaimer. 31 * 2. Redistributions in binary form must reproduce the above copyright 32 * notice, this list of conditions and the following disclaimer in the 33 * documentation and/or other materials provided with the distribution. 34 * 4. Neither the name of the University nor the names of its contributors 35 * may be used to endorse or promote products derived from this software 36 * without specific prior written permission. 37 * 38 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 39 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 40 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 41 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 48 * SUCH DAMAGE. 49 * 50 * from: Utah Hdr: vn.c 1.13 94/04/02 51 * 52 * from: @(#)vn.c 8.6 (Berkeley) 4/1/94 53 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03 54 */ 55 56 #include "opt_geom.h" 57 #include "opt_md.h" 58 59 #include <sys/param.h> 60 #include <sys/systm.h> 61 #include <sys/bio.h> 62 #include <sys/conf.h> 63 #include <sys/fcntl.h> 64 #include <sys/kernel.h> 65 #include <sys/kthread.h> 66 #include <sys/linker.h> 67 #include <sys/lock.h> 68 #include <sys/malloc.h> 69 #include <sys/mdioctl.h> 70 #include <sys/mutex.h> 71 #include <sys/sx.h> 72 #include <sys/namei.h> 73 #include <sys/proc.h> 74 #include <sys/queue.h> 75 #include <sys/sched.h> 76 #include <sys/sf_buf.h> 77 #include <sys/sysctl.h> 78 #include <sys/vnode.h> 79 80 #include <geom/geom.h> 81 82 #include <vm/vm.h> 83 #include <vm/vm_object.h> 84 #include <vm/vm_page.h> 85 #include <vm/vm_pager.h> 86 #include <vm/swap_pager.h> 87 #include <vm/uma.h> 88 89 #define MD_MODVER 1 90 91 #define MD_SHUTDOWN 0x10000 /* Tell worker thread to terminate. */ 92 93 #ifndef MD_NSECT 94 #define MD_NSECT (10000 * 2) 95 #endif 96 97 static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk"); 98 static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors"); 99 100 static int md_debug; 101 SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, ""); 102 103 #if defined(MD_ROOT) && defined(MD_ROOT_SIZE) 104 /* 105 * Preloaded image gets put here. 106 * Applications that patch the object with the image can determine 107 * the size looking at the start and end markers (strings), 108 * so we want them contiguous. 109 */ 110 static struct { 111 u_char start[MD_ROOT_SIZE*1024]; 112 u_char end[128]; 113 } mfs_root = { 114 .start = "MFS Filesystem goes here", 115 .end = "MFS Filesystem had better STOP here", 116 }; 117 #endif 118 119 static g_init_t g_md_init; 120 static g_fini_t g_md_fini; 121 static g_start_t g_md_start; 122 static g_access_t g_md_access; 123 124 static int mdunits; 125 static struct cdev *status_dev = 0; 126 static struct sx md_sx; 127 128 static d_ioctl_t mdctlioctl; 129 130 static struct cdevsw mdctl_cdevsw = { 131 .d_version = D_VERSION, 132 .d_ioctl = mdctlioctl, 133 .d_name = MD_NAME, 134 }; 135 136 struct g_class g_md_class = { 137 .name = "MD", 138 .version = G_VERSION, 139 .init = g_md_init, 140 .fini = g_md_fini, 141 .start = g_md_start, 142 .access = g_md_access, 143 }; 144 145 DECLARE_GEOM_CLASS(g_md_class, g_md); 146 147 148 static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list); 149 150 #define NINDIR (PAGE_SIZE / sizeof(uintptr_t)) 151 #define NMASK (NINDIR-1) 152 static int nshift; 153 154 struct indir { 155 uintptr_t *array; 156 u_int total; 157 u_int used; 158 u_int shift; 159 }; 160 161 struct md_s { 162 int unit; 163 LIST_ENTRY(md_s) list; 164 struct bio_queue_head bio_queue; 165 struct mtx queue_mtx; 166 struct cdev *dev; 167 enum md_types type; 168 off_t mediasize; 169 unsigned sectorsize; 170 unsigned opencount; 171 unsigned fwheads; 172 unsigned fwsectors; 173 unsigned flags; 174 char name[20]; 175 struct proc *procp; 176 struct g_geom *gp; 177 struct g_provider *pp; 178 int (*start)(struct md_s *sc, struct bio *bp); 179 180 /* MD_MALLOC related fields */ 181 struct indir *indir; 182 uma_zone_t uma; 183 184 /* MD_PRELOAD related fields */ 185 u_char *pl_ptr; 186 size_t pl_len; 187 188 /* MD_VNODE related fields */ 189 struct vnode *vnode; 190 char file[PATH_MAX]; 191 struct ucred *cred; 192 193 /* MD_SWAP related fields */ 194 vm_object_t object; 195 }; 196 197 static struct indir * 198 new_indir(u_int shift) 199 { 200 struct indir *ip; 201 202 ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO); 203 if (ip == NULL) 204 return (NULL); 205 ip->array = malloc(sizeof(uintptr_t) * NINDIR, 206 M_MDSECT, M_NOWAIT | M_ZERO); 207 if (ip->array == NULL) { 208 free(ip, M_MD); 209 return (NULL); 210 } 211 ip->total = NINDIR; 212 ip->shift = shift; 213 return (ip); 214 } 215 216 static void 217 del_indir(struct indir *ip) 218 { 219 220 free(ip->array, M_MDSECT); 221 free(ip, M_MD); 222 } 223 224 static void 225 destroy_indir(struct md_s *sc, struct indir *ip) 226 { 227 int i; 228 229 for (i = 0; i < NINDIR; i++) { 230 if (!ip->array[i]) 231 continue; 232 if (ip->shift) 233 destroy_indir(sc, (struct indir*)(ip->array[i])); 234 else if (ip->array[i] > 255) 235 uma_zfree(sc->uma, (void *)(ip->array[i])); 236 } 237 del_indir(ip); 238 } 239 240 /* 241 * This function does the math and allocates the top level "indir" structure 242 * for a device of "size" sectors. 243 */ 244 245 static struct indir * 246 dimension(off_t size) 247 { 248 off_t rcnt; 249 struct indir *ip; 250 int i, layer; 251 252 rcnt = size; 253 layer = 0; 254 while (rcnt > NINDIR) { 255 rcnt /= NINDIR; 256 layer++; 257 } 258 /* figure out log2(NINDIR) */ 259 for (i = NINDIR, nshift = -1; i; nshift++) 260 i >>= 1; 261 262 /* 263 * XXX: the top layer is probably not fully populated, so we allocate 264 * too much space for ip->array in here. 265 */ 266 ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO); 267 ip->array = malloc(sizeof(uintptr_t) * NINDIR, 268 M_MDSECT, M_WAITOK | M_ZERO); 269 ip->total = NINDIR; 270 ip->shift = layer * nshift; 271 return (ip); 272 } 273 274 /* 275 * Read a given sector 276 */ 277 278 static uintptr_t 279 s_read(struct indir *ip, off_t offset) 280 { 281 struct indir *cip; 282 int idx; 283 uintptr_t up; 284 285 if (md_debug > 1) 286 printf("s_read(%jd)\n", (intmax_t)offset); 287 up = 0; 288 for (cip = ip; cip != NULL;) { 289 if (cip->shift) { 290 idx = (offset >> cip->shift) & NMASK; 291 up = cip->array[idx]; 292 cip = (struct indir *)up; 293 continue; 294 } 295 idx = offset & NMASK; 296 return (cip->array[idx]); 297 } 298 return (0); 299 } 300 301 /* 302 * Write a given sector, prune the tree if the value is 0 303 */ 304 305 static int 306 s_write(struct indir *ip, off_t offset, uintptr_t ptr) 307 { 308 struct indir *cip, *lip[10]; 309 int idx, li; 310 uintptr_t up; 311 312 if (md_debug > 1) 313 printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr); 314 up = 0; 315 li = 0; 316 cip = ip; 317 for (;;) { 318 lip[li++] = cip; 319 if (cip->shift) { 320 idx = (offset >> cip->shift) & NMASK; 321 up = cip->array[idx]; 322 if (up != 0) { 323 cip = (struct indir *)up; 324 continue; 325 } 326 /* Allocate branch */ 327 cip->array[idx] = 328 (uintptr_t)new_indir(cip->shift - nshift); 329 if (cip->array[idx] == 0) 330 return (ENOSPC); 331 cip->used++; 332 up = cip->array[idx]; 333 cip = (struct indir *)up; 334 continue; 335 } 336 /* leafnode */ 337 idx = offset & NMASK; 338 up = cip->array[idx]; 339 if (up != 0) 340 cip->used--; 341 cip->array[idx] = ptr; 342 if (ptr != 0) 343 cip->used++; 344 break; 345 } 346 if (cip->used != 0 || li == 1) 347 return (0); 348 li--; 349 while (cip->used == 0 && cip != ip) { 350 li--; 351 idx = (offset >> lip[li]->shift) & NMASK; 352 up = lip[li]->array[idx]; 353 KASSERT(up == (uintptr_t)cip, ("md screwed up")); 354 del_indir(cip); 355 lip[li]->array[idx] = 0; 356 lip[li]->used--; 357 cip = lip[li]; 358 } 359 return (0); 360 } 361 362 363 static int 364 g_md_access(struct g_provider *pp, int r, int w, int e) 365 { 366 struct md_s *sc; 367 368 sc = pp->geom->softc; 369 if (sc == NULL) 370 return (ENXIO); 371 r += pp->acr; 372 w += pp->acw; 373 e += pp->ace; 374 if ((sc->flags & MD_READONLY) != 0 && w > 0) 375 return (EROFS); 376 if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) { 377 sc->opencount = 1; 378 } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) { 379 sc->opencount = 0; 380 } 381 return (0); 382 } 383 384 static void 385 g_md_start(struct bio *bp) 386 { 387 struct md_s *sc; 388 389 sc = bp->bio_to->geom->softc; 390 mtx_lock(&sc->queue_mtx); 391 bioq_disksort(&sc->bio_queue, bp); 392 mtx_unlock(&sc->queue_mtx); 393 wakeup(sc); 394 } 395 396 static int 397 mdstart_malloc(struct md_s *sc, struct bio *bp) 398 { 399 int i, error; 400 u_char *dst; 401 off_t secno, nsec, uc; 402 uintptr_t sp, osp; 403 404 nsec = bp->bio_length / sc->sectorsize; 405 secno = bp->bio_offset / sc->sectorsize; 406 dst = bp->bio_data; 407 error = 0; 408 while (nsec--) { 409 osp = s_read(sc->indir, secno); 410 if (bp->bio_cmd == BIO_DELETE) { 411 if (osp != 0) 412 error = s_write(sc->indir, secno, 0); 413 } else if (bp->bio_cmd == BIO_READ) { 414 if (osp == 0) 415 bzero(dst, sc->sectorsize); 416 else if (osp <= 255) 417 for (i = 0; i < sc->sectorsize; i++) 418 dst[i] = osp; 419 else 420 bcopy((void *)osp, dst, sc->sectorsize); 421 osp = 0; 422 } else if (bp->bio_cmd == BIO_WRITE) { 423 if (sc->flags & MD_COMPRESS) { 424 uc = dst[0]; 425 for (i = 1; i < sc->sectorsize; i++) 426 if (dst[i] != uc) 427 break; 428 } else { 429 i = 0; 430 uc = 0; 431 } 432 if (i == sc->sectorsize) { 433 if (osp != uc) 434 error = s_write(sc->indir, secno, uc); 435 } else { 436 if (osp <= 255) { 437 sp = (uintptr_t)uma_zalloc(sc->uma, 438 M_NOWAIT); 439 if (sp == 0) { 440 error = ENOSPC; 441 break; 442 } 443 bcopy(dst, (void *)sp, sc->sectorsize); 444 error = s_write(sc->indir, secno, sp); 445 } else { 446 bcopy(dst, (void *)osp, sc->sectorsize); 447 osp = 0; 448 } 449 } 450 } else { 451 error = EOPNOTSUPP; 452 } 453 if (osp > 255) 454 uma_zfree(sc->uma, (void*)osp); 455 if (error != 0) 456 break; 457 secno++; 458 dst += sc->sectorsize; 459 } 460 bp->bio_resid = 0; 461 return (error); 462 } 463 464 static int 465 mdstart_preload(struct md_s *sc, struct bio *bp) 466 { 467 468 switch (bp->bio_cmd) { 469 case BIO_READ: 470 bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data, 471 bp->bio_length); 472 break; 473 case BIO_WRITE: 474 bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset, 475 bp->bio_length); 476 break; 477 } 478 bp->bio_resid = 0; 479 return (0); 480 } 481 482 static int 483 mdstart_vnode(struct md_s *sc, struct bio *bp) 484 { 485 int error; 486 struct uio auio; 487 struct iovec aiov; 488 struct mount *mp; 489 490 mtx_assert(&Giant, MA_OWNED); 491 /* 492 * VNODE I/O 493 * 494 * If an error occurs, we set BIO_ERROR but we do not set 495 * B_INVAL because (for a write anyway), the buffer is 496 * still valid. 497 */ 498 499 bzero(&auio, sizeof(auio)); 500 501 aiov.iov_base = bp->bio_data; 502 aiov.iov_len = bp->bio_length; 503 auio.uio_iov = &aiov; 504 auio.uio_iovcnt = 1; 505 auio.uio_offset = (vm_ooffset_t)bp->bio_offset; 506 auio.uio_segflg = UIO_SYSSPACE; 507 if(bp->bio_cmd == BIO_READ) 508 auio.uio_rw = UIO_READ; 509 else if(bp->bio_cmd == BIO_WRITE) 510 auio.uio_rw = UIO_WRITE; 511 else 512 panic("wrong BIO_OP in mdstart_vnode"); 513 auio.uio_resid = bp->bio_length; 514 auio.uio_td = curthread; 515 /* 516 * When reading set IO_DIRECT to try to avoid double-caching 517 * the data. When writing IO_DIRECT is not optimal. 518 */ 519 if (bp->bio_cmd == BIO_READ) { 520 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread); 521 error = VOP_READ(sc->vnode, &auio, IO_DIRECT, sc->cred); 522 VOP_UNLOCK(sc->vnode, 0, curthread); 523 } else { 524 (void) vn_start_write(sc->vnode, &mp, V_WAIT); 525 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread); 526 error = VOP_WRITE(sc->vnode, &auio, 527 sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred); 528 VOP_UNLOCK(sc->vnode, 0, curthread); 529 vn_finished_write(mp); 530 } 531 bp->bio_resid = auio.uio_resid; 532 return (error); 533 } 534 535 static int 536 mdstart_swap(struct md_s *sc, struct bio *bp) 537 { 538 struct sf_buf *sf; 539 int rv, offs, len, lastend; 540 vm_pindex_t i, lastp; 541 vm_page_t m; 542 u_char *p; 543 544 p = bp->bio_data; 545 546 /* 547 * offs is the offset at which to start operating on the 548 * next (ie, first) page. lastp is the last page on 549 * which we're going to operate. lastend is the ending 550 * position within that last page (ie, PAGE_SIZE if 551 * we're operating on complete aligned pages). 552 */ 553 offs = bp->bio_offset % PAGE_SIZE; 554 lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE; 555 lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1; 556 557 rv = VM_PAGER_OK; 558 VM_OBJECT_LOCK(sc->object); 559 vm_object_pip_add(sc->object, 1); 560 for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) { 561 len = ((i == lastp) ? lastend : PAGE_SIZE) - offs; 562 563 m = vm_page_grab(sc->object, i, 564 VM_ALLOC_NORMAL|VM_ALLOC_RETRY); 565 VM_OBJECT_UNLOCK(sc->object); 566 sched_pin(); 567 sf = sf_buf_alloc(m, SFB_CPUPRIVATE); 568 VM_OBJECT_LOCK(sc->object); 569 if (bp->bio_cmd == BIO_READ) { 570 if (m->valid != VM_PAGE_BITS_ALL) 571 rv = vm_pager_get_pages(sc->object, &m, 1, 0); 572 if (rv == VM_PAGER_ERROR) { 573 sf_buf_free(sf); 574 sched_unpin(); 575 vm_page_lock_queues(); 576 vm_page_wakeup(m); 577 vm_page_unlock_queues(); 578 break; 579 } 580 bcopy((void *)(sf_buf_kva(sf) + offs), p, len); 581 } else if (bp->bio_cmd == BIO_WRITE) { 582 if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL) 583 rv = vm_pager_get_pages(sc->object, &m, 1, 0); 584 if (rv == VM_PAGER_ERROR) { 585 sf_buf_free(sf); 586 sched_unpin(); 587 vm_page_lock_queues(); 588 vm_page_wakeup(m); 589 vm_page_unlock_queues(); 590 break; 591 } 592 bcopy(p, (void *)(sf_buf_kva(sf) + offs), len); 593 m->valid = VM_PAGE_BITS_ALL; 594 #if 0 595 } else if (bp->bio_cmd == BIO_DELETE) { 596 if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL) 597 rv = vm_pager_get_pages(sc->object, &m, 1, 0); 598 if (rv == VM_PAGER_ERROR) { 599 sf_buf_free(sf); 600 sched_unpin(); 601 vm_page_lock_queues(); 602 vm_page_wakeup(m); 603 vm_page_unlock_queues(); 604 break; 605 } 606 bzero((void *)(sf_buf_kva(sf) + offs), len); 607 vm_page_dirty(m); 608 m->valid = VM_PAGE_BITS_ALL; 609 #endif 610 } 611 sf_buf_free(sf); 612 sched_unpin(); 613 vm_page_lock_queues(); 614 vm_page_wakeup(m); 615 vm_page_activate(m); 616 if (bp->bio_cmd == BIO_WRITE) 617 vm_page_dirty(m); 618 vm_page_unlock_queues(); 619 620 /* Actions on further pages start at offset 0 */ 621 p += PAGE_SIZE - offs; 622 offs = 0; 623 #if 0 624 if (bootverbose || bp->bio_offset / PAGE_SIZE < 17) 625 printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n", 626 m->wire_count, m->busy, 627 m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i); 628 #endif 629 } 630 vm_object_pip_subtract(sc->object, 1); 631 vm_object_set_writeable_dirty(sc->object); 632 VM_OBJECT_UNLOCK(sc->object); 633 return (rv != VM_PAGER_ERROR ? 0 : ENOSPC); 634 } 635 636 static void 637 md_kthread(void *arg) 638 { 639 struct md_s *sc; 640 struct bio *bp; 641 int error, hasgiant; 642 643 sc = arg; 644 mtx_lock_spin(&sched_lock); 645 sched_prio(curthread, PRIBIO); 646 mtx_unlock_spin(&sched_lock); 647 648 switch (sc->type) { 649 case MD_VNODE: 650 mtx_lock(&Giant); 651 hasgiant = 1; 652 break; 653 case MD_MALLOC: 654 case MD_PRELOAD: 655 case MD_SWAP: 656 default: 657 hasgiant = 0; 658 break; 659 } 660 661 for (;;) { 662 if (sc->flags & MD_SHUTDOWN) { 663 sc->procp = NULL; 664 wakeup(&sc->procp); 665 if (hasgiant) 666 mtx_unlock(&Giant); 667 kthread_exit(0); 668 } 669 mtx_lock(&sc->queue_mtx); 670 bp = bioq_takefirst(&sc->bio_queue); 671 if (!bp) { 672 msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0); 673 continue; 674 } 675 mtx_unlock(&sc->queue_mtx); 676 if (bp->bio_cmd == BIO_GETATTR) { 677 if (sc->fwsectors && sc->fwheads && 678 (g_handleattr_int(bp, "GEOM::fwsectors", 679 sc->fwsectors) || 680 g_handleattr_int(bp, "GEOM::fwheads", 681 sc->fwheads))) 682 error = -1; 683 else 684 error = EOPNOTSUPP; 685 } else { 686 error = sc->start(sc, bp); 687 } 688 689 if (error != -1) { 690 bp->bio_completed = bp->bio_length; 691 g_io_deliver(bp, error); 692 } 693 } 694 } 695 696 static struct md_s * 697 mdfind(int unit) 698 { 699 struct md_s *sc; 700 701 LIST_FOREACH(sc, &md_softc_list, list) { 702 if (sc->unit == unit) 703 break; 704 } 705 return (sc); 706 } 707 708 static struct md_s * 709 mdnew(int unit, int *errp, enum md_types type) 710 { 711 struct md_s *sc, *sc2; 712 int error, max = -1; 713 714 *errp = 0; 715 LIST_FOREACH(sc2, &md_softc_list, list) { 716 if (unit == sc2->unit) { 717 *errp = EBUSY; 718 return (NULL); 719 } 720 if (unit == -1 && sc2->unit > max) 721 max = sc2->unit; 722 } 723 if (unit == -1) 724 unit = max + 1; 725 sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO); 726 sc->type = type; 727 bioq_init(&sc->bio_queue); 728 mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF); 729 sc->unit = unit; 730 sprintf(sc->name, "md%d", unit); 731 LIST_INSERT_HEAD(&md_softc_list, sc, list); 732 error = kthread_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name); 733 if (error == 0) 734 return (sc); 735 LIST_REMOVE(sc, list); 736 mtx_destroy(&sc->queue_mtx); 737 free(sc, M_MD); 738 *errp = error; 739 return (NULL); 740 } 741 742 static void 743 mdinit(struct md_s *sc) 744 { 745 746 struct g_geom *gp; 747 struct g_provider *pp; 748 749 g_topology_lock(); 750 gp = g_new_geomf(&g_md_class, "md%d", sc->unit); 751 gp->softc = sc; 752 pp = g_new_providerf(gp, "md%d", sc->unit); 753 pp->mediasize = sc->mediasize; 754 pp->sectorsize = sc->sectorsize; 755 sc->gp = gp; 756 sc->pp = pp; 757 g_error_provider(pp, 0); 758 g_topology_unlock(); 759 } 760 761 /* 762 * XXX: we should check that the range they feed us is mapped. 763 * XXX: we should implement read-only. 764 */ 765 766 static int 767 mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio) 768 { 769 770 if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE)) 771 return (EINVAL); 772 sc->flags = mdio->md_options & MD_FORCE; 773 /* Cast to pointer size, then to pointer to avoid warning */ 774 sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base; 775 sc->pl_len = (size_t)sc->mediasize; 776 return (0); 777 } 778 779 780 static int 781 mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio) 782 { 783 uintptr_t sp; 784 int error; 785 off_t u; 786 787 error = 0; 788 if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE)) 789 return (EINVAL); 790 if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize)) 791 return (EINVAL); 792 /* Compression doesn't make sense if we have reserved space */ 793 if (mdio->md_options & MD_RESERVE) 794 mdio->md_options &= ~MD_COMPRESS; 795 if (mdio->md_fwsectors != 0) 796 sc->fwsectors = mdio->md_fwsectors; 797 if (mdio->md_fwheads != 0) 798 sc->fwheads = mdio->md_fwheads; 799 sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE); 800 sc->indir = dimension(sc->mediasize / sc->sectorsize); 801 sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL, 802 0x1ff, 0); 803 if (mdio->md_options & MD_RESERVE) { 804 off_t nsectors; 805 806 nsectors = sc->mediasize / sc->sectorsize; 807 for (u = 0; u < nsectors; u++) { 808 sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO); 809 if (sp != 0) 810 error = s_write(sc->indir, u, sp); 811 else 812 error = ENOMEM; 813 if (error != 0) 814 break; 815 } 816 } 817 return (error); 818 } 819 820 821 static int 822 mdsetcred(struct md_s *sc, struct ucred *cred) 823 { 824 char *tmpbuf; 825 int error = 0; 826 827 /* 828 * Set credits in our softc 829 */ 830 831 if (sc->cred) 832 crfree(sc->cred); 833 sc->cred = crhold(cred); 834 835 /* 836 * Horrible kludge to establish credentials for NFS XXX. 837 */ 838 839 if (sc->vnode) { 840 struct uio auio; 841 struct iovec aiov; 842 843 tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK); 844 bzero(&auio, sizeof(auio)); 845 846 aiov.iov_base = tmpbuf; 847 aiov.iov_len = sc->sectorsize; 848 auio.uio_iov = &aiov; 849 auio.uio_iovcnt = 1; 850 auio.uio_offset = 0; 851 auio.uio_rw = UIO_READ; 852 auio.uio_segflg = UIO_SYSSPACE; 853 auio.uio_resid = aiov.iov_len; 854 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread); 855 error = VOP_READ(sc->vnode, &auio, 0, sc->cred); 856 VOP_UNLOCK(sc->vnode, 0, curthread); 857 free(tmpbuf, M_TEMP); 858 } 859 return (error); 860 } 861 862 static int 863 mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td) 864 { 865 struct vattr vattr; 866 struct nameidata nd; 867 int error, flags; 868 869 error = copyinstr(mdio->md_file, sc->file, sizeof(sc->file), NULL); 870 if (error != 0) 871 return (error); 872 flags = FREAD|FWRITE; 873 /* 874 * If the user specified that this is a read only device, unset the 875 * FWRITE mask before trying to open the backing store. 876 */ 877 if ((mdio->md_options & MD_READONLY) != 0) 878 flags &= ~FWRITE; 879 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td); 880 error = vn_open(&nd, &flags, 0, -1); 881 if (error != 0) 882 return (error); 883 NDFREE(&nd, NDF_ONLY_PNBUF); 884 if (nd.ni_vp->v_type != VREG || 885 (error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td))) { 886 VOP_UNLOCK(nd.ni_vp, 0, td); 887 (void)vn_close(nd.ni_vp, flags, td->td_ucred, td); 888 return (error ? error : EINVAL); 889 } 890 VOP_UNLOCK(nd.ni_vp, 0, td); 891 892 if (mdio->md_fwsectors != 0) 893 sc->fwsectors = mdio->md_fwsectors; 894 if (mdio->md_fwheads != 0) 895 sc->fwheads = mdio->md_fwheads; 896 sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC); 897 if (!(flags & FWRITE)) 898 sc->flags |= MD_READONLY; 899 sc->vnode = nd.ni_vp; 900 901 error = mdsetcred(sc, td->td_ucred); 902 if (error != 0) { 903 (void)vn_close(nd.ni_vp, flags, td->td_ucred, td); 904 return (error); 905 } 906 return (0); 907 } 908 909 static int 910 mddestroy(struct md_s *sc, struct thread *td) 911 { 912 913 914 if (sc->gp) { 915 sc->gp->softc = NULL; 916 g_topology_lock(); 917 g_wither_geom(sc->gp, ENXIO); 918 g_topology_unlock(); 919 sc->gp = NULL; 920 sc->pp = NULL; 921 } 922 sc->flags |= MD_SHUTDOWN; 923 wakeup(sc); 924 while (sc->procp != NULL) 925 tsleep(&sc->procp, PRIBIO, "mddestroy", hz / 10); 926 mtx_destroy(&sc->queue_mtx); 927 if (sc->vnode != NULL) { 928 mtx_lock(&Giant); 929 (void)vn_close(sc->vnode, sc->flags & MD_READONLY ? 930 FREAD : (FREAD|FWRITE), sc->cred, td); 931 mtx_unlock(&Giant); 932 } 933 if (sc->cred != NULL) 934 crfree(sc->cred); 935 if (sc->object != NULL) 936 vm_object_deallocate(sc->object); 937 if (sc->indir) 938 destroy_indir(sc, sc->indir); 939 if (sc->uma) 940 uma_zdestroy(sc->uma); 941 942 LIST_REMOVE(sc, list); 943 free(sc, M_MD); 944 return (0); 945 } 946 947 static int 948 mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td) 949 { 950 vm_ooffset_t npage; 951 int error; 952 953 /* 954 * Range check. Disallow negative sizes or any size less then the 955 * size of a page. Then round to a page. 956 */ 957 if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0) 958 return (EDOM); 959 960 /* 961 * Allocate an OBJT_SWAP object. 962 * 963 * Note the truncation. 964 */ 965 966 npage = mdio->md_mediasize / PAGE_SIZE; 967 if (mdio->md_fwsectors != 0) 968 sc->fwsectors = mdio->md_fwsectors; 969 if (mdio->md_fwheads != 0) 970 sc->fwheads = mdio->md_fwheads; 971 sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage, 972 VM_PROT_DEFAULT, 0); 973 if (sc->object == NULL) 974 return (ENOMEM); 975 sc->flags = mdio->md_options & MD_FORCE; 976 if (mdio->md_options & MD_RESERVE) { 977 if (swap_pager_reserve(sc->object, 0, npage) < 0) { 978 vm_object_deallocate(sc->object); 979 sc->object = NULL; 980 return (EDOM); 981 } 982 } 983 error = mdsetcred(sc, td->td_ucred); 984 if (error != 0) { 985 vm_object_deallocate(sc->object); 986 sc->object = NULL; 987 } 988 return (error); 989 } 990 991 992 static int 993 xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 994 { 995 struct md_ioctl *mdio; 996 struct md_s *sc; 997 int error, i; 998 999 if (md_debug) 1000 printf("mdctlioctl(%s %lx %p %x %p)\n", 1001 devtoname(dev), cmd, addr, flags, td); 1002 1003 mdio = (struct md_ioctl *)addr; 1004 if (mdio->md_version != MDIOVERSION) 1005 return (EINVAL); 1006 1007 /* 1008 * We assert the version number in the individual ioctl 1009 * handlers instead of out here because (a) it is possible we 1010 * may add another ioctl in the future which doesn't read an 1011 * mdio, and (b) the correct return value for an unknown ioctl 1012 * is ENOIOCTL, not EINVAL. 1013 */ 1014 error = 0; 1015 switch (cmd) { 1016 case MDIOCATTACH: 1017 switch (mdio->md_type) { 1018 case MD_MALLOC: 1019 case MD_PRELOAD: 1020 case MD_VNODE: 1021 case MD_SWAP: 1022 break; 1023 default: 1024 return (EINVAL); 1025 } 1026 if (mdio->md_options & MD_AUTOUNIT) 1027 sc = mdnew(-1, &error, mdio->md_type); 1028 else 1029 sc = mdnew(mdio->md_unit, &error, mdio->md_type); 1030 if (sc == NULL) 1031 return (error); 1032 if (mdio->md_options & MD_AUTOUNIT) 1033 mdio->md_unit = sc->unit; 1034 sc->mediasize = mdio->md_mediasize; 1035 if (mdio->md_sectorsize == 0) 1036 sc->sectorsize = DEV_BSIZE; 1037 else 1038 sc->sectorsize = mdio->md_sectorsize; 1039 error = EDOOFUS; 1040 switch (sc->type) { 1041 case MD_MALLOC: 1042 sc->start = mdstart_malloc; 1043 error = mdcreate_malloc(sc, mdio); 1044 break; 1045 case MD_PRELOAD: 1046 sc->start = mdstart_preload; 1047 error = mdcreate_preload(sc, mdio); 1048 break; 1049 case MD_VNODE: 1050 sc->start = mdstart_vnode; 1051 error = mdcreate_vnode(sc, mdio, td); 1052 break; 1053 case MD_SWAP: 1054 sc->start = mdstart_swap; 1055 error = mdcreate_swap(sc, mdio, td); 1056 break; 1057 } 1058 if (error != 0) { 1059 mddestroy(sc, td); 1060 return (error); 1061 } 1062 1063 /* Prune off any residual fractional sector */ 1064 i = sc->mediasize % sc->sectorsize; 1065 sc->mediasize -= i; 1066 1067 mdinit(sc); 1068 return (0); 1069 case MDIOCDETACH: 1070 if (mdio->md_mediasize != 0 || mdio->md_options != 0) 1071 return (EINVAL); 1072 1073 sc = mdfind(mdio->md_unit); 1074 if (sc == NULL) 1075 return (ENOENT); 1076 if (sc->opencount != 0 && !(sc->flags & MD_FORCE)) 1077 return (EBUSY); 1078 return (mddestroy(sc, td)); 1079 case MDIOCQUERY: 1080 sc = mdfind(mdio->md_unit); 1081 if (sc == NULL) 1082 return (ENOENT); 1083 mdio->md_type = sc->type; 1084 mdio->md_options = sc->flags; 1085 mdio->md_mediasize = sc->mediasize; 1086 mdio->md_sectorsize = sc->sectorsize; 1087 if (sc->type == MD_VNODE) 1088 error = copyout(sc->file, mdio->md_file, 1089 strlen(sc->file) + 1); 1090 return (error); 1091 case MDIOCLIST: 1092 i = 1; 1093 LIST_FOREACH(sc, &md_softc_list, list) { 1094 if (i == MDNPAD - 1) 1095 mdio->md_pad[i] = -1; 1096 else 1097 mdio->md_pad[i++] = sc->unit; 1098 } 1099 mdio->md_pad[0] = i - 1; 1100 return (0); 1101 default: 1102 return (ENOIOCTL); 1103 }; 1104 } 1105 1106 static int 1107 mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 1108 { 1109 int error; 1110 1111 sx_xlock(&md_sx); 1112 error = xmdctlioctl(dev, cmd, addr, flags, td); 1113 sx_xunlock(&md_sx); 1114 return (error); 1115 } 1116 1117 static void 1118 md_preloaded(u_char *image, size_t length) 1119 { 1120 struct md_s *sc; 1121 int error; 1122 1123 sc = mdnew(-1, &error, MD_PRELOAD); 1124 if (sc == NULL) 1125 return; 1126 sc->mediasize = length; 1127 sc->sectorsize = DEV_BSIZE; 1128 sc->pl_ptr = image; 1129 sc->pl_len = length; 1130 sc->start = mdstart_preload; 1131 #ifdef MD_ROOT 1132 if (sc->unit == 0) 1133 rootdevnames[0] = "ufs:/dev/md0"; 1134 #endif 1135 mdinit(sc); 1136 } 1137 1138 static void 1139 g_md_init(struct g_class *mp __unused) 1140 { 1141 1142 caddr_t mod; 1143 caddr_t c; 1144 u_char *ptr, *name, *type; 1145 unsigned len; 1146 1147 mod = NULL; 1148 sx_init(&md_sx, "MD config lock"); 1149 g_topology_unlock(); 1150 #ifdef MD_ROOT_SIZE 1151 sx_xlock(&md_sx); 1152 md_preloaded(mfs_root.start, sizeof(mfs_root.start)); 1153 sx_xunlock(&md_sx); 1154 #endif 1155 /* XXX: are preload_* static or do they need Giant ? */ 1156 while ((mod = preload_search_next_name(mod)) != NULL) { 1157 name = (char *)preload_search_info(mod, MODINFO_NAME); 1158 if (name == NULL) 1159 continue; 1160 type = (char *)preload_search_info(mod, MODINFO_TYPE); 1161 if (type == NULL) 1162 continue; 1163 if (strcmp(type, "md_image") && strcmp(type, "mfs_root")) 1164 continue; 1165 c = preload_search_info(mod, MODINFO_ADDR); 1166 ptr = *(u_char **)c; 1167 c = preload_search_info(mod, MODINFO_SIZE); 1168 len = *(size_t *)c; 1169 printf("%s%d: Preloaded image <%s> %d bytes at %p\n", 1170 MD_NAME, mdunits, name, len, ptr); 1171 sx_xlock(&md_sx); 1172 md_preloaded(ptr, len); 1173 sx_xunlock(&md_sx); 1174 } 1175 status_dev = make_dev(&mdctl_cdevsw, MAXMINOR, UID_ROOT, GID_WHEEL, 1176 0600, MDCTL_NAME); 1177 g_topology_lock(); 1178 } 1179 1180 static void 1181 g_md_fini(struct g_class *mp __unused) 1182 { 1183 1184 sx_destroy(&md_sx); 1185 if (status_dev != NULL) 1186 destroy_dev(status_dev); 1187 } 1188