/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER	1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "MD disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "MD sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/* Image gets put here: */
static u_char mfs_root[MD_ROOT_SIZE*1024] = "MFS Filesystem goes here";
static u_char end_mfs_root[] __unused = "MFS Filesystem had better STOP here";
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;

static int mdunits;
static struct cdev *status_dev = 0;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);


static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static int mddestroy(struct md_s *sc, struct thread *td);

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_NOWAIT | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int i, layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}
	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}
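
/*
 * Worked example of the tree addressing used by s_read() and s_write()
 * above (a hypothetical helper, never compiled: the parameters are
 * assumed).  With PAGE_SIZE 4096 and 4-byte pointers, NINDIR is 1024 and
 * nshift ends up 10, so a 2000000-sector device gets a three-layer tree
 * with shifts 20, 10 and 0.  Sector 1050000 then resolves through the
 * per-layer indices 1, 1 and 400.
 */
#if 0
static void
s_read_example(struct indir *top)
{
	off_t secno = 1050000;		/* assumed sample sector */
	int i0, i1, i2;

	i0 = (secno >> 20) & NMASK;	/* 1: index into the top layer */
	i1 = (secno >> 10) & NMASK;	/* 1: index into the middle layer */
	i2 = secno & NMASK;		/* 400: index into the leaf layer */
	printf("sector %jd -> %d/%d/%d -> %p\n", (intmax_t)secno,
	    i0, i1, i2, (void *)s_read(top, secno));
}
#endif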

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL)
		return (ENXIO);
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}


static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		/*
		 * A stored value <= 255 means "sector filled with that
		 * byte"; anything larger is a pointer to a real sector
		 * allocated from the UMA zone.
		 */
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				for (i = 0; i < sc->sectorsize; i++)
					dst[i] = osp;
			else
				bcopy((void *)osp, dst, sc->sectorsize);
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				/* Uniform sector: store just the fill byte. */
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	bzero(&auio, sizeof(auio));

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = curthread;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_READ(sc->vnode, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
	} else {
		(void) vn_start_write(sc->vnode, &mp, V_WAIT);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_WRITE(sc->vnode, &auio,
		    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
		vn_finished_write(mp);
	}
	bp->bio_resid = auio.uio_resid;
	return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sf = sf_buf_alloc(m, 0);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				vm_page_wakeup(m);
				break;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				vm_page_wakeup(m);
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
#if 0
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				vm_page_wakeup(m);
				break;
			}
			bzero((void *)(sf_buf_kva(sf) + offs), len);
			vm_page_dirty(m);
			m->valid = VM_PAGE_BITS_ALL;
#endif
		}
		sf_buf_free(sf);
		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_activate(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);
		vm_page_unlock_queues();

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
#if 0
		if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
			printf("wire_count %d busy %d flags %x hold_count %d "
			    "act_count %d queue %d valid %d dirty %d @ %d\n",
			    m->wire_count, m->busy, m->flags, m->hold_count,
			    m->act_count, m->queue, m->valid, m->dirty, i);
#endif
	}
	vm_object_pip_subtract(sc->object, 1);
	vm_object_set_writeable_dirty(sc->object);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
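
/*
 * Worked example of the page-span arithmetic in mdstart_swap() above
 * (a hypothetical helper, never compiled: the bio values are assumed).
 * With 4 KB pages, a bio with bio_offset 6144 and bio_length 4096 spans
 * pages 1 and 2: it starts 2048 bytes into page 1 and ends 2048 bytes
 * into page 2.
 */
#if 0
static void
md_span_example(void)
{
	off_t bio_offset = 6144, bio_length = 4096;	/* assumed sample bio */
	vm_pindex_t firstp, lastp;
	int offs, lastend;

	offs = bio_offset % PAGE_SIZE;				/* 2048 */
	firstp = bio_offset / PAGE_SIZE;			/* 1 */
	lastp = (bio_offset + bio_length - 1) / PAGE_SIZE;	/* 2 */
	lastend = (bio_offset + bio_length - 1) % PAGE_SIZE + 1; /* 2048 */
	printf("pages %ju..%ju, offs %d, lastend %d\n",
	    (uintmax_t)firstp, (uintmax_t)lastp, offs, lastend);
}
#endif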

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error, hasgiant;

	sc = arg;
	curthread->td_base_pri = PRIBIO;

	switch (sc->type) {
	case MD_VNODE:
		mtx_lock(&Giant);
		hasgiant = 1;
		break;
	case MD_MALLOC:
	case MD_PRELOAD:
	case MD_SWAP:
	default:
		hasgiant = 0;
		break;
	}

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			if (sc->flags & MD_SHUTDOWN) {
				mtx_unlock(&sc->queue_mtx);
				sc->procp = NULL;
				wakeup(&sc->procp);
				if (hasgiant)
					mtx_unlock(&Giant);
				kthread_exit(0);
			}
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			/* g_handleattr_int() completes the bio itself. */
			if (sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads)))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			switch (sc->type) {
			case MD_MALLOC:
				error = mdstart_malloc(sc, bp);
				break;
			case MD_PRELOAD:
				error = mdstart_preload(sc, bp);
				break;
			case MD_VNODE:
				error = mdstart_vnode(sc, bp);
				break;
			case MD_SWAP:
				error = mdstart_swap(sc, bp);
				break;
			default:
				panic("Impossible md(type)");
				break;
			}
		}

		/* An error of -1 means the bio was already delivered. */
		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	/* XXX: LOCK(unique unit numbers) */
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	/* XXX: UNLOCK(unique unit numbers) */
	return (sc);
}

static struct md_s *
mdnew(int unit)
{
	struct md_s *sc;
	int error, max = -1;

	/* XXX: LOCK(unique unit numbers) */
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit) {
			/* XXX: UNLOCK(unique unit numbers) */
			return (NULL);
		}
		if (sc->unit > max)
			max = sc->unit;
	}
	if (unit == -1)
		unit = max + 1;
	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->unit = unit;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sprintf(sc->name, "md%d", unit);
	error = kthread_create(md_kthread, sc, &sc->procp, 0, 0, "%s",
	    sc->name);
	if (error) {
		free(sc, M_MD);
		return (NULL);
	}
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	/* XXX: UNLOCK(unique unit numbers) */
	return (sc);
}

static void
mdinit(struct md_s *sc)
{

	struct g_geom *gp;
	struct g_provider *pp;

	DROP_GIANT();
	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	PICKUP_GIANT();
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
		return (EINVAL);
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
	return (0);
}


static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	if (error != 0)
		uma_zdestroy(sc->uma);
	return (error);
}
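
/*
 * Illustration of the sector encoding shared by mdcreate_malloc() and
 * mdstart_malloc() (a hypothetical helper, never compiled): a slot value
 * of 0 means "never written", values <= 255 are a fill byte for the whole
 * sector, and anything larger is a pointer to a sector allocated from the
 * UMA zone.
 */
#if 0
static void
md_sector_encoding_example(struct md_s *sc)
{
	uintptr_t osp;

	osp = s_read(sc->indir, 0);	/* inspect sector 0 (assumed) */
	if (osp == 0)
		printf("sector 0: unwritten, reads back as zeroes\n");
	else if (osp <= 255)
		printf("sector 0: compressed, fill byte 0x%02x\n",
		    (u_int)osp);
	else
		printf("sector 0: stored at %p in the UMA zone\n",
		    (void *)osp);
}
#endif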

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credits in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	int error, flags;

	flags = FREAD|FWRITE;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
	error = vn_open(&nd, &flags, 0, -1);
	if (error) {
		NDFREE(&nd, NDF_ONLY_PNBUF);
		if (error != EACCES && error != EPERM && error != EROFS)
			return (error);
		/* Retry read-only if the read-write open was refused. */
		flags &= ~FWRITE;
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
		error = vn_open(&nd, &flags, 0, -1);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (error)
		return (error);
	if (nd.ni_vp->v_type != VREG ||
	    (error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td))) {
		VOP_UNLOCK(nd.ni_vp, 0, td);
		(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
		return (error ? error : EINVAL);
	}
	VOP_UNLOCK(nd.ni_vp, 0, td);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
		return (error);
	}
	return (0);
}

static void
md_zapit(void *p, int cancel)
{
	if (cancel)
		return;
	g_wither_geom(p, ENXIO);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	GIANT_REQUIRED;

	mtx_destroy(&sc->queue_mtx);
	if (sc->gp) {
		sc->gp->softc = NULL;
		g_waitfor_event(md_zapit, sc->gp, M_WAITOK, sc->gp, NULL);
		sc->gp = NULL;
		sc->pp = NULL;
	}
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (sc->procp != NULL)
		tsleep(&sc->procp, PRIBIO, "mddestroy", hz / 10);
	if (sc->vnode != NULL)
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	/* XXX: LOCK(unique unit numbers) */
	LIST_REMOVE(sc, list);
	/* XXX: UNLOCK(unique unit numbers) */
	free(sc, M_MD);
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	GIANT_REQUIRED;

	/*
	 * Range check.  Disallow sizes that are zero or not a multiple
	 * of a page.
	 */
	if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			vm_object_deallocate(sc->object);
			sc->object = NULL;
			return (EDOM);
		}
	}
	error = mdsetcred(sc, td->td_ucred);
	if (error) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
mddetach(int unit, struct thread *td)
{
	struct md_s *sc;

	sc = mdfind(unit);
	if (sc == NULL)
		return (ENOENT);
	if (sc->opencount != 0 && !(sc->flags & MD_FORCE))
		return (EBUSY);
	switch (sc->type) {
	case MD_VNODE:
	case MD_SWAP:
	case MD_MALLOC:
	case MD_PRELOAD:
		return (mddestroy(sc, td));
	default:
		return (EOPNOTSUPP);
	}
}
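
/*
 * For reference, a userland consumer would drive the control device
 * handled below roughly as follows (hypothetical sketch, never compiled
 * here: open(2)/ioctl(2)/close(2) are system calls, not kernel routines;
 * mdconfig(8) is the real consumer).  The md_ioctl fields match those
 * interpreted by mdctlioctl().
 */
#if 0
int
md_attach_example(void)
{
	struct md_ioctl mdio;
	int fd;

	fd = open("/dev/" MDCTL_NAME, O_RDWR);	/* userland open(2) */
	if (fd < 0)
		return (-1);
	bzero(&mdio, sizeof(mdio));
	mdio.md_version = MDIOVERSION;
	mdio.md_type = MD_SWAP;			/* swap-backed disk */
	mdio.md_mediasize = 32 * 1024 * 1024;	/* 32 MB (assumed size) */
	mdio.md_options = MD_AUTOUNIT;		/* let the driver pick a unit */
	if (ioctl(fd, MDIOCATTACH, &mdio) == 0)
		printf("attached md%d\n", mdio.md_unit);
	close(fd);
	return (0);
}
#endif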

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	mdio = (struct md_ioctl *)addr;
	switch (cmd) {
	case MDIOCATTACH:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_options & MD_AUTOUNIT) {
			sc = mdnew(-1);
			if (sc == NULL)
				return (EBUSY);
			mdio->md_unit = sc->unit;
		} else {
			sc = mdnew(mdio->md_unit);
			if (sc == NULL)
				return (EBUSY);
		}
		sc->type = mdio->md_type;
		sc->mediasize = mdio->md_mediasize;
		if (mdio->md_sectorsize == 0)
			sc->sectorsize = DEV_BSIZE;
		else
			sc->sectorsize = mdio->md_sectorsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			error = mdcreate_preload(sc, mdio);
			break;
		case MD_VNODE:
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}
		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		if (mdio->md_file != NULL || mdio->md_mediasize != 0 ||
		    mdio->md_options != 0)
			return (EINVAL);
		return (mddetach(mdio->md_unit, td));
	case MDIOCQUERY:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE) {
			/* XXX fill this in */
			mdio->md_file = NULL;
		}
		return (0);
	case MDIOCLIST:
		/* md_pad[0] holds the count, the unit numbers follow. */
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
	return (ENOIOCTL);
}

static void
md_preloaded(u_char *image, size_t length)
{
	struct md_s *sc;

	sc = mdnew(-1);
	if (sc == NULL)
		return;
	sc->type = MD_PRELOAD;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}

static void
g_md_init(struct g_class *mp __unused)
{

	caddr_t mod;
	caddr_t c;
	u_char *ptr, *name, *type;
	unsigned len;

	mod = NULL;
	g_topology_unlock();
#ifdef MD_ROOT_SIZE
	md_preloaded(mfs_root, MD_ROOT_SIZE*1024);
#endif
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (name == NULL)
			continue;
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		c = preload_search_info(mod, MODINFO_ADDR);
		ptr = *(u_char **)c;
		c = preload_search_info(mod, MODINFO_SIZE);
		len = *(size_t *)c;
		printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
		    MD_NAME, mdunits, name, len, ptr);
		md_preloaded(ptr, len);
	}
	status_dev = make_dev(&mdctl_cdevsw, 0xffff00ff, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_fini(struct g_class *mp __unused)
{

	if (status_dev != NULL)
		destroy_dev(status_dev);
}