/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");
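
/*
 * Both sysctls above are runtime tunables; illustrative usage:
 *	sysctl debug.mddebug=1		# enable the debug printfs below
 *	sysctl vm.md_malloc_wait=1	# sleep for memory rather than fail
 */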

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = 0;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);


static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static int md_vnode_pbuf_freecnt;
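
/*
 * Sectors of a malloc-backed device live in a radix tree of "indir"
 * nodes.  Each node holds NINDIR slots, so on a machine with 4K pages
 * and 8-byte pointers NINDIR is 4096 / 8 = 512 and nshift (computed in
 * g_md_init()) is 9; a two-level tree then covers 512 * 512 sectors.
 */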
struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}


static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}
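
/*
 * Opcodes for md_malloc_move(), the helper that walks an unmapped bio's
 * page array one sf_buf mapping at a time and zeroes, fills, copies in,
 * copies out, or (for MD_COMPRESS) compares the sector contents.
 */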
#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5

static int
md_malloc_move(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
	first = 0;
	first_read = 0;
	uc = ptr;
	mp1 = *mp;
	ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}
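
/*
 * Malloc backend.  Each slot in the indir tree encodes one sector:
 * 0 means never written (reads back as zeroes), a value no greater than
 * 255 means a sector filled entirely with that byte (what MD_COMPRESS
 * detects on write), and any larger value is a pointer to a sector
 * buffer allocated from the device's UMA zone.
 */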
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}
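
/*
 * Vnode backend: file-backed devices.  All I/O goes through VOP_READ/
 * VOP_WRITE with the vnode exclusively locked; writes are synchronous
 * unless the device was created with MD_ASYNC.
 */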
static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	struct thread *td;
	off_t end, zerosize;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	/*
	 * Special case for BIO_DELETE.  On the surface, this is very
	 * similar to BIO_WRITE, except that we write from our own
	 * fixed-length buffer, so we have to loop.  The net result is
	 * that the two cases end up having very little in common.
	 */
	if (bp->bio_cmd == BIO_DELETE) {
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;
		end = bp->bio_offset + bp->bio_length;
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		while (auio.uio_offset < end) {
			aiov.iov_base = __DECONST(void *, zero_region);
			aiov.iov_len = end - auio.uio_offset;
			if (aiov.iov_len > zerosize)
				aiov.iov_len = zerosize;
			auio.uio_resid = aiov.iov_len;
			error = VOP_WRITE(vp, &auio,
			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
			if (error != 0)
				break;
		}
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		bp->bio_resid = end - auio.uio_offset;
		return (error);
	}

	if ((bp->bio_flags & BIO_UNMAPPED) == 0) {
		pb = NULL;
		aiov.iov_base = bp->bio_data;
	} else {
		KASSERT(bp->bio_length <= MAXPHYS, ("bio_length %jd",
		    (uintmax_t)bp->bio_length));
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		pmap_qenter((vm_offset_t)pb->b_data, bp->bio_ma, bp->bio_ma_n);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    bp->bio_ma_offset);
	}
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pmap_qremove((vm_offset_t)pb->b_data, bp->bio_ma_n);
		relpbuf(pb, &md_vnode_pbuf_freecnt);
	}
	bp->bio_resid = auio.uio_resid;
	return (error);
}
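
/*
 * Swap backend: sectors live in an anonymous OBJT_SWAP VM object and
 * are paged in and out on demand.  The loop below works a page at a
 * time: grab the page busy, page it in if a partial transfer needs the
 * old contents, copy or zero the affected range, then dirty, free or
 * reactivate the page as appropriate.
 */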
static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & BIO_UNMAPPED) == 0 ? 0 : bp->bio_ma_offset;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if (len != PAGE_SIZE) {
				pmap_zero_page_area(m, offs, len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		vm_page_xunbusy(m);
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_subtract(sc->object, 1);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static int
mdstart_null(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}
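
/*
 * Allocate a softc and a worker kthread for a new unit.  Unit numbers
 * come from the md_uh unit-number allocator: pass -1 to take the lowest
 * free unit, or a specific number to claim exactly that unit.
 */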
static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
	case MD_NULL:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}
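

/*
 * The credentials saved by mdsetcred() below are reused for every vnode
 * and swap I/O the worker thread issues later, so the device keeps
 * working with the opener's privileges regardless of who touches it.
 */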
static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}
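
/*
 * Create a file-backed device.  From userland this is typically reached
 * via mdconfig(8), e.g. (illustrative):
 *	mdconfig -a -t vnode -f /path/to/image
 */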
static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}
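
/*
 * Resizing only needs real work for swap-backed devices: shrinking
 * frees the now-unreachable pages and returns their swap reservation,
 * growing reserves swap up front (and pre-allocates it with MD_RESERVE).
 * Vnode- and null-backed devices just have the provider resized.
 */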
static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
	case MD_NULL:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
mdcreate_null(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	return (0);
}
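
/*
 * Control-device ioctl handler.  MDIOCATTACH creates a device of the
 * requested type, MDIOCDETACH destroys one, MDIOCRESIZE changes the
 * media size, and MDIOCQUERY/MDIOCLIST report status; mdconfig(8) is
 * the usual front end, e.g. (illustrative):
 *	mdconfig -a -t swap -s 64m	# attach a swap-backed disk
 *	mdconfig -d -u 0		# detach md0
 */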
static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * The version number is checked once here, on entry, rather
	 * than in the individual ioctl handlers; note that the correct
	 * return value for an unknown ioctl is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
		case MD_NULL:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl.  Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		case MD_NULL:
			sc->start = mdstart_null;
			error = mdcreate_null(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	}
}
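
/*
 * GEOM class initialization: compute log2(NINDIR), create the control
 * device, and attach any memory disk images the loader left in memory.
 * Images are recognized by their metadata type, "md_image" or
 * "mfs_root"; a typical (illustrative) loader.conf stanza that produces
 * one is:
 *	mfsroot_load="YES"
 *	mfsroot_type="mfs_root"
 *	mfsroot_name="/boot/mfsroot"
 */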
static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start), NULL);
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	case MD_NULL:
		type = "null";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}