/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
const int mfs_root_size = sizeof(mfs_root.start);
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static int md_vnode_pbuf_freecnt;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};
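
/*
 * Editor's note on the structures above: the MD_MALLOC backend keeps
 * its sectors in a radix tree of "indir" nodes.  Each node holds
 * NINDIR slots; interior slots point at child nodes and leaf slots
 * hold per-sector values.  For example, with 4 KiB pages and 8-byte
 * pointers, NINDIR is 512, so one level of indirection addresses 512
 * sectors, two levels 262144, and so on; nshift, computed in
 * g_md_init(), is log2(NINDIR), i.e. 9 in this configuration.
 */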

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */
static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */
static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}
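
/*
 * Worked example for dimension() above, assuming NINDIR == 512: a
 * 1 GiB device with 512-byte sectors has 2097152 sectors.  The loop
 * divides twice (2097152 -> 4096 -> 8), so layer == 2 and the root
 * node gets shift == 2 * nshift == 18; s_read() and s_write() then
 * walk root -> intermediate node -> leaf for every sector number.
 */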

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5

static int
md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
	first = 0;
	first_read = 0;
	uc = ptr;
	mp1 = *mp;
	ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}
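
/*
 * A note on MD_MALLOC_MOVE_CMP, used above and in the vlist variant
 * below: it scans the sector and returns EDOOFUS as soon as a byte
 * differs from the first one, so a zero return means the sector is a
 * single repeated byte (stored in *uc) and the caller can compress it
 * into one leaf value instead of allocating a full sector buffer.
 */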

static int
md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
    unsigned len, void *ptr, u_char fill, int op)
{
	bus_dma_segment_t *vlist;
	uint8_t *p, *end, first;
	off_t *uc;
	int ma_offs, seg_len;

	vlist = *pvlist;
	ma_offs = *pma_offs;
	uc = ptr;

	for (; len != 0; len -= seg_len) {
		seg_len = imin(vlist->ds_len - ma_offs, len);
		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, seg_len);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, seg_len);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, seg_len);
			cpu_flush_dcache(p, seg_len);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, seg_len);
			break;
		case MD_MALLOC_MOVE_CMP:
			end = p + seg_len;
			first = *uc = *p;
			/* Confirm all following bytes match the first */
			while (++p < end) {
				if (*p != first)
					return (EDOOFUS);
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n",
			    op));
			break;
		}

		ma_offs += seg_len;
		if (ma_offs == vlist->ds_len) {
			ma_offs = 0;
			vlist++;
		}
		ptr = (uint8_t *)ptr + seg_len;
	}
	*pvlist = vlist;
	*pma_offs = ma_offs;

	return (0);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	bus_dma_segment_t *vlist;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
	} else if (vlist != NULL) {
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize,
					    (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst,
					    sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move_ma(&m,
					    &ma_offs, sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else if (vlist != NULL) {
					error1 = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)sp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped && vlist == NULL)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}
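
/*
 * A note on the leaf encoding used by mdstart_malloc() above: each
 * sector slot in the indir tree holds either 0 (never written, reads
 * back as zeros), a value in 1..255 (the whole sector is that fill
 * byte, the compressed form used when MD_COMPRESS is set), or a
 * kernel pointer (> 255) to a uma-allocated buffer holding the full
 * sector contents.
 */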

static void
mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
		    seg_len);
		offset = 0;
		src = (uint8_t *)src + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static void
mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
		    seg_len);
		offset = 0;
		dst = (uint8_t *)dst + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{
	uint8_t *p;

	p = sc->pl_ptr + bp->bio_offset;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, bp->bio_length);
		} else {
			bcopy(p, bp->bio_data, bp->bio_length);
		}
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, p, bp->bio_length);
		} else {
			bcopy(bp->bio_data, p, bp->bio_length);
		}
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct iovec *piov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	bus_dma_segment_t *vlist;
	struct thread *td;
	off_t len, zerosize;
	int ma_offs;

	switch (bp->bio_cmd) {
	case BIO_READ:
		auio.uio_rw = UIO_READ;
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		auio.uio_rw = UIO_WRITE;
		break;
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;
	pb = NULL;
	piov = NULL;
	ma_offs = bp->bio_ma_offset;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * Emulate BIO_DELETE by writing zeros.
		 */
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
		piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		len = bp->bio_length;
		while (len > 0) {
			piov->iov_base = __DECONST(void *, zero_region);
			piov->iov_len = len;
			if (len > zerosize)
				piov->iov_len = zerosize;
			len -= piov->iov_len;
			piov++;
		}
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		vlist = (bus_dma_segment_t *)bp->bio_data;
		len = bp->bio_length;
		while (len > 0) {
			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
			    ma_offs);
			piov->iov_len = vlist->ds_len - ma_offs;
			if (piov->iov_len > len)
				piov->iov_len = len;
			len -= piov->iov_len;
			ma_offs = 0;
			vlist++;
			piov++;
		}
		auio.uio_iovcnt = piov - auio.uio_iov;
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		pmap_qenter((vm_offset_t)pb->b_data, bp->bio_ma, bp->bio_ma_n);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data + ma_offs);
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
	} else {
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
	}
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}

	if (pb) {
		pmap_qremove((vm_offset_t)pb->b_data, bp->bio_ma_n);
		relpbuf(pb, &md_vnode_pbuf_freecnt);
	}

	if (piov != NULL)
		free(piov, M_MD);

	bp->bio_resid = auio.uio_resid;
	return (error);
}
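
/*
 * A note on the BIO_DELETE path above: the vnode backend has no way
 * to punch holes in the backing file here, so a delete is emulated by
 * writing zeros.  Every iovec points at the kernel's shared read-only
 * zero_region; zerosize rounds ZERO_REGION_SIZE down to a multiple of
 * the sector size so each iovec stays sector-aligned.
 */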

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	bus_dma_segment_t *vlist;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
	    bp->bio_ma_offset : 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
				    vlist, ma_offs, len);
				cpu_flush_dcache(p, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyin_vlist(vlist, ma_offs,
				    VM_PAGE_TO_PHYS(m) + offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if (len != PAGE_SIZE) {
				pmap_zero_page_area(m, offs, len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		vm_page_xunbusy(m);
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE) {
			vm_page_dirty(m);
			vm_pager_page_unswapped(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
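
/*
 * A note on the read-modify-write above: for BIO_WRITE and BIO_DELETE
 * the existing page contents only matter when the operation covers
 * part of a page (len != PAGE_SIZE), so only then is the page faulted
 * in from the pager first.  Full-page operations simply overwrite the
 * page, or, for a delete, free it without ever reading it from swap.
 */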

static int
mdstart_null(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) ||
			    (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}
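
/*
 * A note on the error == -1 convention in md_kthread() above: the
 * g_handleattr_int() calls deliver the bio themselves when they
 * handle a GEOM::* attribute, so -1 marks a request that must not be
 * passed to g_io_deliver() a second time.
 */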

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
	case MD_NULL:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */
	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */
	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read-only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
	case MD_NULL:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}
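
/*
 * A note on MD_RESERVE for swap-backed disks: without it, swap space
 * is charged against the credential but individual swap blocks are
 * only assigned as sectors are first written, so a later write can
 * still fail if swap runs out.  With MD_RESERVE, swap_pager_reserve()
 * assigns blocks for the whole device up front at creation time (and
 * for the grown range in mdresize()), trading an earlier failure for
 * a firmer guarantee.
 */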

static int
mdcreate_null(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{

	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	return (0);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
		case MD_NULL:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl.  Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		case MD_NULL:
			sc->start = mdstart_null;
			error = mdcreate_null(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}
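
/*
 * For reference, these ioctls are normally driven by mdconfig(8):
 * e.g. "mdconfig -a -t swap -s 1g" attaches via MDIOCATTACH,
 * "mdconfig -d -u 0" detaches via MDIOCDETACH and
 * "mdconfig -r -u 0 -s 2g" resizes via MDIOCRESIZE.
 */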

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	} else {
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);
	}
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT
	if (mfs_root_size != 0) {
		sx_xlock(&md_sx);
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
		    NULL);
		sx_xunlock(&md_sx);
	}
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	case MD_NULL:
		type = "null";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n",
			    indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL) {
				sbuf_printf(sb, "%s<file>", indent);
				g_conf_printf_escaped(sb, "%s", mp->file);
				sbuf_printf(sb, "</file>\n");
			}
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}