/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_rootdevname.h"
#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/disk.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * We put the mfs_root symbol into the oldmfs section of the kernel object
 * file.  Applications that patch the object with the image can determine
 * the size by looking at the oldmfs section size within the kernel.
 */
u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif
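
/*
 * A rough summary of the two preload variants above: with MD_ROOT_SIZE the
 * image array lives in the dedicated "oldmfs" section, sized at build time,
 * and can be patched in place as the comment describes; without it,
 * mfs_root/mfs_root_end are weak symbols that an image object linked into
 * the kernel may provide, and mfs_root_size then evaluates to the distance
 * between them (zero when no image is present).
 */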

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);


static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static int md_vnode_pbuf_freecnt;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}
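
/*
 * The MD_MALLOC backend keeps one pointer-sized slot per sector in a radix
 * tree of "struct indir" nodes with NINDIR slots per node.  As a rough
 * example, with 4 KB pages and 8-byte pointers NINDIR is 512 (nshift == 9),
 * so a single leaf covers 512 sectors and each additional layer multiplies
 * that by 512.  dimension() below sizes the top layer accordingly, and
 * s_read()/s_write() walk the tree nshift bits of the sector number at a
 * time.
 */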

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}


static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}
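
/*
 * g_md_start() above only queues the bio and wakes the per-device worker;
 * md_kthread() later dequeues it and dispatches through sc->start, which
 * points at one of the mdstart_*() routines selected when the device was
 * created.  The MD_MALLOC_MOVE_* codes below name the per-segment
 * operations the malloc backend performs while walking an unmapped or
 * vlist bio.
 */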

#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5

static int
md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
	first = 0;
	first_read = 0;
	uc = ptr;
	mp1 = *mp;
	ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}

static int
md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
    unsigned len, void *ptr, u_char fill, int op)
{
	bus_dma_segment_t *vlist;
	uint8_t *p, *end, first;
	off_t *uc;
	int ma_offs, seg_len;

	vlist = *pvlist;
	ma_offs = *pma_offs;
	uc = ptr;

	for (; len != 0; len -= seg_len) {
		seg_len = imin(vlist->ds_len - ma_offs, len);
		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, seg_len);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, seg_len);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, seg_len);
			cpu_flush_dcache(p, seg_len);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, seg_len);
			break;
		case MD_MALLOC_MOVE_CMP:
			end = p + seg_len;
			first = *uc = *p;
			/* Confirm all following bytes match the first */
			while (++p < end) {
				if (*p != first)
					return (EDOOFUS);
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
			break;
		}

		ma_offs += seg_len;
		if (ma_offs == vlist->ds_len) {
			ma_offs = 0;
			vlist++;
		}
		ptr = (uint8_t *)ptr + seg_len;
	}
	*pvlist = vlist;
	*pma_offs = ma_offs;

	return (0);
}
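
/*
 * A bio handed to the malloc backend can carry its data in one of three
 * forms: a plain mapped buffer (bp->bio_data), an unmapped page array
 * (BIO_UNMAPPED, bp->bio_ma plus bp->bio_ma_offset), or a scatter/gather
 * list of bus_dma segments (BIO_VLIST).  mdstart_malloc() below picks the
 * matching helper for each sector; md_malloc_move_ma() temporarily maps
 * pages through sf_buf(9) for the unmapped case.
 */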

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	bus_dma_segment_t *vlist;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
	} else if (vlist != NULL) {
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize,
					    (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else if (vlist != NULL) {
					error1 = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)sp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)osp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped && vlist == NULL)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}
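
/*
 * Sector slots returned by s_read()/stored by s_write() are overloaded:
 * 0 means "never written" (reads back as zeros), values 1..255 encode a
 * sector filled entirely with that byte (the MD_COMPRESS case), and larger
 * values are pointers to a real sector buffer allocated from sc->uma.
 * This is why mdstart_malloc() compares slot values against 255 before
 * treating them as pointers.
 */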

static void
mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
		    seg_len);
		offset = 0;
		src = (uint8_t *)src + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static void
mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
		    seg_len);
		offset = 0;
		dst = (uint8_t *)dst + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{
	uint8_t *p;

	p = sc->pl_ptr + bp->bio_offset;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, bp->bio_length);
		} else {
			bcopy(p, bp->bio_data, bp->bio_length);
		}
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, p, bp->bio_length);
		} else {
			bcopy(bp->bio_data, p, bp->bio_length);
		}
		break;
	}
	bp->bio_resid = 0;
	return (0);
}
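
/*
 * The vnode backend below translates each bio into a struct uio and hands
 * it to VOP_READ()/VOP_WRITE() on the backing file.  BIO_DELETE has no
 * direct vnode equivalent, so it is emulated by writing from the shared
 * zero_region; unmapped bios are staged through a pbuf that is temporarily
 * mapped with pmap_qenter() one MAXPHYS-sized chunk at a time.
 */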

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct iovec *piov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	bus_dma_segment_t *vlist;
	struct thread *td;
	off_t iolen, len, zerosize;
	int ma_offs, npages;

	switch (bp->bio_cmd) {
	case BIO_READ:
		auio.uio_rw = UIO_READ;
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		auio.uio_rw = UIO_WRITE;
		break;
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;
	pb = NULL;
	piov = NULL;
	ma_offs = bp->bio_ma_offset;
	len = bp->bio_length;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * Emulate BIO_DELETE by writing zeros.
		 */
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
		piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		while (len > 0) {
			piov->iov_base = __DECONST(void *, zero_region);
			piov->iov_len = len;
			if (len > zerosize)
				piov->iov_len = zerosize;
			len -= piov->iov_len;
			piov++;
		}
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		vlist = (bus_dma_segment_t *)bp->bio_data;
		while (len > 0) {
			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
			    ma_offs);
			piov->iov_len = vlist->ds_len - ma_offs;
			if (piov->iov_len > len)
				piov->iov_len = len;
			len -= piov->iov_len;
			ma_offs = 0;
			vlist++;
			piov++;
		}
		auio.uio_iovcnt = piov - auio.uio_iov;
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		bp->bio_resid = len;
unmapped_step:
		npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
		    PAGE_MASK))));
		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
		KASSERT(iolen > 0, ("zero iolen"));
		pmap_qenter((vm_offset_t)pb->b_data,
		    &bp->bio_ma[atop(ma_offs)], npages);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    (ma_offs & PAGE_MASK));
		aiov.iov_len = iolen;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = iolen;
	} else {
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
	}
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error == 0)
			sc->flags &= ~MD_VERIFY;
	}

	if (pb != NULL) {
		pmap_qremove((vm_offset_t)pb->b_data, npages);
		if (error == 0) {
			len -= iolen;
			bp->bio_resid -= iolen;
			ma_offs += iolen;
			if (len > 0)
				goto unmapped_step;
		}
		relpbuf(pb, &md_vnode_pbuf_freecnt);
	}

	free(piov, M_MD);
	if (pb == NULL)
		bp->bio_resid = auio.uio_resid;
	return (error);
}
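
/*
 * The swap backend stores data in an anonymous OBJT_SWAP VM object and
 * therefore works on whole pages: a read or a partial-page write first
 * grabs the page (paging it in from swap if the pager has it), while a
 * BIO_DELETE covering a full page simply drops the page and its swap
 * space.
 */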

static void
md_swap_page_free(vm_page_t m)
{

	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_free(m);
	vm_page_unlock(m);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	bus_dma_segment_t *vlist;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
	    bp->bio_ma_offset : 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
				    vlist, ma_offs, len);
				cpu_flush_dcache(p, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL)
				pmap_zero_page(m);

			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyin_vlist(vlist, ma_offs,
				    VM_PAGE_TO_PHYS(m) + offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}

			m->valid = VM_PAGE_BITS_ALL;
			if (m->dirty != VM_PAGE_BITS_ALL) {
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				md_swap_page_free(m);
				m = NULL;
			} else {
				/* Page is valid. */
				if (len != PAGE_SIZE) {
					pmap_zero_page_area(m, offs, len);
					if (m->dirty != VM_PAGE_BITS_ALL) {
						vm_page_dirty(m);
						vm_pager_page_unswapped(m);
					}
				} else {
					vm_pager_page_unswapped(m);
					md_swap_page_free(m);
					m = NULL;
				}
			}
		}
		if (m != NULL) {
			vm_page_xunbusy(m);
			vm_page_lock(m);
			vm_page_activate(m);
			vm_page_unlock(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static int
mdstart_null(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			int isv = ((sc->flags & MD_VERIFY) != 0);

			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else if (g_handleattr_int(bp, "MNT::verified", isv))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}
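
/*
 * mdnew() above allocates the unit number from md_uh (the driver's
 * unit-number allocator) and starts a dedicated worker process running
 * md_kthread() for the new device; mdinit() below then publishes the
 * device to GEOM as provider "md<unit>" and hooks it into devstat.
 */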

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
	case MD_NULL:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}
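
/*
 * With MD_RESERVE the loop in mdcreate_malloc() above pre-populates every
 * sector up front, so the device cannot fail later with ENOSPC; without it
 * sectors are allocated lazily on first write (and MD_COMPRESS may avoid
 * allocating them at all for uniform data).
 */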

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}
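
/*
 * A vnode-backed device is what userland typically requests with something
 * like "mdconfig -a -t vnode -f /path/to/image" (illustrative invocation;
 * see mdconfig(8)); the MD_READONLY, MD_ASYNC and MD_VERIFY options map
 * onto the flag handling in mdcreate_vnode() below.
 */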

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE)
	    | ((mdio->md_options & MD_VERIFY) ? O_VERIFY : 0);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC | MD_VERIFY);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
	case MD_NULL:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	if ((mdio->md_options & MD_VERIFY) != 0)
		return (EINVAL);
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}
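
/*
 * Swap-backed devices (e.g. "mdconfig -a -t swap -s 1g", an illustrative
 * invocation) are charged against the creating credential's swap
 * reservation via the credential passed to vm_pager_allocate() above;
 * MD_RESERVE additionally asks the swap pager to assign the blocks at
 * creation time instead of on first use.
 */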

static int
mdcreate_null(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	return (0);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
		case MD_NULL:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl.  Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		case MD_NULL:
			sc->start = mdstart_null;
			error = mdcreate_null(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE ||
		    (sc->type == MD_PRELOAD && mdio->md_file != NULL))
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	};
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
	if (name != NULL)
		strlcpy(sc->file, name, sizeof(sc->file));
#if defined(MD_ROOT) && !defined(ROOTDEVNAME)
	if (sc->unit == 0)
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	} else {
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);
	}
}
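
/*
 * g_md_init() below runs when the GEOM class is registered: it attaches
 * the statically linked mfs_root image (if any), then walks the loader's
 * preloaded module list for "md_image"/"mfs_root" entries and attaches
 * each one, and finally creates the /dev/mdctl control node used by
 * mdconfig(8).
 */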

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT
	if (mfs_root_size != 0) {
		sx_xlock(&md_sx);
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
		    NULL);
		sx_xunlock(&md_sx);
	}
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	case MD_NULL:
		type = "null";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
			    (mp->type == MD_PRELOAD && mp->file[0] != '\0'))
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
			    (mp->type == MD_PRELOAD && mp->file[0] != '\0')) {
				sbuf_printf(sb, "%s<file>", indent);
				g_conf_printf_escaped(sb, "%s", mp->file);
				sbuf_printf(sb, "</file>\n");
			}
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}