/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based in the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
const int mfs_root_size = sizeof(mfs_root.start);
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static int md_vnode_pbuf_freecnt;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
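/*
 * For illustration (numbers assume 4 KB pages and 8-byte pointers, so
 * NINDIR == 512 and nshift == 9): a 4 GB device with 512-byte sectors
 * has 8M sectors.  8M / 512 = 16K (layer 1), 16K / 512 = 32 <= NINDIR
 * (stop), so "layer" ends up as 2 and the root node gets shift
 * 2 * 9 == 18.  Lookups then walk three levels: the root, one
 * intermediate indir, and a leaf holding the sector slots.
 */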
static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */
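/*
 * The lip[] array below records the path of indir nodes visited from
 * the root down to the leaf.  After a leaf slot is cleared, the final
 * loop walks that path back up and frees any node whose "used" count
 * has dropped to zero, so fully-zeroed regions cost no memory.  A depth
 * of 10 comfortably exceeds anything dimension() can produce for
 * realistic device sizes.
 */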
static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5

static int
md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
	first = 0;
	first_read = 0;
	uc = ptr;
	mp1 = *mp;
	ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}

static int
md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
    unsigned len, void *ptr, u_char fill, int op)
{
	bus_dma_segment_t *vlist;
	uint8_t *p, *end, first;
	off_t *uc;
	int ma_offs, seg_len;

	vlist = *pvlist;
	ma_offs = *pma_offs;
	uc = ptr;

	for (; len != 0; len -= seg_len) {
		seg_len = imin(vlist->ds_len - ma_offs, len);
		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, seg_len);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, seg_len);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, seg_len);
			cpu_flush_dcache(p, seg_len);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, seg_len);
			break;
		case MD_MALLOC_MOVE_CMP:
			end = p + seg_len;
			first = *uc = *p;
			/* Confirm all following bytes match the first */
			while (++p < end) {
				if (*p != first)
					return (EDOOFUS);
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
			break;
		}

		ma_offs += seg_len;
		if (ma_offs == vlist->ds_len) {
			ma_offs = 0;
			vlist++;
		}
		ptr = (uint8_t *)ptr + seg_len;
	}
	*pvlist = vlist;
	*pma_offs = ma_offs;

	return (0);
}
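/*
 * mdstart_malloc() below relies on the value encoding used in the
 * indir leaves: 0 means the sector has never been written (reads
 * return zeros); a value in 1..255 means the whole sector is filled
 * with that byte (the MD_COMPRESS case, detected via
 * MD_MALLOC_MOVE_CMP, which returns the sentinel EDOOFUS when a sector
 * is not uniform); anything larger is a pointer to a uma-allocated
 * sector buffer.  Hence the osp <= 255 tests choosing between memset()
 * and bcopy().
 */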
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	bus_dma_segment_t *vlist;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
	} else if (vlist != NULL) {
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize,
					    (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else if (vlist != NULL) {
					error1 = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)sp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)osp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped && vlist == NULL)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}
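/*
 * A BIO_VLIST bio carries its buffer as an array of bus_dma_segment_t
 * entries in bio_data (an address and a length per segment), with
 * bio_ma_offset giving the starting offset within the first segment.
 * The two helpers below stream a linear buffer into and out of such a
 * segment list.
 */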
static void
mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
		    seg_len);
		offset = 0;
		src = (uint8_t *)src + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static void
mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
		    seg_len);
		offset = 0;
		dst = (uint8_t *)dst + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{
	uint8_t *p;

	p = sc->pl_ptr + bp->bio_offset;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, bp->bio_length);
		} else {
			bcopy(p, bp->bio_data, bp->bio_length);
		}
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, p, bp->bio_length);
		} else {
			bcopy(bp->bio_data, p, bp->bio_length);
		}
		break;
	}
	bp->bio_resid = 0;
	return (0);
}
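/*
 * mdstart_vnode() builds its uio in one of three ways, depending on
 * how the bio presents its buffer: a plain mapped buffer becomes a
 * single iovec; a BIO_VLIST bio becomes one iovec per segment; and a
 * BIO_UNMAPPED bio is handled by mapping batches of its pages into a
 * pbuf's KVA window and looping (the "unmapped_step" label).
 * BIO_DELETE is emulated as a write of the shared zero region.
 */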
static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct iovec *piov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	bus_dma_segment_t *vlist;
	struct thread *td;
	off_t iolen, len, zerosize;
	int ma_offs, npages;

	switch (bp->bio_cmd) {
	case BIO_READ:
		auio.uio_rw = UIO_READ;
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		auio.uio_rw = UIO_WRITE;
		break;
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;
	pb = NULL;
	piov = NULL;
	ma_offs = bp->bio_ma_offset;
	len = bp->bio_length;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * Emulate BIO_DELETE by writing zeros.
		 */
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
		piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		while (len > 0) {
			piov->iov_base = __DECONST(void *, zero_region);
			piov->iov_len = len;
			if (len > zerosize)
				piov->iov_len = zerosize;
			len -= piov->iov_len;
			piov++;
		}
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		vlist = (bus_dma_segment_t *)bp->bio_data;
		while (len > 0) {
			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
			    ma_offs);
			piov->iov_len = vlist->ds_len - ma_offs;
			if (piov->iov_len > len)
				piov->iov_len = len;
			len -= piov->iov_len;
			ma_offs = 0;
			vlist++;
			piov++;
		}
		auio.uio_iovcnt = piov - auio.uio_iov;
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		bp->bio_resid = len;
unmapped_step:
		npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
		    PAGE_MASK))));
		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
		KASSERT(iolen > 0, ("zero iolen"));
		pmap_qenter((vm_offset_t)pb->b_data,
		    &bp->bio_ma[atop(ma_offs)], npages);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    (ma_offs & PAGE_MASK));
		aiov.iov_len = iolen;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = iolen;
	} else {
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
	}
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}

	if (pb != NULL) {
		pmap_qremove((vm_offset_t)pb->b_data, npages);
		if (error == 0) {
			len -= iolen;
			bp->bio_resid -= iolen;
			ma_offs += iolen;
			if (len > 0)
				goto unmapped_step;
		}
		relpbuf(pb, &md_vnode_pbuf_freecnt);
	}

	free(piov, M_MD);
	if (pb == NULL)
		bp->bio_resid = auio.uio_resid;
	return (error);
}
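/*
 * For swap-backed devices the data lives in the pages of an OBJT_SWAP
 * VM object, so I/O proceeds page by page: each page is grabbed
 * (exclusive busy), paged in from swap if needed, copied to or from
 * the bio, and then dirtied (writes), freed (full-page deletes) or
 * left clean.  Only pager errors are reported, as ENOSPC.
 */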
static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	bus_dma_segment_t *vlist;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
	    bp->bio_ma_offset : 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
				    vlist, ma_offs, len);
				cpu_flush_dcache(p, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyin_vlist(vlist, ma_offs,
				    VM_PAGE_TO_PHYS(m) + offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if (len != PAGE_SIZE) {
				pmap_zero_page_area(m, offs, len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		vm_page_xunbusy(m);
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE) {
			vm_page_dirty(m);
			vm_pager_page_unswapped(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static int
mdstart_null(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		break;
	}
	bp->bio_resid = 0;
	return (0);
}
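/*
 * Each md unit runs one worker kthread.  g_md_start() merely queues
 * bios and wakes the worker; the worker dequeues them, answers
 * BIO_GETATTR itself and dispatches everything else through sc->start.
 * A return value of -1 means the bio has already been delivered (the
 * g_handleattr_int() case) and must not be completed again.
 */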
static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}
static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
	case MD_NULL:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}
static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}
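/*
 * Teardown order matters below: the provider is withered first so no
 * new bios arrive, then the worker thread is told to exit via
 * MD_SHUTDOWN and waited for, and only then are the type-specific
 * resources (vnode, credentials, swap object, indir tree, uma zone)
 * released and the unit number returned to the allocator.
 */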
static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
	case MD_NULL:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}
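/*
 * For swap-backed devices, MD_RESERVE makes the pager assign swap
 * space for every page up front, so later writes cannot fail for lack
 * of swap; without it, swap is only accounted against the creating
 * credential and allocated lazily as pages are written out.
 */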
static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
mdcreate_null(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	return (0);
}
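/*
 * These ioctls are normally reached through mdconfig(8) via
 * /dev/mdctl.  Illustrative usage (not exhaustive):
 *
 *	mdconfig -a -t swap -s 1g		# MDIOCATTACH, MD_SWAP
 *	mdconfig -a -t vnode -f disk.img	# MDIOCATTACH, MD_VNODE
 *	mdconfig -l				# MDIOCLIST
 *	mdconfig -r -u 0 -s 2g			# MDIOCRESIZE
 *	mdconfig -d -u 0			# MDIOCDETACH
 */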
static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * The version number is validated once, above, before dispatch.
	 * Note that the correct return value for an unknown ioctl is
	 * ENOIOCTL, not EINVAL; if we ever grow an ioctl which does not
	 * carry an mdio, the version check must move into the
	 * individual handlers.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
		case MD_NULL:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl.  Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		case MD_NULL:
			sc->start = mdstart_null;
			error = mdcreate_null(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}
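/*
 * All configuration ioctls funnel through this wrapper so that md_sx
 * serializes attach, detach and resize against each other and against
 * the preloaded-image attachment done in g_md_init().
 */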
static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	} else {
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);
	}
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr;
	char *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT
	if (mfs_root_size != 0) {
		sx_xlock(&md_sx);
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
		    NULL);
		sx_xunlock(&md_sx);
	}
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}
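/*
 * dumpconf feeds GEOM's configuration dumps: with a NULL indent it
 * emits the terse one-line (conftxt) form, otherwise the XML
 * (confxml) elements that userland tools use to inspect each device.
 */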
static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	case MD_NULL:
		type = "null";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL) {
				sbuf_printf(sb, "%s<file>", indent);
				g_conf_printf_escaped(sb, "%s", mp->file);
				sbuf_printf(sb, "</file>\n");
			}
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}