/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_rootdevname.h"
#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/disk.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * We put the mfs_root symbol into the oldmfs section of the kernel object
 * file.  Applications that patch the object with the image can determine
 * the size by looking at the oldmfs section size within the kernel.
 */
u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);


static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static int md_vnode_pbuf_freecnt;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	char ident[32];
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	char label[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

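/*
 * A rough sketch of the resulting tree geometry, assuming 4 KB pages and
 * 64-bit pointers (both platform-dependent):
 *
 *	NINDIR = PAGE_SIZE / sizeof(uintptr_t) = 4096 / 8 = 512
 *	nshift = log2(NINDIR) = 9		(computed in g_md_init())
 *
 * Each indir node therefore fans out 512 ways.  For example, a 1 GB
 * malloc-backed device with 512-byte sectors has 2097152 sectors, so
 * dimension() below settles on layer = 2 (2097152 / 512 / 512 = 8 <= 512),
 * giving a three-level tree whose top node has shift = 18.  Leaf entries
 * hold either a small constant or a pointer to a uma-allocated sector;
 * see mdstart_malloc() for the encoding.
 */
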
/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}


static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

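/*
 * The MD_MALLOC_MOVE_* opcodes below drive md_malloc_move_ma() and
 * md_malloc_move_vlist(), which shuttle one sector's worth of data between
 * the malloc-backed store and a bio that is either unmapped (a vm_page_t
 * array) or carries a bus_dma segment list.  ZERO and FILL satisfy reads of
 * unallocated or byte-compressed sectors by writing a constant into the bio,
 * READ copies a stored sector into the bio, WRITE copies bio data into a
 * stored sector, and CMP scans the bio to decide whether an incoming write
 * is a single repeated byte that can be stored compressed; the switch
 * statements below are authoritative for the exact semantics.
 */
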
#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5

static int
md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
	first = 0;
	first_read = 0;
	uc = ptr;
	mp1 = *mp;
	ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}

static int
md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
    unsigned len, void *ptr, u_char fill, int op)
{
	bus_dma_segment_t *vlist;
	uint8_t *p, *end, first;
	off_t *uc;
	int ma_offs, seg_len;

	vlist = *pvlist;
	ma_offs = *pma_offs;
	uc = ptr;

	for (; len != 0; len -= seg_len) {
		seg_len = imin(vlist->ds_len - ma_offs, len);
		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, seg_len);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, seg_len);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, seg_len);
			cpu_flush_dcache(p, seg_len);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, seg_len);
			break;
		case MD_MALLOC_MOVE_CMP:
			end = p + seg_len;
			first = *uc = *p;
			/* Confirm all following bytes match the first */
			while (++p < end) {
				if (*p != first)
					return (EDOOFUS);
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
			break;
		}

		ma_offs += seg_len;
		if (ma_offs == vlist->ds_len) {
			ma_offs = 0;
			vlist++;
		}
		ptr = (uint8_t *)ptr + seg_len;
	}
	*pvlist = vlist;
	*pma_offs = ma_offs;

	return (0);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	bus_dma_segment_t *vlist;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

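	/*
	 * Each sector of a malloc-backed device is recorded in the indir
	 * tree as a single uintptr_t: 0 means "never written" (it reads
	 * back as zeros), values 1..255 mean the whole sector is filled
	 * with that byte (the MD_COMPRESS case), and anything larger is a
	 * pointer to a uma-allocated sector buffer.  The loop below
	 * implements this encoding.
	 */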
	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
	} else if (vlist != NULL) {
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize,
					    (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else if (vlist != NULL) {
					error1 = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)sp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)osp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped && vlist == NULL)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static void
mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
		    seg_len);
		offset = 0;
		src = (uint8_t *)src + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static void
mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
		    seg_len);
		offset = 0;
		dst = (uint8_t *)dst + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{
	uint8_t *p;

	p = sc->pl_ptr + bp->bio_offset;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, bp->bio_length);
		} else {
			bcopy(p, bp->bio_data, bp->bio_length);
		}
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, p, bp->bio_length);
		} else {
			bcopy(bp->bio_data, p, bp->bio_length);
		}
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct iovec *piov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	bus_dma_segment_t *vlist;
	struct thread *td;
	off_t iolen, len, zerosize;
	int ma_offs, npages;

	switch (bp->bio_cmd) {
	case BIO_READ:
		auio.uio_rw = UIO_READ;
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		auio.uio_rw = UIO_WRITE;
		break;
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;
	pb = NULL;
	piov = NULL;
	ma_offs = bp->bio_ma_offset;
	len = bp->bio_length;

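	/*
	 * Rough map of the cases handled below: BIO_FLUSH becomes a
	 * VOP_FSYNC() of the backing vnode; BIO_DELETE is emulated by
	 * writing zeros from the kernel's zero_region; BIO_VLIST bios get
	 * one iovec per bus_dma segment; unmapped bios are transferred in
	 * MAXPHYS-sized windows through a pbuf mapping.  See the code for
	 * the precise handling of each case.
	 */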
	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * Emulate BIO_DELETE by writing zeros.
		 */
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
		piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		while (len > 0) {
			piov->iov_base = __DECONST(void *, zero_region);
			piov->iov_len = len;
			if (len > zerosize)
				piov->iov_len = zerosize;
			len -= piov->iov_len;
			piov++;
		}
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		vlist = (bus_dma_segment_t *)bp->bio_data;
		while (len > 0) {
			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
			    ma_offs);
			piov->iov_len = vlist->ds_len - ma_offs;
			if (piov->iov_len > len)
				piov->iov_len = len;
			len -= piov->iov_len;
			ma_offs = 0;
			vlist++;
			piov++;
		}
		auio.uio_iovcnt = piov - auio.uio_iov;
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		bp->bio_resid = len;
unmapped_step:
		npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
		    PAGE_MASK))));
		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
		KASSERT(iolen > 0, ("zero iolen"));
		pmap_qenter((vm_offset_t)pb->b_data,
		    &bp->bio_ma[atop(ma_offs)], npages);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    (ma_offs & PAGE_MASK));
		aiov.iov_len = iolen;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = iolen;
	} else {
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
	}
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error == 0)
			sc->flags &= ~MD_VERIFY;
	}

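	/*
	 * For unmapped bios only part of the request may have been mapped
	 * and issued above.  Unmap the pbuf window and, if the transfer
	 * succeeded and bytes remain, loop back to unmapped_step to map
	 * and issue the next MAXPHYS-sized chunk.
	 */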
	if (pb != NULL) {
		pmap_qremove((vm_offset_t)pb->b_data, npages);
		if (error == 0) {
			len -= iolen;
			bp->bio_resid -= iolen;
			ma_offs += iolen;
			if (len > 0)
				goto unmapped_step;
		}
		relpbuf(pb, &md_vnode_pbuf_freecnt);
	}

	free(piov, M_MD);
	if (pb == NULL)
		bp->bio_resid = auio.uio_resid;
	return (error);
}

static void
md_swap_page_free(vm_page_t m)
{

	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_free(m);
	vm_page_unlock(m);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	bus_dma_segment_t *vlist;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
	    bp->bio_ma_offset : 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
				    vlist, ma_offs, len);
				cpu_flush_dcache(p, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL)
				pmap_zero_page(m);

			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyin_vlist(vlist, ma_offs,
				    VM_PAGE_TO_PHYS(m) + offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}

			m->valid = VM_PAGE_BITS_ALL;
			if (m->dirty != VM_PAGE_BITS_ALL) {
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				md_swap_page_free(m);
				m = NULL;
			} else {
				/* Page is valid. */
				if (len != PAGE_SIZE) {
					pmap_zero_page_area(m, offs, len);
					if (m->dirty != VM_PAGE_BITS_ALL) {
						vm_page_dirty(m);
						vm_pager_page_unswapped(m);
					}
				} else {
					vm_pager_page_unswapped(m);
					md_swap_page_free(m);
					m = NULL;
				}
			}
		}
		if (m != NULL) {
			vm_page_xunbusy(m);
			vm_page_lock(m);
			if (vm_page_active(m))
				vm_page_reference(m);
			else
				vm_page_activate(m);
			vm_page_unlock(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

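/*
 * The "null" backing store behaves as a bit bucket: reads return zeros and
 * writes are acknowledged but simply discarded.
 */
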
static int
mdstart_null(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			int isv = ((sc->flags & MD_VERIFY) != 0);

			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else if (sc->ident[0] != '\0' &&
			    g_handleattr_str(bp, "GEOM::ident", sc->ident))
				error = -1;
			else if (g_handleattr_int(bp, "MNT::verified", isv))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
	case MD_NULL:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}


static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credits in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE) \
	    | ((mdio->md_options & MD_VERIFY) ? O_VERIFY : 0);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	snprintf(sc->ident, sizeof(sc->ident), "MD-DEV%ju-INO%ju",
	    (uintmax_t)vattr.va_fsid, (uintmax_t)vattr.va_fileid);
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC | MD_VERIFY);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}

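/*
 * Tear-down order matters in mddestroy() below: the GEOM provider is
 * withered first so no new bios arrive, the worker thread is told to exit
 * and waited for, and only then are the backing vnode, credentials, swap
 * object and indir tree released.
 */
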
static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
	case MD_NULL:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	if ((mdio->md_options & MD_VERIFY) != 0)
		return (EINVAL);
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
mdcreate_null(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	return (0);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
		case MD_NULL:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_label != NULL)
			error = copyinstr(mdio->md_label, sc->label,
			    sizeof(sc->label), NULL);
		if (error != 0)
			goto err_after_new;
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl.  Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		case MD_NULL:
			sc->start = mdstart_null;
			error = mdcreate_null(sc, mdio, td);
			break;
		}
err_after_new:
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		error = 0;
		if (mdio->md_label != NULL) {
			error = copyout(sc->label, mdio->md_label,
			    strlen(sc->label) + 1);
		}
		if (sc->type == MD_VNODE ||
		    (sc->type == MD_PRELOAD && mdio->md_file != NULL))
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

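/*
 * For orientation, the usual path into the handler above is mdconfig(8),
 * which opens /dev/mdctl (MDCTL_NAME) and issues these ioctls; a rough
 * example from userland:
 *
 *	mdconfig -a -t vnode -f /path/to/image	# MDIOCATTACH, prints mdN
 *	mdconfig -l				# list configured devices
 *	mdconfig -d -u N			# MDIOCDETACH
 *
 * The exact option-to-ioctl mapping lives in mdconfig(8), not here.
 */
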
static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
	if (name != NULL)
		strlcpy(sc->file, name, sizeof(sc->file));
#if defined(MD_ROOT) && !defined(ROOTDEVNAME)
	if (sc->unit == 0)
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	} else {
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);
	}
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT
	if (mfs_root_size != 0) {
		sx_xlock(&md_sx);
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
		    NULL);
		sx_xunlock(&md_sx);
	}
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

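/*
 * Emit the per-provider configuration.  With indent == NULL this produces
 * the terse one-line form; otherwise it emits XML fragments.  The split
 * presumably corresponds to the GEOM framework's conftxt and confxml
 * dumps, which are the two ways dumpconf methods get invoked.
 */
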
"read-write": 1918 "read-only"); 1919 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 1920 type); 1921 if ((mp->type == MD_VNODE && mp->vnode != NULL) || 1922 (mp->type == MD_PRELOAD && mp->file[0] != '\0')) { 1923 sbuf_printf(sb, "%s<file>", indent); 1924 g_conf_printf_escaped(sb, "%s", mp->file); 1925 sbuf_printf(sb, "</file>\n"); 1926 } 1927 sbuf_printf(sb, "%s<label>", indent); 1928 g_conf_printf_escaped(sb, "%s", mp->label); 1929 sbuf_printf(sb, "</label>\n"); 1930 } 1931 } 1932 } 1933 1934 static void 1935 g_md_fini(struct g_class *mp __unused) 1936 { 1937 1938 sx_destroy(&md_sx); 1939 if (status_dev != NULL) 1940 destroy_dev(status_dev); 1941 delete_unrhdr(md_uh); 1942 } 1943