/*-
 * SPDX-License-Identifier: (Beerware AND BSD-3-Clause)
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based in the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */
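
/*
 * md(4): the memory-backed disk driver.  Each configured unit shows up as
 * a GEOM provider (md0, md1, ...) and is serviced by one of five backends:
 * MD_MALLOC (sectors in kernel memory, optionally compressed), MD_PRELOAD
 * (an image supplied by the loader or linked into the kernel), MD_VNODE
 * (backed by a file), MD_SWAP (backed by anonymous swap-pageable memory)
 * and MD_NULL (a discard device).  Units are created and destroyed via
 * ioctls on /dev/mdctl, normally issued through mdconfig(8).
 */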

#include "opt_rootdevname.h"
#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/disk.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * We put the mfs_root symbol into the oldmfs section of the kernel object
 * file.  Applications that patch the object with the image can determine
 * the size by looking at the oldmfs section size within the kernel.
 */
u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);


static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static int md_vnode_pbuf_freecnt;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	char ident[32];
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	char label[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}
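
/*
 * The indir structures form a radix tree over the device's sectors, with
 * NINDIR entries per node.  Each leaf slot is a uintptr_t encoding one
 * sector: 0 means "never written" (reads back as zeros), a value in
 * 1..255 means every byte of the sector holds that value (the MD_COMPRESS
 * representation), and anything larger is a pointer to a sector-sized
 * allocation from the device's uma zone.  destroy_indir() above relies
 * on this encoding when deciding what to free.
 */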

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}


static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}
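
/*
 * Opcodes for the md_malloc_move_{ma,vlist}() helpers below.  ZERO and
 * FILL write a constant pattern into the bio's pages, READ/WRITE copy a
 * sector between the bio's pages and a backing-store buffer, and CMP
 * scans the sector to see whether every byte matches the first one
 * (failing with EDOOFUS on the first mismatch), which is how the
 * MD_COMPRESS write path detects single-byte-run sectors.
 */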

#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5

static int
md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
	first = 0;
	first_read = 0;
	uc = ptr;
	mp1 = *mp;
	ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}

static int
md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
    unsigned len, void *ptr, u_char fill, int op)
{
	bus_dma_segment_t *vlist;
	uint8_t *p, *end, first;
	off_t *uc;
	int ma_offs, seg_len;

	vlist = *pvlist;
	ma_offs = *pma_offs;
	uc = ptr;

	for (; len != 0; len -= seg_len) {
		seg_len = imin(vlist->ds_len - ma_offs, len);
		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, seg_len);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, seg_len);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, seg_len);
			cpu_flush_dcache(p, seg_len);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, seg_len);
			break;
		case MD_MALLOC_MOVE_CMP:
			end = p + seg_len;
			first = *uc = *p;
			/* Confirm all following bytes match the first */
			while (++p < end) {
				if (*p != first)
					return (EDOOFUS);
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
			break;
		}

		ma_offs += seg_len;
		if (ma_offs == vlist->ds_len) {
			ma_offs = 0;
			vlist++;
		}
		ptr = (uint8_t *)ptr + seg_len;
	}
	*pvlist = vlist;
	*pma_offs = ma_offs;

	return (0);
}
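
/*
 * A bio hands us its payload in one of three forms: a mapped kernel
 * buffer (bio_data), an unmapped run of vm pages (BIO_UNMAPPED, via
 * bio_ma/bio_ma_offset), or a scatter/gather list of physical segments
 * (BIO_VLIST, with bus_dma_segment_t entries in bio_data).  Each
 * mdstart_*() backend below has to cope with all three.
 */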

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	bus_dma_segment_t *vlist;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
	} else if (vlist != NULL) {
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize,
					    (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
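			/*
			 * For an MD_COMPRESS device, first check whether the
			 * incoming sector is a single repeated byte.  If it
			 * is, the byte value itself (returned via uc) is
			 * stored in the indir tree and no backing storage
			 * needs to be allocated.
			 */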
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else if (vlist != NULL) {
					error1 = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)sp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)osp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped && vlist == NULL)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static void
mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
		    seg_len);
		offset = 0;
		src = (uint8_t *)src + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static void
mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
		    seg_len);
		offset = 0;
		dst = (uint8_t *)dst + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{
	uint8_t *p;

	p = sc->pl_ptr + bp->bio_offset;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, bp->bio_length);
		} else {
			bcopy(p, bp->bio_data, bp->bio_length);
		}
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, p, bp->bio_length);
		} else {
			bcopy(bp->bio_data, p, bp->bio_length);
		}
		break;
	}
	bp->bio_resid = 0;
	return (0);
}
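
/*
 * MD_VNODE backend: turn each bio into VOP_READ()/VOP_WRITE() on the
 * backing vnode.  BIO_FLUSH becomes VOP_FSYNC(), and BIO_DELETE is
 * emulated by writing from the kernel's preallocated zero_region.
 */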

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct iovec *piov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	bus_dma_segment_t *vlist;
	struct thread *td;
	off_t iolen, len, zerosize;
	int ma_offs, npages;

	switch (bp->bio_cmd) {
	case BIO_READ:
		auio.uio_rw = UIO_READ;
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		auio.uio_rw = UIO_WRITE;
		break;
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;
	pb = NULL;
	piov = NULL;
	ma_offs = bp->bio_ma_offset;
	len = bp->bio_length;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
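
	/*
	 * Build the iovec(s) describing the transfer.  BIO_DELETE points
	 * successive iovecs at the kernel's zero_region, BIO_VLIST
	 * translates the physical segment list, BIO_UNMAPPED maps the
	 * bio's pages through a pbuf at most MAXPHYS bytes at a time, and
	 * an ordinary mapped bio uses bio_data directly.
	 */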
	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * Emulate BIO_DELETE by writing zeros.
		 */
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
		piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		while (len > 0) {
			piov->iov_base = __DECONST(void *, zero_region);
			piov->iov_len = len;
			if (len > zerosize)
				piov->iov_len = zerosize;
			len -= piov->iov_len;
			piov++;
		}
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		vlist = (bus_dma_segment_t *)bp->bio_data;
		while (len > 0) {
			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
			    ma_offs);
			piov->iov_len = vlist->ds_len - ma_offs;
			if (piov->iov_len > len)
				piov->iov_len = len;
			len -= piov->iov_len;
			ma_offs = 0;
			vlist++;
			piov++;
		}
		auio.uio_iovcnt = piov - auio.uio_iov;
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		bp->bio_resid = len;
unmapped_step:
		npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
		    PAGE_MASK))));
		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
		KASSERT(iolen > 0, ("zero iolen"));
		pmap_qenter((vm_offset_t)pb->b_data,
		    &bp->bio_ma[atop(ma_offs)], npages);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    (ma_offs & PAGE_MASK));
		aiov.iov_len = iolen;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = iolen;
	} else {
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
	}
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error == 0)
			sc->flags &= ~MD_VERIFY;
	}

	if (pb != NULL) {
		pmap_qremove((vm_offset_t)pb->b_data, npages);
		if (error == 0) {
			len -= iolen;
			bp->bio_resid -= iolen;
			ma_offs += iolen;
			if (len > 0)
				goto unmapped_step;
		}
		relpbuf(pb, &md_vnode_pbuf_freecnt);
	}

	free(piov, M_MD);
	if (pb == NULL)
		bp->bio_resid = auio.uio_resid;
	return (error);
}

static void
md_swap_page_free(vm_page_t m)
{

	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_free(m);
	vm_page_unlock(m);
}
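
/*
 * MD_SWAP backend: the disk's contents live in the pages of an anonymous
 * OBJT_SWAP vm object.  Each bio grabs the pages it covers, pulls them in
 * from the swap pager when necessary, and copies data to or from the bio;
 * BIO_DELETE of a whole page frees the page and its swap space outright.
 */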

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	bus_dma_segment_t *vlist;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
	    bp->bio_ma_offset : 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
				    vlist, ma_offs, len);
				cpu_flush_dcache(p, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL)
				pmap_zero_page(m);

			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyin_vlist(vlist, ma_offs,
				    VM_PAGE_TO_PHYS(m) + offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}

			m->valid = VM_PAGE_BITS_ALL;
			if (m->dirty != VM_PAGE_BITS_ALL) {
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				md_swap_page_free(m);
				m = NULL;
			} else {
				/* Page is valid. */
				if (len != PAGE_SIZE) {
					pmap_zero_page_area(m, offs, len);
					if (m->dirty != VM_PAGE_BITS_ALL) {
						vm_page_dirty(m);
						vm_pager_page_unswapped(m);
					}
				} else {
					vm_pager_page_unswapped(m);
					md_swap_page_free(m);
					m = NULL;
				}
			}
		}
		if (m != NULL) {
			vm_page_xunbusy(m);
			vm_page_lock(m);
			if (vm_page_active(m))
				vm_page_reference(m);
			else
				vm_page_activate(m);
			vm_page_unlock(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static int
mdstart_null(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		break;
	}
	bp->bio_resid = 0;
	return (0);
}
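
/*
 * Per-unit worker thread.  It drains the bio queue filled by g_md_start(),
 * answers BIO_GETATTR queries inline, and passes everything else to the
 * backend's start method.  A return value of -1 means the bio has already
 * been completed (by one of the g_handleattr_*() helpers), so it must not
 * be delivered a second time.
 */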

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			int isv = ((sc->flags & MD_VERIFY) != 0);

			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else if (sc->ident[0] != '\0' &&
			    g_handleattr_str(bp, "GEOM::ident", sc->ident))
				error = -1;
			else if (g_handleattr_int(bp, "MNT::verified", isv))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}
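
/*
 * Publish a configured unit to GEOM: create its geom and provider and
 * register a devstat entry.  The malloc, vnode and swap backends accept
 * unmapped bios, so their providers advertise G_PF_ACCEPT_UNMAPPED.
 */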

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
	case MD_NULL:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}
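
/*
 * With MD_RESERVE, mdcreate_malloc() above walks every sector and
 * allocates its backing storage up front, so later writes cannot fail
 * with ENOSPC; and since fully preallocated sectors would defeat the
 * byte-run compression anyway, MD_RESERVE clears MD_COMPRESS.
 */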

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE) |
	    ((mdio->md_options & MD_VERIFY) ? O_VERIFY : 0);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	snprintf(sc->ident, sizeof(sc->ident), "MD-DEV%ju-INO%ju",
	    (uintmax_t)vattr.va_fsid, (uintmax_t)vattr.va_fileid);
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC | MD_VERIFY);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}
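
/*
 * Common tear-down path, used both by MDIOCDETACH and to unwind a failed
 * attach: wither the geom, signal the worker thread to exit and wait for
 * it, then release the vnode, swap object, indir tree, uma zone and unit
 * number, whichever of those the backend actually allocated.
 */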

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
	case MD_NULL:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	if ((mdio->md_options & MD_VERIFY) != 0)
		return (EINVAL);
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}
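
/*
 * From userland the swap backend is typically reached through
 * mdconfig(8), e.g. "mdconfig -a -t swap -s 64m"; adding "-o reserve"
 * sets MD_RESERVE so the backing swap is reserved at attach time rather
 * than as pages are first dirtied (see mdconfig(8) for details).
 */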

static int
mdcreate_null(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{

	/*
	 * Range check.  Disallow negative sizes and sizes not being
	 * multiple of page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	return (0);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
		case MD_NULL:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_label != NULL)
			error = copyinstr(mdio->md_label, sc->label,
			    sizeof(sc->label), NULL);
		if (error != 0)
			goto err_after_new;
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl.  Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		case MD_NULL:
			sc->start = mdstart_null;
			error = mdcreate_null(sc, mdio, td);
			break;
		}
err_after_new:
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		error = 0;
		if (mdio->md_label != NULL) {
			error = copyout(sc->label, mdio->md_label,
			    strlen(sc->label) + 1);
		}
		if (sc->type == MD_VNODE ||
		    (sc->type == MD_PRELOAD && mdio->md_file != NULL))
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
	if (name != NULL)
		strlcpy(sc->file, name, sizeof(sc->file));
#ifdef MD_ROOT
	if (sc->unit == 0) {
#ifndef ROOTDEVNAME
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
#ifdef MD_ROOT_READONLY
		sc->flags |= MD_READONLY;
#endif
	}
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	} else {
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);
	}
}
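
/*
 * GEOM class initialization: compute nshift (log2 of NINDIR) for the
 * indir tree, attach a statically linked mfs_root image if the kernel
 * has one, then scan the loader's preloaded modules for md_image or
 * mfs_root entries and attach each as an MD_PRELOAD unit, and finally
 * create the /dev/mdctl control device.
 */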

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT
	if (mfs_root_size != 0) {
		sx_xlock(&md_sx);
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
		    NULL);
		sx_xunlock(&md_sx);
	}
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	case MD_NULL:
		type = "null";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
			    (mp->type == MD_PRELOAD && mp->file[0] != '\0'))
				sbuf_printf(sb, " file %s", mp->file);
			sbuf_printf(sb, " label %s", mp->label);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			if (mp->ident[0] != '\0') {
				sbuf_printf(sb, "%s<ident>", indent);
				g_conf_printf_escaped(sb, "%s", mp->ident);
				sbuf_printf(sb, "</ident>\n");
			}
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
"read-write": 1926 "read-only"); 1927 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 1928 type); 1929 if ((mp->type == MD_VNODE && mp->vnode != NULL) || 1930 (mp->type == MD_PRELOAD && mp->file[0] != '\0')) { 1931 sbuf_printf(sb, "%s<file>", indent); 1932 g_conf_printf_escaped(sb, "%s", mp->file); 1933 sbuf_printf(sb, "</file>\n"); 1934 } 1935 sbuf_printf(sb, "%s<label>", indent); 1936 g_conf_printf_escaped(sb, "%s", mp->label); 1937 sbuf_printf(sb, "</label>\n"); 1938 } 1939 } 1940 } 1941 1942 static void 1943 g_md_fini(struct g_class *mp __unused) 1944 { 1945 1946 sx_destroy(&md_sx); 1947 if (status_dev != NULL) 1948 destroy_dev(status_dev); 1949 delete_unrhdr(md_uh); 1950 } 1951