/*-
 * SPDX-License-Identifier: (Beerware AND BSD-3-Clause)
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_rootdevname.h"
#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/disk.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */
#define	MD_PROVIDERGONE 0x40000		/* Safe to free the softc */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

struct md_req {
	unsigned	md_unit;	/* unit number */
	enum md_types	md_type;	/* type of disk */
	off_t		md_mediasize;	/* size of disk in bytes */
	unsigned	md_sectorsize;	/* sectorsize */
	unsigned	md_options;	/* options */
	int		md_fwheads;	/* firmware heads */
	int		md_fwsectors;	/* firmware sectors */
	char		*md_file;	/* pathname of file to mount */
	enum uio_seg	md_file_seg;	/* location of md_file */
	char		*md_label;	/* label of the device (userspace) */
	int		*md_units;	/* pointer to units array (kernel) */
	size_t		md_units_nitems; /* items in md_units array */
};

#ifdef COMPAT_FREEBSD32
struct md_ioctl32 {
	unsigned	md_version;
	unsigned	md_unit;
	enum md_types	md_type;
	uint32_t	md_file;
	off_t		md_mediasize;
	unsigned	md_sectorsize;
	unsigned	md_options;
	uint64_t	md_base;
	int		md_fwheads;
	int		md_fwsectors;
	uint32_t	md_label;
	int		md_pad[MDNPAD];
}
#ifdef __amd64__
__attribute__((__packed__))
#endif
;
#ifndef __amd64__
CTASSERT((sizeof(struct md_ioctl32)) == 440);
#else
CTASSERT((sizeof(struct md_ioctl32)) == 436);
#endif

#define	MDIOCATTACH_32	_IOC_NEWTYPE(MDIOCATTACH, struct md_ioctl32)
#define	MDIOCDETACH_32	_IOC_NEWTYPE(MDIOCDETACH, struct md_ioctl32)
#define	MDIOCQUERY_32	_IOC_NEWTYPE(MDIOCQUERY, struct md_ioctl32)
#define	MDIOCRESIZE_32	_IOC_NEWTYPE(MDIOCRESIZE, struct md_ioctl32)
#endif /* COMPAT_FREEBSD32 */

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

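/*
 * The debug.mddebug sysctl enables the verbose printf()s in s_read(),
 * s_write() and mdctlioctl() below; vm.md_malloc_wait makes the allocations
 * done on behalf of malloc-backed (MD_MALLOC) devices use M_WAITOK instead
 * of M_NOWAIT.
 */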
#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * We put the mfs_root symbol into the oldmfs section of the kernel object file.
 * Applications that patch the object with the image can determine
 * the size by looking at the oldmfs section size within the kernel.
 */
u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#elif defined(MD_ROOT_MEM)
/* MD region already mapped in the memory */
u_char *mfs_root;
int mfs_root_size;
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);
static g_provgone_t g_md_providergone;

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
	.providergone = g_md_providergone,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);
MODULE_VERSION(geom_md, 0);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	char ident[32];
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;
	bool candelete;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	char label[PATH_MAX];
	struct ucred *cred;
	vm_offset_t kva;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ?
M_WAITOK : M_NOWAIT) | M_ZERO); 301 if (ip->array == NULL) { 302 free(ip, M_MD); 303 return (NULL); 304 } 305 ip->total = NINDIR; 306 ip->shift = shift; 307 return (ip); 308 } 309 310 static void 311 del_indir(struct indir *ip) 312 { 313 314 free(ip->array, M_MDSECT); 315 free(ip, M_MD); 316 } 317 318 static void 319 destroy_indir(struct md_s *sc, struct indir *ip) 320 { 321 int i; 322 323 for (i = 0; i < NINDIR; i++) { 324 if (!ip->array[i]) 325 continue; 326 if (ip->shift) 327 destroy_indir(sc, (struct indir*)(ip->array[i])); 328 else if (ip->array[i] > 255) 329 uma_zfree(sc->uma, (void *)(ip->array[i])); 330 } 331 del_indir(ip); 332 } 333 334 /* 335 * This function does the math and allocates the top level "indir" structure 336 * for a device of "size" sectors. 337 */ 338 339 static struct indir * 340 dimension(off_t size) 341 { 342 off_t rcnt; 343 struct indir *ip; 344 int layer; 345 346 rcnt = size; 347 layer = 0; 348 while (rcnt > NINDIR) { 349 rcnt /= NINDIR; 350 layer++; 351 } 352 353 /* 354 * XXX: the top layer is probably not fully populated, so we allocate 355 * too much space for ip->array in here. 356 */ 357 ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO); 358 ip->array = malloc(sizeof(uintptr_t) * NINDIR, 359 M_MDSECT, M_WAITOK | M_ZERO); 360 ip->total = NINDIR; 361 ip->shift = layer * nshift; 362 return (ip); 363 } 364 365 /* 366 * Read a given sector 367 */ 368 369 static uintptr_t 370 s_read(struct indir *ip, off_t offset) 371 { 372 struct indir *cip; 373 int idx; 374 uintptr_t up; 375 376 if (md_debug > 1) 377 printf("s_read(%jd)\n", (intmax_t)offset); 378 up = 0; 379 for (cip = ip; cip != NULL;) { 380 if (cip->shift) { 381 idx = (offset >> cip->shift) & NMASK; 382 up = cip->array[idx]; 383 cip = (struct indir *)up; 384 continue; 385 } 386 idx = offset & NMASK; 387 return (cip->array[idx]); 388 } 389 return (0); 390 } 391 392 /* 393 * Write a given sector, prune the tree if the value is 0 394 */ 395 396 static int 397 s_write(struct indir *ip, off_t offset, uintptr_t ptr) 398 { 399 struct indir *cip, *lip[10]; 400 int idx, li; 401 uintptr_t up; 402 403 if (md_debug > 1) 404 printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr); 405 up = 0; 406 li = 0; 407 cip = ip; 408 for (;;) { 409 lip[li++] = cip; 410 if (cip->shift) { 411 idx = (offset >> cip->shift) & NMASK; 412 up = cip->array[idx]; 413 if (up != 0) { 414 cip = (struct indir *)up; 415 continue; 416 } 417 /* Allocate branch */ 418 cip->array[idx] = 419 (uintptr_t)new_indir(cip->shift - nshift); 420 if (cip->array[idx] == 0) 421 return (ENOSPC); 422 cip->used++; 423 up = cip->array[idx]; 424 cip = (struct indir *)up; 425 continue; 426 } 427 /* leafnode */ 428 idx = offset & NMASK; 429 up = cip->array[idx]; 430 if (up != 0) 431 cip->used--; 432 cip->array[idx] = ptr; 433 if (ptr != 0) 434 cip->used++; 435 break; 436 } 437 if (cip->used != 0 || li == 1) 438 return (0); 439 li--; 440 while (cip->used == 0 && cip != ip) { 441 li--; 442 idx = (offset >> lip[li]->shift) & NMASK; 443 up = lip[li]->array[idx]; 444 KASSERT(up == (uintptr_t)cip, ("md screwed up")); 445 del_indir(cip); 446 lip[li]->array[idx] = 0; 447 lip[li]->used--; 448 cip = lip[li]; 449 } 450 return (0); 451 } 452 453 static int 454 g_md_access(struct g_provider *pp, int r, int w, int e) 455 { 456 struct md_s *sc; 457 458 sc = pp->geom->softc; 459 if (sc == NULL) { 460 if (r <= 0 && w <= 0 && e <= 0) 461 return (0); 462 return (ENXIO); 463 } 464 r += pp->acr; 465 w += pp->acw; 466 e += pp->ace; 467 if ((sc->flags & MD_READONLY) != 0 && w > 0) 468 
return (EROFS); 469 if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) { 470 sc->opencount = 1; 471 } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) { 472 sc->opencount = 0; 473 } 474 return (0); 475 } 476 477 static void 478 g_md_start(struct bio *bp) 479 { 480 struct md_s *sc; 481 482 sc = bp->bio_to->geom->softc; 483 if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) { 484 devstat_start_transaction_bio(sc->devstat, bp); 485 } 486 mtx_lock(&sc->queue_mtx); 487 bioq_disksort(&sc->bio_queue, bp); 488 wakeup(sc); 489 mtx_unlock(&sc->queue_mtx); 490 } 491 492 #define MD_MALLOC_MOVE_ZERO 1 493 #define MD_MALLOC_MOVE_FILL 2 494 #define MD_MALLOC_MOVE_READ 3 495 #define MD_MALLOC_MOVE_WRITE 4 496 #define MD_MALLOC_MOVE_CMP 5 497 498 static int 499 md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize, 500 void *ptr, u_char fill, int op) 501 { 502 struct sf_buf *sf; 503 vm_page_t m, *mp1; 504 char *p, first; 505 off_t *uc; 506 unsigned n; 507 int error, i, ma_offs1, sz, first_read; 508 509 m = NULL; 510 error = 0; 511 sf = NULL; 512 /* if (op == MD_MALLOC_MOVE_CMP) { gcc */ 513 first = 0; 514 first_read = 0; 515 uc = ptr; 516 mp1 = *mp; 517 ma_offs1 = *ma_offs; 518 /* } */ 519 sched_pin(); 520 for (n = sectorsize; n != 0; n -= sz) { 521 sz = imin(PAGE_SIZE - *ma_offs, n); 522 if (m != **mp) { 523 if (sf != NULL) 524 sf_buf_free(sf); 525 m = **mp; 526 sf = sf_buf_alloc(m, SFB_CPUPRIVATE | 527 (md_malloc_wait ? 0 : SFB_NOWAIT)); 528 if (sf == NULL) { 529 error = ENOMEM; 530 break; 531 } 532 } 533 p = (char *)sf_buf_kva(sf) + *ma_offs; 534 switch (op) { 535 case MD_MALLOC_MOVE_ZERO: 536 bzero(p, sz); 537 break; 538 case MD_MALLOC_MOVE_FILL: 539 memset(p, fill, sz); 540 break; 541 case MD_MALLOC_MOVE_READ: 542 bcopy(ptr, p, sz); 543 cpu_flush_dcache(p, sz); 544 break; 545 case MD_MALLOC_MOVE_WRITE: 546 bcopy(p, ptr, sz); 547 break; 548 case MD_MALLOC_MOVE_CMP: 549 for (i = 0; i < sz; i++, p++) { 550 if (!first_read) { 551 *uc = (u_char)*p; 552 first = *p; 553 first_read = 1; 554 } else if (*p != first) { 555 error = EDOOFUS; 556 break; 557 } 558 } 559 break; 560 default: 561 KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op)); 562 break; 563 } 564 if (error != 0) 565 break; 566 *ma_offs += sz; 567 *ma_offs %= PAGE_SIZE; 568 if (*ma_offs == 0) 569 (*mp)++; 570 ptr = (char *)ptr + sz; 571 } 572 573 if (sf != NULL) 574 sf_buf_free(sf); 575 sched_unpin(); 576 if (op == MD_MALLOC_MOVE_CMP && error != 0) { 577 *mp = mp1; 578 *ma_offs = ma_offs1; 579 } 580 return (error); 581 } 582 583 static int 584 md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs, 585 unsigned len, void *ptr, u_char fill, int op) 586 { 587 bus_dma_segment_t *vlist; 588 uint8_t *p, *end, first; 589 off_t *uc; 590 int ma_offs, seg_len; 591 592 vlist = *pvlist; 593 ma_offs = *pma_offs; 594 uc = ptr; 595 596 for (; len != 0; len -= seg_len) { 597 seg_len = imin(vlist->ds_len - ma_offs, len); 598 p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs; 599 switch (op) { 600 case MD_MALLOC_MOVE_ZERO: 601 bzero(p, seg_len); 602 break; 603 case MD_MALLOC_MOVE_FILL: 604 memset(p, fill, seg_len); 605 break; 606 case MD_MALLOC_MOVE_READ: 607 bcopy(ptr, p, seg_len); 608 cpu_flush_dcache(p, seg_len); 609 break; 610 case MD_MALLOC_MOVE_WRITE: 611 bcopy(p, ptr, seg_len); 612 break; 613 case MD_MALLOC_MOVE_CMP: 614 end = p + seg_len; 615 first = *uc = *p; 616 /* Confirm all following bytes match the first */ 617 while (++p < end) { 618 if (*p != first) 619 return (EDOOFUS); 620 } 621 break; 622 
default: 623 KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op)); 624 break; 625 } 626 627 ma_offs += seg_len; 628 if (ma_offs == vlist->ds_len) { 629 ma_offs = 0; 630 vlist++; 631 } 632 ptr = (uint8_t *)ptr + seg_len; 633 } 634 *pvlist = vlist; 635 *pma_offs = ma_offs; 636 637 return (0); 638 } 639 640 static int 641 mdstart_malloc(struct md_s *sc, struct bio *bp) 642 { 643 u_char *dst; 644 vm_page_t *m; 645 bus_dma_segment_t *vlist; 646 int i, error, error1, ma_offs, notmapped; 647 off_t secno, nsec, uc; 648 uintptr_t sp, osp; 649 650 switch (bp->bio_cmd) { 651 case BIO_READ: 652 case BIO_WRITE: 653 case BIO_DELETE: 654 break; 655 case BIO_FLUSH: 656 return (0); 657 default: 658 return (EOPNOTSUPP); 659 } 660 661 notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0; 662 vlist = (bp->bio_flags & BIO_VLIST) != 0 ? 663 (bus_dma_segment_t *)bp->bio_data : NULL; 664 if (notmapped) { 665 m = bp->bio_ma; 666 ma_offs = bp->bio_ma_offset; 667 dst = NULL; 668 KASSERT(vlist == NULL, ("vlists cannot be unmapped")); 669 } else if (vlist != NULL) { 670 ma_offs = bp->bio_ma_offset; 671 dst = NULL; 672 } else { 673 dst = bp->bio_data; 674 } 675 676 nsec = bp->bio_length / sc->sectorsize; 677 secno = bp->bio_offset / sc->sectorsize; 678 error = 0; 679 while (nsec--) { 680 osp = s_read(sc->indir, secno); 681 if (bp->bio_cmd == BIO_DELETE) { 682 if (osp != 0) 683 error = s_write(sc->indir, secno, 0); 684 } else if (bp->bio_cmd == BIO_READ) { 685 if (osp == 0) { 686 if (notmapped) { 687 error = md_malloc_move_ma(&m, &ma_offs, 688 sc->sectorsize, NULL, 0, 689 MD_MALLOC_MOVE_ZERO); 690 } else if (vlist != NULL) { 691 error = md_malloc_move_vlist(&vlist, 692 &ma_offs, sc->sectorsize, NULL, 0, 693 MD_MALLOC_MOVE_ZERO); 694 } else 695 bzero(dst, sc->sectorsize); 696 } else if (osp <= 255) { 697 if (notmapped) { 698 error = md_malloc_move_ma(&m, &ma_offs, 699 sc->sectorsize, NULL, osp, 700 MD_MALLOC_MOVE_FILL); 701 } else if (vlist != NULL) { 702 error = md_malloc_move_vlist(&vlist, 703 &ma_offs, sc->sectorsize, NULL, osp, 704 MD_MALLOC_MOVE_FILL); 705 } else 706 memset(dst, osp, sc->sectorsize); 707 } else { 708 if (notmapped) { 709 error = md_malloc_move_ma(&m, &ma_offs, 710 sc->sectorsize, (void *)osp, 0, 711 MD_MALLOC_MOVE_READ); 712 } else if (vlist != NULL) { 713 error = md_malloc_move_vlist(&vlist, 714 &ma_offs, sc->sectorsize, 715 (void *)osp, 0, 716 MD_MALLOC_MOVE_READ); 717 } else { 718 bcopy((void *)osp, dst, sc->sectorsize); 719 cpu_flush_dcache(dst, sc->sectorsize); 720 } 721 } 722 osp = 0; 723 } else if (bp->bio_cmd == BIO_WRITE) { 724 if (sc->flags & MD_COMPRESS) { 725 if (notmapped) { 726 error1 = md_malloc_move_ma(&m, &ma_offs, 727 sc->sectorsize, &uc, 0, 728 MD_MALLOC_MOVE_CMP); 729 i = error1 == 0 ? sc->sectorsize : 0; 730 } else if (vlist != NULL) { 731 error1 = md_malloc_move_vlist(&vlist, 732 &ma_offs, sc->sectorsize, &uc, 0, 733 MD_MALLOC_MOVE_CMP); 734 i = error1 == 0 ? sc->sectorsize : 0; 735 } else { 736 uc = dst[0]; 737 for (i = 1; i < sc->sectorsize; i++) { 738 if (dst[i] != uc) 739 break; 740 } 741 } 742 } else { 743 i = 0; 744 uc = 0; 745 } 746 if (i == sc->sectorsize) { 747 if (osp != uc) 748 error = s_write(sc->indir, secno, uc); 749 } else { 750 if (osp <= 255) { 751 sp = (uintptr_t)uma_zalloc(sc->uma, 752 md_malloc_wait ? 
M_WAITOK : 753 M_NOWAIT); 754 if (sp == 0) { 755 error = ENOSPC; 756 break; 757 } 758 if (notmapped) { 759 error = md_malloc_move_ma(&m, 760 &ma_offs, sc->sectorsize, 761 (void *)sp, 0, 762 MD_MALLOC_MOVE_WRITE); 763 } else if (vlist != NULL) { 764 error = md_malloc_move_vlist( 765 &vlist, &ma_offs, 766 sc->sectorsize, (void *)sp, 767 0, MD_MALLOC_MOVE_WRITE); 768 } else { 769 bcopy(dst, (void *)sp, 770 sc->sectorsize); 771 } 772 error = s_write(sc->indir, secno, sp); 773 } else { 774 if (notmapped) { 775 error = md_malloc_move_ma(&m, 776 &ma_offs, sc->sectorsize, 777 (void *)osp, 0, 778 MD_MALLOC_MOVE_WRITE); 779 } else if (vlist != NULL) { 780 error = md_malloc_move_vlist( 781 &vlist, &ma_offs, 782 sc->sectorsize, (void *)osp, 783 0, MD_MALLOC_MOVE_WRITE); 784 } else { 785 bcopy(dst, (void *)osp, 786 sc->sectorsize); 787 } 788 osp = 0; 789 } 790 } 791 } else { 792 error = EOPNOTSUPP; 793 } 794 if (osp > 255) 795 uma_zfree(sc->uma, (void*)osp); 796 if (error != 0) 797 break; 798 secno++; 799 if (!notmapped && vlist == NULL) 800 dst += sc->sectorsize; 801 } 802 bp->bio_resid = 0; 803 return (error); 804 } 805 806 static void 807 mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len) 808 { 809 off_t seg_len; 810 811 while (offset >= vlist->ds_len) { 812 offset -= vlist->ds_len; 813 vlist++; 814 } 815 816 while (len != 0) { 817 seg_len = omin(len, vlist->ds_len - offset); 818 bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset), 819 seg_len); 820 offset = 0; 821 src = (uint8_t *)src + seg_len; 822 len -= seg_len; 823 vlist++; 824 } 825 } 826 827 static void 828 mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len) 829 { 830 off_t seg_len; 831 832 while (offset >= vlist->ds_len) { 833 offset -= vlist->ds_len; 834 vlist++; 835 } 836 837 while (len != 0) { 838 seg_len = omin(len, vlist->ds_len - offset); 839 bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst, 840 seg_len); 841 offset = 0; 842 dst = (uint8_t *)dst + seg_len; 843 len -= seg_len; 844 vlist++; 845 } 846 } 847 848 static int 849 mdstart_preload(struct md_s *sc, struct bio *bp) 850 { 851 uint8_t *p; 852 853 p = sc->pl_ptr + bp->bio_offset; 854 switch (bp->bio_cmd) { 855 case BIO_READ: 856 if ((bp->bio_flags & BIO_VLIST) != 0) { 857 mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data, 858 bp->bio_ma_offset, bp->bio_length); 859 } else { 860 bcopy(p, bp->bio_data, bp->bio_length); 861 } 862 cpu_flush_dcache(bp->bio_data, bp->bio_length); 863 break; 864 case BIO_WRITE: 865 if ((bp->bio_flags & BIO_VLIST) != 0) { 866 mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data, 867 bp->bio_ma_offset, p, bp->bio_length); 868 } else { 869 bcopy(bp->bio_data, p, bp->bio_length); 870 } 871 break; 872 } 873 bp->bio_resid = 0; 874 return (0); 875 } 876 877 static int 878 mdstart_vnode(struct md_s *sc, struct bio *bp) 879 { 880 int error; 881 struct uio auio; 882 struct iovec aiov; 883 struct iovec *piov; 884 struct mount *mp; 885 struct vnode *vp; 886 bus_dma_segment_t *vlist; 887 struct thread *td; 888 off_t iolen, iostart, off, len; 889 int ma_offs, npages; 890 bool mapped; 891 892 switch (bp->bio_cmd) { 893 case BIO_READ: 894 auio.uio_rw = UIO_READ; 895 break; 896 case BIO_WRITE: 897 auio.uio_rw = UIO_WRITE; 898 break; 899 case BIO_FLUSH: 900 break; 901 case BIO_DELETE: 902 if (sc->candelete) 903 break; 904 /* FALLTHROUGH */ 905 default: 906 return (EOPNOTSUPP); 907 } 908 909 td = curthread; 910 vp = sc->vnode; 911 piov = NULL; 912 ma_offs = bp->bio_ma_offset; 913 off = bp->bio_offset; 914 len = 
bp->bio_length;
	mapped = false;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		do {
			(void)vn_start_write(vp, &mp, V_WAIT);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_FSYNC(vp, MNT_WAIT, td);
			VOP_UNLOCK(vp);
			vn_finished_write(mp);
		} while (error == ERELOOKUP);
		return (error);
	} else if (bp->bio_cmd == BIO_DELETE) {
		error = vn_deallocate(vp, &off, &len, 0,
		    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred, NOCRED);
		bp->bio_resid = len;
		return (error);
	}

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		vlist = (bus_dma_segment_t *)bp->bio_data;
		while (len > 0) {
			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
			    ma_offs);
			piov->iov_len = vlist->ds_len - ma_offs;
			if (piov->iov_len > len)
				piov->iov_len = len;
			len -= piov->iov_len;
			ma_offs = 0;
			vlist++;
			piov++;
		}
		auio.uio_iovcnt = piov - auio.uio_iov;
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_resid = len;
unmapped_step:
		npages = atop(min(maxphys, round_page(len + (ma_offs &
		    PAGE_MASK))));
		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
		KASSERT(iolen > 0, ("zero iolen"));
		KASSERT(npages <= atop(MAXPHYS + PAGE_SIZE),
		    ("npages %d too large", npages));
		pmap_qenter(sc->kva, &bp->bio_ma[atop(ma_offs)], npages);
		aiov.iov_base = (void *)(sc->kva + (ma_offs & PAGE_MASK));
		aiov.iov_len = iolen;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = iolen;
		mapped = true;
	} else {
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
	}
	iostart = auio.uio_offset;
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, 0, sc->cred);
		VOP_UNLOCK(vp);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp);
		vn_finished_write(mp);
		if (error == 0)
			sc->flags &= ~MD_VERIFY;
	}

	/* Unless MD_CACHE is set, try to avoid double-caching the data.
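	 * The POSIX_FADV_DONTNEED advice below lets the backing file system
	 * drop the just-transferred range from its own caches.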
*/ 1002 if (error == 0 && (sc->flags & MD_CACHE) == 0) 1003 VOP_ADVISE(vp, iostart, auio.uio_offset - 1, 1004 POSIX_FADV_DONTNEED); 1005 1006 if (mapped) { 1007 pmap_qremove(sc->kva, npages); 1008 if (error == 0) { 1009 len -= iolen; 1010 bp->bio_resid -= iolen; 1011 ma_offs += iolen; 1012 if (len > 0) 1013 goto unmapped_step; 1014 } 1015 } else { 1016 bp->bio_resid = auio.uio_resid; 1017 } 1018 1019 free(piov, M_MD); 1020 return (error); 1021 } 1022 1023 static int 1024 mdstart_swap(struct md_s *sc, struct bio *bp) 1025 { 1026 vm_page_t m; 1027 u_char *p; 1028 vm_pindex_t i, lastp; 1029 bus_dma_segment_t *vlist; 1030 int rv, ma_offs, offs, len, lastend; 1031 1032 switch (bp->bio_cmd) { 1033 case BIO_READ: 1034 case BIO_WRITE: 1035 case BIO_DELETE: 1036 break; 1037 case BIO_FLUSH: 1038 return (0); 1039 default: 1040 return (EOPNOTSUPP); 1041 } 1042 1043 p = bp->bio_data; 1044 ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ? 1045 bp->bio_ma_offset : 0; 1046 vlist = (bp->bio_flags & BIO_VLIST) != 0 ? 1047 (bus_dma_segment_t *)bp->bio_data : NULL; 1048 1049 /* 1050 * offs is the offset at which to start operating on the 1051 * next (ie, first) page. lastp is the last page on 1052 * which we're going to operate. lastend is the ending 1053 * position within that last page (ie, PAGE_SIZE if 1054 * we're operating on complete aligned pages). 1055 */ 1056 offs = bp->bio_offset % PAGE_SIZE; 1057 lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE; 1058 lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1; 1059 1060 rv = VM_PAGER_OK; 1061 vm_object_pip_add(sc->object, 1); 1062 for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) { 1063 len = ((i == lastp) ? lastend : PAGE_SIZE) - offs; 1064 m = vm_page_grab_unlocked(sc->object, i, VM_ALLOC_SYSTEM); 1065 if (bp->bio_cmd == BIO_READ) { 1066 if (vm_page_all_valid(m)) 1067 rv = VM_PAGER_OK; 1068 else 1069 rv = vm_pager_get_pages(sc->object, &m, 1, 1070 NULL, NULL); 1071 if (rv == VM_PAGER_ERROR) { 1072 VM_OBJECT_WLOCK(sc->object); 1073 vm_page_free(m); 1074 VM_OBJECT_WUNLOCK(sc->object); 1075 break; 1076 } else if (rv == VM_PAGER_FAIL) { 1077 /* 1078 * Pager does not have the page. Zero 1079 * the allocated page, and mark it as 1080 * valid. Do not set dirty, the page 1081 * can be recreated if thrown out. 
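				 * A page the pager has never stored thus
				 * reads back as zeroes.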
1082 */ 1083 pmap_zero_page(m); 1084 vm_page_valid(m); 1085 } 1086 if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 1087 pmap_copy_pages(&m, offs, bp->bio_ma, 1088 ma_offs, len); 1089 } else if ((bp->bio_flags & BIO_VLIST) != 0) { 1090 physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs, 1091 vlist, ma_offs, len); 1092 cpu_flush_dcache(p, len); 1093 } else { 1094 physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len); 1095 cpu_flush_dcache(p, len); 1096 } 1097 } else if (bp->bio_cmd == BIO_WRITE) { 1098 if (len == PAGE_SIZE || vm_page_all_valid(m)) 1099 rv = VM_PAGER_OK; 1100 else 1101 rv = vm_pager_get_pages(sc->object, &m, 1, 1102 NULL, NULL); 1103 if (rv == VM_PAGER_ERROR) { 1104 VM_OBJECT_WLOCK(sc->object); 1105 vm_page_free(m); 1106 VM_OBJECT_WUNLOCK(sc->object); 1107 break; 1108 } else if (rv == VM_PAGER_FAIL) 1109 pmap_zero_page(m); 1110 1111 if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 1112 pmap_copy_pages(bp->bio_ma, ma_offs, &m, 1113 offs, len); 1114 } else if ((bp->bio_flags & BIO_VLIST) != 0) { 1115 physcopyin_vlist(vlist, ma_offs, 1116 VM_PAGE_TO_PHYS(m) + offs, len); 1117 } else { 1118 physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len); 1119 } 1120 1121 vm_page_valid(m); 1122 vm_page_set_dirty(m); 1123 } else if (bp->bio_cmd == BIO_DELETE) { 1124 if (len == PAGE_SIZE || vm_page_all_valid(m)) 1125 rv = VM_PAGER_OK; 1126 else 1127 rv = vm_pager_get_pages(sc->object, &m, 1, 1128 NULL, NULL); 1129 VM_OBJECT_WLOCK(sc->object); 1130 if (rv == VM_PAGER_ERROR) { 1131 vm_page_free(m); 1132 VM_OBJECT_WUNLOCK(sc->object); 1133 break; 1134 } else if (rv == VM_PAGER_FAIL) { 1135 vm_page_free(m); 1136 m = NULL; 1137 } else { 1138 /* Page is valid. */ 1139 if (len != PAGE_SIZE) { 1140 pmap_zero_page_area(m, offs, len); 1141 vm_page_set_dirty(m); 1142 } else { 1143 vm_pager_page_unswapped(m); 1144 vm_page_free(m); 1145 m = NULL; 1146 } 1147 } 1148 VM_OBJECT_WUNLOCK(sc->object); 1149 } 1150 if (m != NULL) { 1151 /* 1152 * The page may be deactivated prior to setting 1153 * PGA_REFERENCED, but in this case it will be 1154 * reactivated by the page daemon. 1155 */ 1156 if (vm_page_active(m)) 1157 vm_page_reference(m); 1158 else 1159 vm_page_activate(m); 1160 vm_page_xunbusy(m); 1161 } 1162 1163 /* Actions on further pages start at offset 0 */ 1164 p += PAGE_SIZE - offs; 1165 offs = 0; 1166 ma_offs += len; 1167 } 1168 vm_object_pip_wakeup(sc->object); 1169 return (rv != VM_PAGER_ERROR ? 
0 : ENOSPC); 1170 } 1171 1172 static int 1173 mdstart_null(struct md_s *sc, struct bio *bp) 1174 { 1175 1176 switch (bp->bio_cmd) { 1177 case BIO_READ: 1178 bzero(bp->bio_data, bp->bio_length); 1179 cpu_flush_dcache(bp->bio_data, bp->bio_length); 1180 break; 1181 case BIO_WRITE: 1182 break; 1183 } 1184 bp->bio_resid = 0; 1185 return (0); 1186 } 1187 1188 static void 1189 md_handleattr(struct md_s *sc, struct bio *bp) 1190 { 1191 if (sc->fwsectors && sc->fwheads && 1192 (g_handleattr_int(bp, "GEOM::fwsectors", sc->fwsectors) != 0 || 1193 g_handleattr_int(bp, "GEOM::fwheads", sc->fwheads) != 0)) 1194 return; 1195 if (g_handleattr_int(bp, "GEOM::candelete", sc->candelete) != 0) 1196 return; 1197 if (sc->ident[0] != '\0' && 1198 g_handleattr_str(bp, "GEOM::ident", sc->ident) != 0) 1199 return; 1200 if (g_handleattr_int(bp, "MNT::verified", (sc->flags & MD_VERIFY) != 0)) 1201 return; 1202 g_io_deliver(bp, EOPNOTSUPP); 1203 } 1204 1205 static void 1206 md_kthread(void *arg) 1207 { 1208 struct md_s *sc; 1209 struct bio *bp; 1210 int error; 1211 1212 sc = arg; 1213 thread_lock(curthread); 1214 sched_prio(curthread, PRIBIO); 1215 thread_unlock(curthread); 1216 if (sc->type == MD_VNODE) 1217 curthread->td_pflags |= TDP_NORUNNINGBUF; 1218 1219 for (;;) { 1220 mtx_lock(&sc->queue_mtx); 1221 if (sc->flags & MD_SHUTDOWN) { 1222 sc->flags |= MD_EXITING; 1223 mtx_unlock(&sc->queue_mtx); 1224 kproc_exit(0); 1225 } 1226 bp = bioq_takefirst(&sc->bio_queue); 1227 if (!bp) { 1228 msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0); 1229 continue; 1230 } 1231 mtx_unlock(&sc->queue_mtx); 1232 if (bp->bio_cmd == BIO_GETATTR) { 1233 md_handleattr(sc, bp); 1234 } else { 1235 error = sc->start(sc, bp); 1236 if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) { 1237 /* 1238 * Devstat uses (bio_bcount, bio_resid) for 1239 * determining the length of the completed part 1240 * of the i/o. g_io_deliver() will translate 1241 * from bio_completed to that, but it also 1242 * destroys the bio so we must do our own 1243 * translation. 
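				 * Hence bio_bcount is set from bio_length and
				 * devstat_end_transaction_bio() is called
				 * here, before g_io_deliver() consumes the
				 * bio.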
1244 */ 1245 bp->bio_bcount = bp->bio_length; 1246 devstat_end_transaction_bio(sc->devstat, bp); 1247 } 1248 bp->bio_completed = bp->bio_length - bp->bio_resid; 1249 g_io_deliver(bp, error); 1250 } 1251 } 1252 } 1253 1254 static struct md_s * 1255 mdfind(int unit) 1256 { 1257 struct md_s *sc; 1258 1259 LIST_FOREACH(sc, &md_softc_list, list) { 1260 if (sc->unit == unit) 1261 break; 1262 } 1263 return (sc); 1264 } 1265 1266 static struct md_s * 1267 mdnew(int unit, int *errp, enum md_types type) 1268 { 1269 struct md_s *sc; 1270 int error; 1271 1272 *errp = 0; 1273 if (unit == -1) 1274 unit = alloc_unr(md_uh); 1275 else 1276 unit = alloc_unr_specific(md_uh, unit); 1277 1278 if (unit == -1) { 1279 *errp = EBUSY; 1280 return (NULL); 1281 } 1282 1283 sc = malloc(sizeof(*sc), M_MD, M_WAITOK | M_ZERO); 1284 sc->type = type; 1285 bioq_init(&sc->bio_queue); 1286 mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF); 1287 sc->unit = unit; 1288 sprintf(sc->name, "md%d", unit); 1289 LIST_INSERT_HEAD(&md_softc_list, sc, list); 1290 error = kproc_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name); 1291 if (error == 0) 1292 return (sc); 1293 LIST_REMOVE(sc, list); 1294 mtx_destroy(&sc->queue_mtx); 1295 free_unr(md_uh, sc->unit); 1296 free(sc, M_MD); 1297 *errp = error; 1298 return (NULL); 1299 } 1300 1301 static void 1302 mdinit(struct md_s *sc) 1303 { 1304 struct g_geom *gp; 1305 struct g_provider *pp; 1306 1307 g_topology_lock(); 1308 gp = g_new_geomf(&g_md_class, "md%d", sc->unit); 1309 gp->softc = sc; 1310 pp = g_new_providerf(gp, "md%d", sc->unit); 1311 devstat_remove_entry(pp->stat); 1312 pp->stat = NULL; 1313 pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; 1314 pp->mediasize = sc->mediasize; 1315 pp->sectorsize = sc->sectorsize; 1316 switch (sc->type) { 1317 case MD_MALLOC: 1318 case MD_VNODE: 1319 case MD_SWAP: 1320 pp->flags |= G_PF_ACCEPT_UNMAPPED; 1321 break; 1322 case MD_PRELOAD: 1323 case MD_NULL: 1324 break; 1325 } 1326 sc->gp = gp; 1327 sc->pp = pp; 1328 sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize, 1329 DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 1330 sc->devstat->id = pp; 1331 g_error_provider(pp, 0); 1332 g_topology_unlock(); 1333 } 1334 1335 static int 1336 mdcreate_malloc(struct md_s *sc, struct md_req *mdr) 1337 { 1338 uintptr_t sp; 1339 int error; 1340 off_t u; 1341 1342 error = 0; 1343 if (mdr->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE)) 1344 return (EINVAL); 1345 if (mdr->md_sectorsize != 0 && !powerof2(mdr->md_sectorsize)) 1346 return (EINVAL); 1347 /* Compression doesn't make sense if we have reserved space */ 1348 if (mdr->md_options & MD_RESERVE) 1349 mdr->md_options &= ~MD_COMPRESS; 1350 if (mdr->md_fwsectors != 0) 1351 sc->fwsectors = mdr->md_fwsectors; 1352 if (mdr->md_fwheads != 0) 1353 sc->fwheads = mdr->md_fwheads; 1354 sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE); 1355 sc->indir = dimension(sc->mediasize / sc->sectorsize); 1356 sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL, 1357 0x1ff, 0); 1358 if (mdr->md_options & MD_RESERVE) { 1359 off_t nsectors; 1360 1361 nsectors = sc->mediasize / sc->sectorsize; 1362 for (u = 0; u < nsectors; u++) { 1363 sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ? 
1364 M_WAITOK : M_NOWAIT) | M_ZERO); 1365 if (sp != 0) 1366 error = s_write(sc->indir, u, sp); 1367 else 1368 error = ENOMEM; 1369 if (error != 0) 1370 break; 1371 } 1372 } 1373 return (error); 1374 } 1375 1376 static int 1377 mdsetcred(struct md_s *sc, struct ucred *cred) 1378 { 1379 char *tmpbuf; 1380 int error = 0; 1381 1382 /* 1383 * Set credits in our softc 1384 */ 1385 1386 if (sc->cred) 1387 crfree(sc->cred); 1388 sc->cred = crhold(cred); 1389 1390 /* 1391 * Horrible kludge to establish credentials for NFS XXX. 1392 */ 1393 1394 if (sc->vnode) { 1395 struct uio auio; 1396 struct iovec aiov; 1397 1398 tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK); 1399 bzero(&auio, sizeof(auio)); 1400 1401 aiov.iov_base = tmpbuf; 1402 aiov.iov_len = sc->sectorsize; 1403 auio.uio_iov = &aiov; 1404 auio.uio_iovcnt = 1; 1405 auio.uio_offset = 0; 1406 auio.uio_rw = UIO_READ; 1407 auio.uio_segflg = UIO_SYSSPACE; 1408 auio.uio_resid = aiov.iov_len; 1409 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY); 1410 error = VOP_READ(sc->vnode, &auio, 0, sc->cred); 1411 VOP_UNLOCK(sc->vnode); 1412 free(tmpbuf, M_TEMP); 1413 } 1414 return (error); 1415 } 1416 1417 static int 1418 mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td) 1419 { 1420 struct vattr vattr; 1421 struct nameidata nd; 1422 char *fname; 1423 int error, flags; 1424 long v; 1425 1426 fname = mdr->md_file; 1427 if (mdr->md_file_seg == UIO_USERSPACE) { 1428 error = copyinstr(fname, sc->file, sizeof(sc->file), NULL); 1429 if (error != 0) 1430 return (error); 1431 } else if (mdr->md_file_seg == UIO_SYSSPACE) 1432 strlcpy(sc->file, fname, sizeof(sc->file)); 1433 else 1434 return (EDOOFUS); 1435 1436 /* 1437 * If the user specified that this is a read only device, don't 1438 * set the FWRITE mask before trying to open the backing store. 1439 */ 1440 flags = FREAD | ((mdr->md_options & MD_READONLY) ? 0 : FWRITE) \ 1441 | ((mdr->md_options & MD_VERIFY) ? O_VERIFY : 0); 1442 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file); 1443 error = vn_open(&nd, &flags, 0, NULL); 1444 if (error != 0) 1445 return (error); 1446 NDFREE_PNBUF(&nd); 1447 if (nd.ni_vp->v_type != VREG) { 1448 error = EINVAL; 1449 goto bad; 1450 } 1451 error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred); 1452 if (error != 0) 1453 goto bad; 1454 if ((mdr->md_options & MD_MUSTDEALLOC) != 0) { 1455 error = VOP_PATHCONF(nd.ni_vp, _PC_DEALLOC_PRESENT, &v); 1456 if (error != 0) 1457 goto bad; 1458 if (v == 0) 1459 sc->candelete = false; 1460 } 1461 if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) { 1462 vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY); 1463 if (VN_IS_DOOMED(nd.ni_vp)) { 1464 /* Forced unmount. 
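			 * The vnode was doomed while we slept waiting for
			 * the lock upgrade, so the backing file can no
			 * longer be used; fail the attach.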
*/ 1465 error = EBADF; 1466 goto bad; 1467 } 1468 } 1469 nd.ni_vp->v_vflag |= VV_MD; 1470 VOP_UNLOCK(nd.ni_vp); 1471 1472 if (mdr->md_fwsectors != 0) 1473 sc->fwsectors = mdr->md_fwsectors; 1474 if (mdr->md_fwheads != 0) 1475 sc->fwheads = mdr->md_fwheads; 1476 snprintf(sc->ident, sizeof(sc->ident), "MD-DEV%ju-INO%ju", 1477 (uintmax_t)vattr.va_fsid, (uintmax_t)vattr.va_fileid); 1478 sc->flags = mdr->md_options & (MD_ASYNC | MD_CACHE | MD_FORCE | 1479 MD_VERIFY); 1480 if (!(flags & FWRITE)) 1481 sc->flags |= MD_READONLY; 1482 sc->vnode = nd.ni_vp; 1483 1484 error = mdsetcred(sc, td->td_ucred); 1485 if (error != 0) { 1486 sc->vnode = NULL; 1487 vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY); 1488 nd.ni_vp->v_vflag &= ~VV_MD; 1489 goto bad; 1490 } 1491 1492 sc->kva = kva_alloc(MAXPHYS + PAGE_SIZE); 1493 return (0); 1494 bad: 1495 VOP_UNLOCK(nd.ni_vp); 1496 (void)vn_close(nd.ni_vp, flags, td->td_ucred, td); 1497 return (error); 1498 } 1499 1500 static void 1501 g_md_providergone(struct g_provider *pp) 1502 { 1503 struct md_s *sc = pp->geom->softc; 1504 1505 mtx_lock(&sc->queue_mtx); 1506 sc->flags |= MD_PROVIDERGONE; 1507 wakeup(&sc->flags); 1508 mtx_unlock(&sc->queue_mtx); 1509 } 1510 1511 static int 1512 mddestroy(struct md_s *sc, struct thread *td) 1513 { 1514 1515 if (sc->gp) { 1516 g_topology_lock(); 1517 g_wither_geom(sc->gp, ENXIO); 1518 g_topology_unlock(); 1519 1520 mtx_lock(&sc->queue_mtx); 1521 while (!(sc->flags & MD_PROVIDERGONE)) 1522 msleep(&sc->flags, &sc->queue_mtx, PRIBIO, "mddestroy", 0); 1523 mtx_unlock(&sc->queue_mtx); 1524 } 1525 if (sc->devstat) { 1526 devstat_remove_entry(sc->devstat); 1527 sc->devstat = NULL; 1528 } 1529 mtx_lock(&sc->queue_mtx); 1530 sc->flags |= MD_SHUTDOWN; 1531 wakeup(sc); 1532 while (!(sc->flags & MD_EXITING)) 1533 msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10); 1534 mtx_unlock(&sc->queue_mtx); 1535 mtx_destroy(&sc->queue_mtx); 1536 if (sc->vnode != NULL) { 1537 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY); 1538 sc->vnode->v_vflag &= ~VV_MD; 1539 VOP_UNLOCK(sc->vnode); 1540 (void)vn_close(sc->vnode, sc->flags & MD_READONLY ? 
1541 FREAD : (FREAD|FWRITE), sc->cred, td); 1542 } 1543 if (sc->cred != NULL) 1544 crfree(sc->cred); 1545 if (sc->object != NULL) 1546 vm_object_deallocate(sc->object); 1547 if (sc->indir) 1548 destroy_indir(sc, sc->indir); 1549 if (sc->uma) 1550 uma_zdestroy(sc->uma); 1551 if (sc->kva) 1552 kva_free(sc->kva, MAXPHYS + PAGE_SIZE); 1553 1554 LIST_REMOVE(sc, list); 1555 free_unr(md_uh, sc->unit); 1556 free(sc, M_MD); 1557 return (0); 1558 } 1559 1560 static int 1561 mdresize(struct md_s *sc, struct md_req *mdr) 1562 { 1563 int error, res; 1564 vm_pindex_t oldpages, newpages; 1565 1566 switch (sc->type) { 1567 case MD_VNODE: 1568 case MD_NULL: 1569 break; 1570 case MD_SWAP: 1571 if (mdr->md_mediasize <= 0 || 1572 (mdr->md_mediasize % PAGE_SIZE) != 0) 1573 return (EDOM); 1574 oldpages = OFF_TO_IDX(sc->mediasize); 1575 newpages = OFF_TO_IDX(mdr->md_mediasize); 1576 if (newpages < oldpages) { 1577 VM_OBJECT_WLOCK(sc->object); 1578 vm_object_page_remove(sc->object, newpages, 0, 0); 1579 swap_release_by_cred(IDX_TO_OFF(oldpages - 1580 newpages), sc->cred); 1581 sc->object->charge = IDX_TO_OFF(newpages); 1582 sc->object->size = newpages; 1583 VM_OBJECT_WUNLOCK(sc->object); 1584 } else if (newpages > oldpages) { 1585 res = swap_reserve_by_cred(IDX_TO_OFF(newpages - 1586 oldpages), sc->cred); 1587 if (!res) 1588 return (ENOMEM); 1589 if ((mdr->md_options & MD_RESERVE) || 1590 (sc->flags & MD_RESERVE)) { 1591 error = swap_pager_reserve(sc->object, 1592 oldpages, newpages - oldpages); 1593 if (error < 0) { 1594 swap_release_by_cred( 1595 IDX_TO_OFF(newpages - oldpages), 1596 sc->cred); 1597 return (EDOM); 1598 } 1599 } 1600 VM_OBJECT_WLOCK(sc->object); 1601 sc->object->charge = IDX_TO_OFF(newpages); 1602 sc->object->size = newpages; 1603 VM_OBJECT_WUNLOCK(sc->object); 1604 } 1605 break; 1606 default: 1607 return (EOPNOTSUPP); 1608 } 1609 1610 sc->mediasize = mdr->md_mediasize; 1611 1612 g_topology_lock(); 1613 g_resize_provider(sc->pp, sc->mediasize); 1614 g_topology_unlock(); 1615 return (0); 1616 } 1617 1618 static int 1619 mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td) 1620 { 1621 vm_ooffset_t npage; 1622 int error; 1623 1624 /* 1625 * Range check. Disallow negative sizes and sizes not being 1626 * multiple of page size. 1627 */ 1628 if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0) 1629 return (EDOM); 1630 1631 /* 1632 * Allocate an OBJT_SWAP object. 1633 * 1634 * Note the truncation. 1635 */ 1636 1637 if ((mdr->md_options & MD_VERIFY) != 0) 1638 return (EINVAL); 1639 npage = mdr->md_mediasize / PAGE_SIZE; 1640 if (mdr->md_fwsectors != 0) 1641 sc->fwsectors = mdr->md_fwsectors; 1642 if (mdr->md_fwheads != 0) 1643 sc->fwheads = mdr->md_fwheads; 1644 sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage, 1645 VM_PROT_DEFAULT, 0, td->td_ucred); 1646 if (sc->object == NULL) 1647 return (ENOMEM); 1648 sc->flags = mdr->md_options & (MD_FORCE | MD_RESERVE); 1649 if (mdr->md_options & MD_RESERVE) { 1650 if (swap_pager_reserve(sc->object, 0, npage) < 0) { 1651 error = EDOM; 1652 goto finish; 1653 } 1654 } 1655 error = mdsetcred(sc, td->td_ucred); 1656 finish: 1657 if (error != 0) { 1658 vm_object_deallocate(sc->object); 1659 sc->object = NULL; 1660 } 1661 return (error); 1662 } 1663 1664 static int 1665 mdcreate_null(struct md_s *sc, struct md_req *mdr, struct thread *td) 1666 { 1667 1668 /* 1669 * Range check. Disallow negative sizes and sizes not being 1670 * multiple of page size. 
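	 * The same restriction applies to swap-backed devices; see
	 * mdcreate_swap() above.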
1671 */ 1672 if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0) 1673 return (EDOM); 1674 1675 return (0); 1676 } 1677 1678 static int 1679 kern_mdattach_locked(struct thread *td, struct md_req *mdr) 1680 { 1681 struct md_s *sc; 1682 unsigned sectsize; 1683 int error, i; 1684 1685 sx_assert(&md_sx, SA_XLOCKED); 1686 1687 switch (mdr->md_type) { 1688 case MD_MALLOC: 1689 case MD_PRELOAD: 1690 case MD_VNODE: 1691 case MD_SWAP: 1692 case MD_NULL: 1693 break; 1694 default: 1695 return (EINVAL); 1696 } 1697 if (mdr->md_sectorsize == 0) 1698 sectsize = DEV_BSIZE; 1699 else 1700 sectsize = mdr->md_sectorsize; 1701 if (sectsize > maxphys || mdr->md_mediasize < sectsize) 1702 return (EINVAL); 1703 if (mdr->md_options & MD_AUTOUNIT) 1704 sc = mdnew(-1, &error, mdr->md_type); 1705 else { 1706 if (mdr->md_unit > INT_MAX) 1707 return (EINVAL); 1708 sc = mdnew(mdr->md_unit, &error, mdr->md_type); 1709 } 1710 if (sc == NULL) 1711 return (error); 1712 if (mdr->md_label != NULL) 1713 error = copyinstr(mdr->md_label, sc->label, 1714 sizeof(sc->label), NULL); 1715 if (error != 0) 1716 goto err_after_new; 1717 if (mdr->md_options & MD_AUTOUNIT) 1718 mdr->md_unit = sc->unit; 1719 sc->mediasize = mdr->md_mediasize; 1720 sc->sectorsize = sectsize; 1721 sc->candelete = true; 1722 error = EDOOFUS; 1723 switch (sc->type) { 1724 case MD_MALLOC: 1725 sc->start = mdstart_malloc; 1726 error = mdcreate_malloc(sc, mdr); 1727 break; 1728 case MD_PRELOAD: 1729 /* 1730 * We disallow attaching preloaded memory disks via 1731 * ioctl. Preloaded memory disks are automatically 1732 * attached in g_md_init(). 1733 */ 1734 error = EOPNOTSUPP; 1735 break; 1736 case MD_VNODE: 1737 sc->start = mdstart_vnode; 1738 error = mdcreate_vnode(sc, mdr, td); 1739 break; 1740 case MD_SWAP: 1741 sc->start = mdstart_swap; 1742 error = mdcreate_swap(sc, mdr, td); 1743 break; 1744 case MD_NULL: 1745 sc->start = mdstart_null; 1746 error = mdcreate_null(sc, mdr, td); 1747 break; 1748 } 1749 err_after_new: 1750 if (error != 0) { 1751 mddestroy(sc, td); 1752 return (error); 1753 } 1754 1755 /* Prune off any residual fractional sector */ 1756 i = sc->mediasize % sc->sectorsize; 1757 sc->mediasize -= i; 1758 1759 mdinit(sc); 1760 return (0); 1761 } 1762 1763 static int 1764 kern_mdattach(struct thread *td, struct md_req *mdr) 1765 { 1766 int error; 1767 1768 sx_xlock(&md_sx); 1769 error = kern_mdattach_locked(td, mdr); 1770 sx_xunlock(&md_sx); 1771 return (error); 1772 } 1773 1774 static int 1775 kern_mddetach_locked(struct thread *td, struct md_req *mdr) 1776 { 1777 struct md_s *sc; 1778 1779 sx_assert(&md_sx, SA_XLOCKED); 1780 1781 if (mdr->md_mediasize != 0 || 1782 (mdr->md_options & ~MD_FORCE) != 0) 1783 return (EINVAL); 1784 1785 sc = mdfind(mdr->md_unit); 1786 if (sc == NULL) 1787 return (ENOENT); 1788 if (sc->opencount != 0 && !(sc->flags & MD_FORCE) && 1789 !(mdr->md_options & MD_FORCE)) 1790 return (EBUSY); 1791 return (mddestroy(sc, td)); 1792 } 1793 1794 static int 1795 kern_mddetach(struct thread *td, struct md_req *mdr) 1796 { 1797 int error; 1798 1799 sx_xlock(&md_sx); 1800 error = kern_mddetach_locked(td, mdr); 1801 sx_xunlock(&md_sx); 1802 return (error); 1803 } 1804 1805 static int 1806 kern_mdresize_locked(struct md_req *mdr) 1807 { 1808 struct md_s *sc; 1809 1810 sx_assert(&md_sx, SA_XLOCKED); 1811 1812 if ((mdr->md_options & ~(MD_FORCE | MD_RESERVE)) != 0) 1813 return (EINVAL); 1814 1815 sc = mdfind(mdr->md_unit); 1816 if (sc == NULL) 1817 return (ENOENT); 1818 if (mdr->md_mediasize < sc->sectorsize) 1819 return (EINVAL); 
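	/*
	 * Round the requested size down to a whole number of sectors.
	 * Shrinking below the current media size is refused unless MD_FORCE
	 * is set on the device or supplied in the request.
	 */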
1820 mdr->md_mediasize -= mdr->md_mediasize % sc->sectorsize; 1821 if (mdr->md_mediasize < sc->mediasize && 1822 !(sc->flags & MD_FORCE) && 1823 !(mdr->md_options & MD_FORCE)) 1824 return (EBUSY); 1825 return (mdresize(sc, mdr)); 1826 } 1827 1828 static int 1829 kern_mdresize(struct md_req *mdr) 1830 { 1831 int error; 1832 1833 sx_xlock(&md_sx); 1834 error = kern_mdresize_locked(mdr); 1835 sx_xunlock(&md_sx); 1836 return (error); 1837 } 1838 1839 static int 1840 kern_mdquery_locked(struct md_req *mdr) 1841 { 1842 struct md_s *sc; 1843 int error; 1844 1845 sx_assert(&md_sx, SA_XLOCKED); 1846 1847 sc = mdfind(mdr->md_unit); 1848 if (sc == NULL) 1849 return (ENOENT); 1850 mdr->md_type = sc->type; 1851 mdr->md_options = sc->flags; 1852 mdr->md_mediasize = sc->mediasize; 1853 mdr->md_sectorsize = sc->sectorsize; 1854 error = 0; 1855 if (mdr->md_label != NULL) { 1856 error = copyout(sc->label, mdr->md_label, 1857 strlen(sc->label) + 1); 1858 if (error != 0) 1859 return (error); 1860 } 1861 if (sc->type == MD_VNODE || 1862 (sc->type == MD_PRELOAD && mdr->md_file != NULL)) 1863 error = copyout(sc->file, mdr->md_file, 1864 strlen(sc->file) + 1); 1865 return (error); 1866 } 1867 1868 static int 1869 kern_mdquery(struct md_req *mdr) 1870 { 1871 int error; 1872 1873 sx_xlock(&md_sx); 1874 error = kern_mdquery_locked(mdr); 1875 sx_xunlock(&md_sx); 1876 return (error); 1877 } 1878 1879 /* Copy members that are not userspace pointers. */ 1880 #define MD_IOCTL2REQ(mdio, mdr) do { \ 1881 (mdr)->md_unit = (mdio)->md_unit; \ 1882 (mdr)->md_type = (mdio)->md_type; \ 1883 (mdr)->md_mediasize = (mdio)->md_mediasize; \ 1884 (mdr)->md_sectorsize = (mdio)->md_sectorsize; \ 1885 (mdr)->md_options = (mdio)->md_options; \ 1886 (mdr)->md_fwheads = (mdio)->md_fwheads; \ 1887 (mdr)->md_fwsectors = (mdio)->md_fwsectors; \ 1888 (mdr)->md_units = &(mdio)->md_pad[0]; \ 1889 (mdr)->md_units_nitems = nitems((mdio)->md_pad); \ 1890 } while(0) 1891 1892 /* Copy members that might have been updated */ 1893 #define MD_REQ2IOCTL(mdr, mdio) do { \ 1894 (mdio)->md_unit = (mdr)->md_unit; \ 1895 (mdio)->md_type = (mdr)->md_type; \ 1896 (mdio)->md_mediasize = (mdr)->md_mediasize; \ 1897 (mdio)->md_sectorsize = (mdr)->md_sectorsize; \ 1898 (mdio)->md_options = (mdr)->md_options; \ 1899 (mdio)->md_fwheads = (mdr)->md_fwheads; \ 1900 (mdio)->md_fwsectors = (mdr)->md_fwsectors; \ 1901 } while(0) 1902 1903 static int 1904 mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, 1905 struct thread *td) 1906 { 1907 struct md_req mdr; 1908 int error; 1909 1910 if (md_debug) 1911 printf("mdctlioctl(%s %lx %p %x %p)\n", 1912 devtoname(dev), cmd, addr, flags, td); 1913 1914 bzero(&mdr, sizeof(mdr)); 1915 switch (cmd) { 1916 case MDIOCATTACH: 1917 case MDIOCDETACH: 1918 case MDIOCRESIZE: 1919 case MDIOCQUERY: { 1920 struct md_ioctl *mdio = (struct md_ioctl *)addr; 1921 if (mdio->md_version != MDIOVERSION) 1922 return (EINVAL); 1923 MD_IOCTL2REQ(mdio, &mdr); 1924 mdr.md_file = mdio->md_file; 1925 mdr.md_file_seg = UIO_USERSPACE; 1926 /* If the file is adjacent to the md_ioctl it's in kernel. 
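		 * In that case the caller is inside the kernel and built the
		 * request with the path placed directly after the structure,
		 * so treat md_file as a kernel (UIO_SYSSPACE) address rather
		 * than copying it in from userspace.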
*/ 1927 if ((void *)mdio->md_file == (void *)(mdio + 1)) 1928 mdr.md_file_seg = UIO_SYSSPACE; 1929 mdr.md_label = mdio->md_label; 1930 break; 1931 } 1932 #ifdef COMPAT_FREEBSD32 1933 case MDIOCATTACH_32: 1934 case MDIOCDETACH_32: 1935 case MDIOCRESIZE_32: 1936 case MDIOCQUERY_32: { 1937 struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr; 1938 if (mdio->md_version != MDIOVERSION) 1939 return (EINVAL); 1940 MD_IOCTL2REQ(mdio, &mdr); 1941 mdr.md_file = (void *)(uintptr_t)mdio->md_file; 1942 mdr.md_file_seg = UIO_USERSPACE; 1943 mdr.md_label = (void *)(uintptr_t)mdio->md_label; 1944 break; 1945 } 1946 #endif 1947 default: 1948 /* Fall through to handler switch. */ 1949 break; 1950 } 1951 1952 error = 0; 1953 switch (cmd) { 1954 case MDIOCATTACH: 1955 #ifdef COMPAT_FREEBSD32 1956 case MDIOCATTACH_32: 1957 #endif 1958 error = kern_mdattach(td, &mdr); 1959 break; 1960 case MDIOCDETACH: 1961 #ifdef COMPAT_FREEBSD32 1962 case MDIOCDETACH_32: 1963 #endif 1964 error = kern_mddetach(td, &mdr); 1965 break; 1966 case MDIOCRESIZE: 1967 #ifdef COMPAT_FREEBSD32 1968 case MDIOCRESIZE_32: 1969 #endif 1970 error = kern_mdresize(&mdr); 1971 break; 1972 case MDIOCQUERY: 1973 #ifdef COMPAT_FREEBSD32 1974 case MDIOCQUERY_32: 1975 #endif 1976 error = kern_mdquery(&mdr); 1977 break; 1978 default: 1979 error = ENOIOCTL; 1980 } 1981 1982 switch (cmd) { 1983 case MDIOCATTACH: 1984 case MDIOCQUERY: { 1985 struct md_ioctl *mdio = (struct md_ioctl *)addr; 1986 MD_REQ2IOCTL(&mdr, mdio); 1987 break; 1988 } 1989 #ifdef COMPAT_FREEBSD32 1990 case MDIOCATTACH_32: 1991 case MDIOCQUERY_32: { 1992 struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr; 1993 MD_REQ2IOCTL(&mdr, mdio); 1994 break; 1995 } 1996 #endif 1997 default: 1998 /* Other commands to not alter mdr. */ 1999 break; 2000 } 2001 2002 return (error); 2003 } 2004 2005 static void 2006 md_preloaded(u_char *image, size_t length, const char *name) 2007 { 2008 struct md_s *sc; 2009 int error; 2010 2011 sc = mdnew(-1, &error, MD_PRELOAD); 2012 if (sc == NULL) 2013 return; 2014 sc->mediasize = length; 2015 sc->sectorsize = DEV_BSIZE; 2016 sc->pl_ptr = image; 2017 sc->pl_len = length; 2018 sc->start = mdstart_preload; 2019 if (name != NULL) 2020 strlcpy(sc->file, name, sizeof(sc->file)); 2021 #ifdef MD_ROOT 2022 if (sc->unit == 0) { 2023 #ifndef ROOTDEVNAME 2024 rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0"; 2025 #endif 2026 #ifdef MD_ROOT_READONLY 2027 sc->flags |= MD_READONLY; 2028 #endif 2029 } 2030 #endif 2031 mdinit(sc); 2032 if (name != NULL) { 2033 printf("%s%d: Preloaded image <%s> %zd bytes at %p\n", 2034 MD_NAME, sc->unit, name, length, image); 2035 } else { 2036 printf("%s%d: Embedded image %zd bytes at %p\n", 2037 MD_NAME, sc->unit, length, image); 2038 } 2039 } 2040 2041 static void 2042 g_md_init(struct g_class *mp __unused) 2043 { 2044 caddr_t mod; 2045 u_char *ptr, *name, *type; 2046 unsigned len; 2047 int i; 2048 2049 /* figure out log2(NINDIR) */ 2050 for (i = NINDIR, nshift = -1; i; nshift++) 2051 i >>= 1; 2052 2053 mod = NULL; 2054 sx_init(&md_sx, "MD config lock"); 2055 g_topology_unlock(); 2056 md_uh = new_unrhdr(0, INT_MAX, NULL); 2057 #ifdef MD_ROOT 2058 if (mfs_root_size != 0) { 2059 sx_xlock(&md_sx); 2060 #ifdef MD_ROOT_MEM 2061 md_preloaded(mfs_root, mfs_root_size, NULL); 2062 #else 2063 md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size, 2064 NULL); 2065 #endif 2066 sx_xunlock(&md_sx); 2067 } 2068 #endif 2069 /* XXX: are preload_* static or do they need Giant ? 
*/ 2070 while ((mod = preload_search_next_name(mod)) != NULL) { 2071 name = (char *)preload_search_info(mod, MODINFO_NAME); 2072 if (name == NULL) 2073 continue; 2074 type = (char *)preload_search_info(mod, MODINFO_TYPE); 2075 if (type == NULL) 2076 continue; 2077 if (strcmp(type, "md_image") && strcmp(type, "mfs_root")) 2078 continue; 2079 ptr = preload_fetch_addr(mod); 2080 len = preload_fetch_size(mod); 2081 if (ptr != NULL && len != 0) { 2082 sx_xlock(&md_sx); 2083 md_preloaded(ptr, len, name); 2084 sx_xunlock(&md_sx); 2085 } 2086 } 2087 status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL, 2088 0600, MDCTL_NAME); 2089 g_topology_lock(); 2090 } 2091 2092 static void 2093 g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 2094 struct g_consumer *cp __unused, struct g_provider *pp) 2095 { 2096 struct md_s *mp; 2097 char *type; 2098 2099 mp = gp->softc; 2100 if (mp == NULL) 2101 return; 2102 2103 switch (mp->type) { 2104 case MD_MALLOC: 2105 type = "malloc"; 2106 break; 2107 case MD_PRELOAD: 2108 type = "preload"; 2109 break; 2110 case MD_VNODE: 2111 type = "vnode"; 2112 break; 2113 case MD_SWAP: 2114 type = "swap"; 2115 break; 2116 case MD_NULL: 2117 type = "null"; 2118 break; 2119 default: 2120 type = "unknown"; 2121 break; 2122 } 2123 2124 if (pp != NULL) { 2125 if (indent == NULL) { 2126 sbuf_printf(sb, " u %d", mp->unit); 2127 sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize); 2128 sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads); 2129 sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors); 2130 sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize); 2131 sbuf_printf(sb, " t %s", type); 2132 if ((mp->type == MD_VNODE && mp->vnode != NULL) || 2133 (mp->type == MD_PRELOAD && mp->file[0] != '\0')) 2134 sbuf_printf(sb, " file %s", mp->file); 2135 sbuf_printf(sb, " label %s", mp->label); 2136 } else { 2137 sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, 2138 mp->unit); 2139 sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n", 2140 indent, (uintmax_t) mp->sectorsize); 2141 sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n", 2142 indent, (uintmax_t) mp->fwheads); 2143 sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n", 2144 indent, (uintmax_t) mp->fwsectors); 2145 if (mp->ident[0] != '\0') { 2146 sbuf_printf(sb, "%s<ident>", indent); 2147 g_conf_printf_escaped(sb, "%s", mp->ident); 2148 sbuf_printf(sb, "</ident>\n"); 2149 } 2150 sbuf_printf(sb, "%s<length>%ju</length>\n", 2151 indent, (uintmax_t) mp->mediasize); 2152 sbuf_printf(sb, "%s<compression>%s</compression>\n", indent, 2153 (mp->flags & MD_COMPRESS) == 0 ? "off": "on"); 2154 sbuf_printf(sb, "%s<access>%s</access>\n", indent, 2155 (mp->flags & MD_READONLY) == 0 ? "read-write": 2156 "read-only"); 2157 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 2158 type); 2159 if ((mp->type == MD_VNODE && mp->vnode != NULL) || 2160 (mp->type == MD_PRELOAD && mp->file[0] != '\0')) { 2161 sbuf_printf(sb, "%s<file>", indent); 2162 g_conf_printf_escaped(sb, "%s", mp->file); 2163 sbuf_printf(sb, "</file>\n"); 2164 } 2165 if (mp->type == MD_VNODE) 2166 sbuf_printf(sb, "%s<cache>%s</cache>\n", indent, 2167 (mp->flags & MD_CACHE) == 0 ? "off": "on"); 2168 sbuf_printf(sb, "%s<label>", indent); 2169 g_conf_printf_escaped(sb, "%s", mp->label); 2170 sbuf_printf(sb, "</label>\n"); 2171 } 2172 } 2173 } 2174 2175 static void 2176 g_md_fini(struct g_class *mp __unused) 2177 { 2178 2179 sx_destroy(&md_sx); 2180 if (status_dev != NULL) 2181 destroy_dev(status_dev); 2182 delete_unrhdr(md_uh); 2183 } 2184