/*-
 * SPDX-License-Identifier: (Beerware AND BSD-3-Clause)
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based in the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_rootdevname.h"
#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/disk.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define MD_EXITING	0x20000		/* Worker thread is exiting. */
#define MD_PROVIDERGONE 0x40000		/* Safe to free the softc */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

struct md_req {
    unsigned	md_unit;	/* unit number */
    enum md_types md_type;	/* type of disk */
    off_t	md_mediasize;	/* size of disk in bytes */
    unsigned	md_sectorsize;	/* sectorsize */
    unsigned	md_options;	/* options */
    int		md_fwheads;	/* firmware heads */
    int		md_fwsectors;	/* firmware sectors */
    char	*md_file;	/* pathname of file to mount */
    enum uio_seg md_file_seg;	/* location of md_file */
    char	*md_label;	/* label of the device (userspace) */
    int		*md_units;	/* pointer to units array (kernel) */
    size_t	md_units_nitems; /* items in md_units array */
};

#ifdef COMPAT_FREEBSD32
struct md_ioctl32 {
    unsigned	md_version;
    unsigned	md_unit;
    enum md_types md_type;
    uint32_t	md_file;
    off_t	md_mediasize;
    unsigned	md_sectorsize;
    unsigned	md_options;
    uint64_t	md_base;
    int		md_fwheads;
    int		md_fwsectors;
    uint32_t	md_label;
    int		md_pad[MDNPAD];
} __attribute__((__packed__));
CTASSERT((sizeof(struct md_ioctl32)) == 436);

#define MDIOCATTACH_32	_IOC_NEWTYPE(MDIOCATTACH, struct md_ioctl32)
#define MDIOCDETACH_32	_IOC_NEWTYPE(MDIOCDETACH, struct md_ioctl32)
#define MDIOCQUERY_32	_IOC_NEWTYPE(MDIOCQUERY, struct md_ioctl32)
#define MDIOCLIST_32	_IOC_NEWTYPE(MDIOCLIST, struct md_ioctl32)
#define MDIOCRESIZE_32	_IOC_NEWTYPE(MDIOCRESIZE, struct md_ioctl32)
#endif /* COMPAT_FREEBSD32 */

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * We put the mfs_root symbol into the oldmfs section of the kernel object file.
 * Applications that patch the object with the image can determine
 * the size looking at the oldmfs section size within the kernel.
 */
u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#elif defined(MD_ROOT_MEM)
/* MD region already mapped in the memory */
u_char *mfs_root;
int mfs_root_size;
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);
static g_provgone_t g_md_providergone;

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
    .d_version =	D_VERSION,
    .d_ioctl =		mdctlioctl,
    .d_name =		MD_NAME,
};

struct g_class g_md_class = {
    .name = "MD",
    .version = G_VERSION,
    .init = g_md_init,
    .fini = g_md_fini,
    .start = g_md_start,
    .access = g_md_access,
    .dumpconf = g_md_dumpconf,
    .providergone = g_md_providergone,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static uma_zone_t md_pbuf_zone;

struct indir {
    uintptr_t	*array;
    u_int	total;
    u_int	used;
    u_int	shift;
};

struct md_s {
    int unit;
    LIST_ENTRY(md_s) list;
    struct bio_queue_head bio_queue;
    struct mtx queue_mtx;
    struct mtx stat_mtx;
    struct cdev *dev;
    enum md_types type;
    off_t mediasize;
    unsigned sectorsize;
    unsigned opencount;
    unsigned fwheads;
    unsigned fwsectors;
    char ident[32];
    unsigned flags;
    char name[20];
    struct proc *procp;
    struct g_geom *gp;
    struct g_provider *pp;
    int (*start)(struct md_s *sc, struct bio *bp);
    struct devstat *devstat;

    /* MD_MALLOC related fields */
    struct indir *indir;
    uma_zone_t uma;

    /* MD_PRELOAD related fields */
    u_char *pl_ptr;
    size_t pl_len;

    /* MD_VNODE related fields */
    struct vnode *vnode;
    char file[PATH_MAX];
    char label[PATH_MAX];
    struct ucred *cred;

    /* MD_SWAP related fields */
    vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
    struct indir *ip;

    ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	| M_ZERO);
    if (ip == NULL)
	return (NULL);
    ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
    if (ip->array == NULL) {
	free(ip, M_MD);
	return (NULL);
    }
    ip->total = NINDIR;
    ip->shift = shift;
    return (ip);
}

static void
del_indir(struct indir *ip)
{

    free(ip->array, M_MDSECT);
    free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
    int i;

    for (i = 0; i < NINDIR; i++) {
	if (!ip->array[i])
	    continue;
	if (ip->shift)
	    destroy_indir(sc, (struct indir*)(ip->array[i]));
	else if (ip->array[i] > 255)
	    uma_zfree(sc->uma, (void *)(ip->array[i]));
    }
    del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
    off_t rcnt;
    struct indir *ip;
    int layer;

    rcnt = size;
    layer = 0;
    while (rcnt > NINDIR) {
	rcnt /= NINDIR;
	layer++;
    }

    /*
     * XXX: the top layer is probably not fully populated, so we allocate
     * too much space for ip->array in here.
     */
    ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
    ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	M_MDSECT, M_WAITOK | M_ZERO);
    ip->total = NINDIR;
    ip->shift = layer * nshift;
    return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
    struct indir *cip;
    int idx;
    uintptr_t up;

    if (md_debug > 1)
	printf("s_read(%jd)\n", (intmax_t)offset);
    up = 0;
    for (cip = ip; cip != NULL;) {
	if (cip->shift) {
	    idx = (offset >> cip->shift) & NMASK;
	    up = cip->array[idx];
	    cip = (struct indir *)up;
	    continue;
	}
	idx = offset & NMASK;
	return (cip->array[idx]);
    }
    return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
    struct indir *cip, *lip[10];
    int idx, li;
    uintptr_t up;

    if (md_debug > 1)
	printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
    up = 0;
    li = 0;
    cip = ip;
    for (;;) {
	lip[li++] = cip;
	if (cip->shift) {
	    idx = (offset >> cip->shift) & NMASK;
	    up = cip->array[idx];
	    if (up != 0) {
		cip = (struct indir *)up;
		continue;
	    }
	    /* Allocate branch */
	    cip->array[idx] =
		(uintptr_t)new_indir(cip->shift - nshift);
	    if (cip->array[idx] == 0)
		return (ENOSPC);
	    cip->used++;
	    up = cip->array[idx];
	    cip = (struct indir *)up;
	    continue;
	}
	/* leafnode */
	idx = offset & NMASK;
	up = cip->array[idx];
	if (up != 0)
	    cip->used--;
	cip->array[idx] = ptr;
	if (ptr != 0)
	    cip->used++;
	break;
    }
    if (cip->used != 0 || li == 1)
	return (0);
    li--;
    while (cip->used == 0 && cip != ip) {
	li--;
	idx = (offset >> lip[li]->shift) & NMASK;
	up = lip[li]->array[idx];
	KASSERT(up == (uintptr_t)cip, ("md screwed up"));
	del_indir(cip);
	lip[li]->array[idx] = 0;
	lip[li]->used--;
	cip = lip[li];
    }
    return (0);
}

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
    struct md_s *sc;

    sc = pp->geom->softc;
    if (sc == NULL) {
	if (r <= 0 && w <= 0 && e <= 0)
	    return (0);
	return (ENXIO);
    }
    r += pp->acr;
    w += pp->acw;
    e += pp->ace;
    if ((sc->flags & MD_READONLY) != 0 && w > 0)
return (EROFS); 466 if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) { 467 sc->opencount = 1; 468 } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) { 469 sc->opencount = 0; 470 } 471 return (0); 472 } 473 474 static void 475 g_md_start(struct bio *bp) 476 { 477 struct md_s *sc; 478 479 sc = bp->bio_to->geom->softc; 480 if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) { 481 mtx_lock(&sc->stat_mtx); 482 devstat_start_transaction_bio(sc->devstat, bp); 483 mtx_unlock(&sc->stat_mtx); 484 } 485 mtx_lock(&sc->queue_mtx); 486 bioq_disksort(&sc->bio_queue, bp); 487 wakeup(sc); 488 mtx_unlock(&sc->queue_mtx); 489 } 490 491 #define MD_MALLOC_MOVE_ZERO 1 492 #define MD_MALLOC_MOVE_FILL 2 493 #define MD_MALLOC_MOVE_READ 3 494 #define MD_MALLOC_MOVE_WRITE 4 495 #define MD_MALLOC_MOVE_CMP 5 496 497 static int 498 md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize, 499 void *ptr, u_char fill, int op) 500 { 501 struct sf_buf *sf; 502 vm_page_t m, *mp1; 503 char *p, first; 504 off_t *uc; 505 unsigned n; 506 int error, i, ma_offs1, sz, first_read; 507 508 m = NULL; 509 error = 0; 510 sf = NULL; 511 /* if (op == MD_MALLOC_MOVE_CMP) { gcc */ 512 first = 0; 513 first_read = 0; 514 uc = ptr; 515 mp1 = *mp; 516 ma_offs1 = *ma_offs; 517 /* } */ 518 sched_pin(); 519 for (n = sectorsize; n != 0; n -= sz) { 520 sz = imin(PAGE_SIZE - *ma_offs, n); 521 if (m != **mp) { 522 if (sf != NULL) 523 sf_buf_free(sf); 524 m = **mp; 525 sf = sf_buf_alloc(m, SFB_CPUPRIVATE | 526 (md_malloc_wait ? 0 : SFB_NOWAIT)); 527 if (sf == NULL) { 528 error = ENOMEM; 529 break; 530 } 531 } 532 p = (char *)sf_buf_kva(sf) + *ma_offs; 533 switch (op) { 534 case MD_MALLOC_MOVE_ZERO: 535 bzero(p, sz); 536 break; 537 case MD_MALLOC_MOVE_FILL: 538 memset(p, fill, sz); 539 break; 540 case MD_MALLOC_MOVE_READ: 541 bcopy(ptr, p, sz); 542 cpu_flush_dcache(p, sz); 543 break; 544 case MD_MALLOC_MOVE_WRITE: 545 bcopy(p, ptr, sz); 546 break; 547 case MD_MALLOC_MOVE_CMP: 548 for (i = 0; i < sz; i++, p++) { 549 if (!first_read) { 550 *uc = (u_char)*p; 551 first = *p; 552 first_read = 1; 553 } else if (*p != first) { 554 error = EDOOFUS; 555 break; 556 } 557 } 558 break; 559 default: 560 KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op)); 561 break; 562 } 563 if (error != 0) 564 break; 565 *ma_offs += sz; 566 *ma_offs %= PAGE_SIZE; 567 if (*ma_offs == 0) 568 (*mp)++; 569 ptr = (char *)ptr + sz; 570 } 571 572 if (sf != NULL) 573 sf_buf_free(sf); 574 sched_unpin(); 575 if (op == MD_MALLOC_MOVE_CMP && error != 0) { 576 *mp = mp1; 577 *ma_offs = ma_offs1; 578 } 579 return (error); 580 } 581 582 static int 583 md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs, 584 unsigned len, void *ptr, u_char fill, int op) 585 { 586 bus_dma_segment_t *vlist; 587 uint8_t *p, *end, first; 588 off_t *uc; 589 int ma_offs, seg_len; 590 591 vlist = *pvlist; 592 ma_offs = *pma_offs; 593 uc = ptr; 594 595 for (; len != 0; len -= seg_len) { 596 seg_len = imin(vlist->ds_len - ma_offs, len); 597 p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs; 598 switch (op) { 599 case MD_MALLOC_MOVE_ZERO: 600 bzero(p, seg_len); 601 break; 602 case MD_MALLOC_MOVE_FILL: 603 memset(p, fill, seg_len); 604 break; 605 case MD_MALLOC_MOVE_READ: 606 bcopy(ptr, p, seg_len); 607 cpu_flush_dcache(p, seg_len); 608 break; 609 case MD_MALLOC_MOVE_WRITE: 610 bcopy(p, ptr, seg_len); 611 break; 612 case MD_MALLOC_MOVE_CMP: 613 end = p + seg_len; 614 first = *uc = *p; 615 /* Confirm all following bytes match the first */ 616 while (++p < end) { 617 
if (*p != first) 618 return (EDOOFUS); 619 } 620 break; 621 default: 622 KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op)); 623 break; 624 } 625 626 ma_offs += seg_len; 627 if (ma_offs == vlist->ds_len) { 628 ma_offs = 0; 629 vlist++; 630 } 631 ptr = (uint8_t *)ptr + seg_len; 632 } 633 *pvlist = vlist; 634 *pma_offs = ma_offs; 635 636 return (0); 637 } 638 639 static int 640 mdstart_malloc(struct md_s *sc, struct bio *bp) 641 { 642 u_char *dst; 643 vm_page_t *m; 644 bus_dma_segment_t *vlist; 645 int i, error, error1, ma_offs, notmapped; 646 off_t secno, nsec, uc; 647 uintptr_t sp, osp; 648 649 switch (bp->bio_cmd) { 650 case BIO_READ: 651 case BIO_WRITE: 652 case BIO_DELETE: 653 break; 654 default: 655 return (EOPNOTSUPP); 656 } 657 658 notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0; 659 vlist = (bp->bio_flags & BIO_VLIST) != 0 ? 660 (bus_dma_segment_t *)bp->bio_data : NULL; 661 if (notmapped) { 662 m = bp->bio_ma; 663 ma_offs = bp->bio_ma_offset; 664 dst = NULL; 665 KASSERT(vlist == NULL, ("vlists cannot be unmapped")); 666 } else if (vlist != NULL) { 667 ma_offs = bp->bio_ma_offset; 668 dst = NULL; 669 } else { 670 dst = bp->bio_data; 671 } 672 673 nsec = bp->bio_length / sc->sectorsize; 674 secno = bp->bio_offset / sc->sectorsize; 675 error = 0; 676 while (nsec--) { 677 osp = s_read(sc->indir, secno); 678 if (bp->bio_cmd == BIO_DELETE) { 679 if (osp != 0) 680 error = s_write(sc->indir, secno, 0); 681 } else if (bp->bio_cmd == BIO_READ) { 682 if (osp == 0) { 683 if (notmapped) { 684 error = md_malloc_move_ma(&m, &ma_offs, 685 sc->sectorsize, NULL, 0, 686 MD_MALLOC_MOVE_ZERO); 687 } else if (vlist != NULL) { 688 error = md_malloc_move_vlist(&vlist, 689 &ma_offs, sc->sectorsize, NULL, 0, 690 MD_MALLOC_MOVE_ZERO); 691 } else 692 bzero(dst, sc->sectorsize); 693 } else if (osp <= 255) { 694 if (notmapped) { 695 error = md_malloc_move_ma(&m, &ma_offs, 696 sc->sectorsize, NULL, osp, 697 MD_MALLOC_MOVE_FILL); 698 } else if (vlist != NULL) { 699 error = md_malloc_move_vlist(&vlist, 700 &ma_offs, sc->sectorsize, NULL, osp, 701 MD_MALLOC_MOVE_FILL); 702 } else 703 memset(dst, osp, sc->sectorsize); 704 } else { 705 if (notmapped) { 706 error = md_malloc_move_ma(&m, &ma_offs, 707 sc->sectorsize, (void *)osp, 0, 708 MD_MALLOC_MOVE_READ); 709 } else if (vlist != NULL) { 710 error = md_malloc_move_vlist(&vlist, 711 &ma_offs, sc->sectorsize, 712 (void *)osp, 0, 713 MD_MALLOC_MOVE_READ); 714 } else { 715 bcopy((void *)osp, dst, sc->sectorsize); 716 cpu_flush_dcache(dst, sc->sectorsize); 717 } 718 } 719 osp = 0; 720 } else if (bp->bio_cmd == BIO_WRITE) { 721 if (sc->flags & MD_COMPRESS) { 722 if (notmapped) { 723 error1 = md_malloc_move_ma(&m, &ma_offs, 724 sc->sectorsize, &uc, 0, 725 MD_MALLOC_MOVE_CMP); 726 i = error1 == 0 ? sc->sectorsize : 0; 727 } else if (vlist != NULL) { 728 error1 = md_malloc_move_vlist(&vlist, 729 &ma_offs, sc->sectorsize, &uc, 0, 730 MD_MALLOC_MOVE_CMP); 731 i = error1 == 0 ? sc->sectorsize : 0; 732 } else { 733 uc = dst[0]; 734 for (i = 1; i < sc->sectorsize; i++) { 735 if (dst[i] != uc) 736 break; 737 } 738 } 739 } else { 740 i = 0; 741 uc = 0; 742 } 743 if (i == sc->sectorsize) { 744 if (osp != uc) 745 error = s_write(sc->indir, secno, uc); 746 } else { 747 if (osp <= 255) { 748 sp = (uintptr_t)uma_zalloc(sc->uma, 749 md_malloc_wait ? 
M_WAITOK : 750 M_NOWAIT); 751 if (sp == 0) { 752 error = ENOSPC; 753 break; 754 } 755 if (notmapped) { 756 error = md_malloc_move_ma(&m, 757 &ma_offs, sc->sectorsize, 758 (void *)sp, 0, 759 MD_MALLOC_MOVE_WRITE); 760 } else if (vlist != NULL) { 761 error = md_malloc_move_vlist( 762 &vlist, &ma_offs, 763 sc->sectorsize, (void *)sp, 764 0, MD_MALLOC_MOVE_WRITE); 765 } else { 766 bcopy(dst, (void *)sp, 767 sc->sectorsize); 768 } 769 error = s_write(sc->indir, secno, sp); 770 } else { 771 if (notmapped) { 772 error = md_malloc_move_ma(&m, 773 &ma_offs, sc->sectorsize, 774 (void *)osp, 0, 775 MD_MALLOC_MOVE_WRITE); 776 } else if (vlist != NULL) { 777 error = md_malloc_move_vlist( 778 &vlist, &ma_offs, 779 sc->sectorsize, (void *)osp, 780 0, MD_MALLOC_MOVE_WRITE); 781 } else { 782 bcopy(dst, (void *)osp, 783 sc->sectorsize); 784 } 785 osp = 0; 786 } 787 } 788 } else { 789 error = EOPNOTSUPP; 790 } 791 if (osp > 255) 792 uma_zfree(sc->uma, (void*)osp); 793 if (error != 0) 794 break; 795 secno++; 796 if (!notmapped && vlist == NULL) 797 dst += sc->sectorsize; 798 } 799 bp->bio_resid = 0; 800 return (error); 801 } 802 803 static void 804 mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len) 805 { 806 off_t seg_len; 807 808 while (offset >= vlist->ds_len) { 809 offset -= vlist->ds_len; 810 vlist++; 811 } 812 813 while (len != 0) { 814 seg_len = omin(len, vlist->ds_len - offset); 815 bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset), 816 seg_len); 817 offset = 0; 818 src = (uint8_t *)src + seg_len; 819 len -= seg_len; 820 vlist++; 821 } 822 } 823 824 static void 825 mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len) 826 { 827 off_t seg_len; 828 829 while (offset >= vlist->ds_len) { 830 offset -= vlist->ds_len; 831 vlist++; 832 } 833 834 while (len != 0) { 835 seg_len = omin(len, vlist->ds_len - offset); 836 bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst, 837 seg_len); 838 offset = 0; 839 dst = (uint8_t *)dst + seg_len; 840 len -= seg_len; 841 vlist++; 842 } 843 } 844 845 static int 846 mdstart_preload(struct md_s *sc, struct bio *bp) 847 { 848 uint8_t *p; 849 850 p = sc->pl_ptr + bp->bio_offset; 851 switch (bp->bio_cmd) { 852 case BIO_READ: 853 if ((bp->bio_flags & BIO_VLIST) != 0) { 854 mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data, 855 bp->bio_ma_offset, bp->bio_length); 856 } else { 857 bcopy(p, bp->bio_data, bp->bio_length); 858 } 859 cpu_flush_dcache(bp->bio_data, bp->bio_length); 860 break; 861 case BIO_WRITE: 862 if ((bp->bio_flags & BIO_VLIST) != 0) { 863 mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data, 864 bp->bio_ma_offset, p, bp->bio_length); 865 } else { 866 bcopy(bp->bio_data, p, bp->bio_length); 867 } 868 break; 869 } 870 bp->bio_resid = 0; 871 return (0); 872 } 873 874 static int 875 mdstart_vnode(struct md_s *sc, struct bio *bp) 876 { 877 int error; 878 struct uio auio; 879 struct iovec aiov; 880 struct iovec *piov; 881 struct mount *mp; 882 struct vnode *vp; 883 struct buf *pb; 884 bus_dma_segment_t *vlist; 885 struct thread *td; 886 off_t iolen, iostart, len, zerosize; 887 int ma_offs, npages; 888 889 switch (bp->bio_cmd) { 890 case BIO_READ: 891 auio.uio_rw = UIO_READ; 892 break; 893 case BIO_WRITE: 894 case BIO_DELETE: 895 auio.uio_rw = UIO_WRITE; 896 break; 897 case BIO_FLUSH: 898 break; 899 default: 900 return (EOPNOTSUPP); 901 } 902 903 td = curthread; 904 vp = sc->vnode; 905 pb = NULL; 906 piov = NULL; 907 ma_offs = bp->bio_ma_offset; 908 len = bp->bio_length; 909 910 /* 911 * VNODE I/O 912 * 913 * If 
an error occurs, we set BIO_ERROR but we do not set 914 * B_INVAL because (for a write anyway), the buffer is 915 * still valid. 916 */ 917 918 if (bp->bio_cmd == BIO_FLUSH) { 919 (void) vn_start_write(vp, &mp, V_WAIT); 920 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 921 error = VOP_FSYNC(vp, MNT_WAIT, td); 922 VOP_UNLOCK(vp, 0); 923 vn_finished_write(mp); 924 return (error); 925 } 926 927 auio.uio_offset = (vm_ooffset_t)bp->bio_offset; 928 auio.uio_resid = bp->bio_length; 929 auio.uio_segflg = UIO_SYSSPACE; 930 auio.uio_td = td; 931 932 if (bp->bio_cmd == BIO_DELETE) { 933 /* 934 * Emulate BIO_DELETE by writing zeros. 935 */ 936 zerosize = ZERO_REGION_SIZE - 937 (ZERO_REGION_SIZE % sc->sectorsize); 938 auio.uio_iovcnt = howmany(bp->bio_length, zerosize); 939 piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK); 940 auio.uio_iov = piov; 941 while (len > 0) { 942 piov->iov_base = __DECONST(void *, zero_region); 943 piov->iov_len = len; 944 if (len > zerosize) 945 piov->iov_len = zerosize; 946 len -= piov->iov_len; 947 piov++; 948 } 949 piov = auio.uio_iov; 950 } else if ((bp->bio_flags & BIO_VLIST) != 0) { 951 piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK); 952 auio.uio_iov = piov; 953 vlist = (bus_dma_segment_t *)bp->bio_data; 954 while (len > 0) { 955 piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr + 956 ma_offs); 957 piov->iov_len = vlist->ds_len - ma_offs; 958 if (piov->iov_len > len) 959 piov->iov_len = len; 960 len -= piov->iov_len; 961 ma_offs = 0; 962 vlist++; 963 piov++; 964 } 965 auio.uio_iovcnt = piov - auio.uio_iov; 966 piov = auio.uio_iov; 967 } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 968 pb = uma_zalloc(md_pbuf_zone, M_WAITOK); 969 bp->bio_resid = len; 970 unmapped_step: 971 npages = atop(min(MAXPHYS, round_page(len + (ma_offs & 972 PAGE_MASK)))); 973 iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len); 974 KASSERT(iolen > 0, ("zero iolen")); 975 pmap_qenter((vm_offset_t)pb->b_data, 976 &bp->bio_ma[atop(ma_offs)], npages); 977 aiov.iov_base = (void *)((vm_offset_t)pb->b_data + 978 (ma_offs & PAGE_MASK)); 979 aiov.iov_len = iolen; 980 auio.uio_iov = &aiov; 981 auio.uio_iovcnt = 1; 982 auio.uio_resid = iolen; 983 } else { 984 aiov.iov_base = bp->bio_data; 985 aiov.iov_len = bp->bio_length; 986 auio.uio_iov = &aiov; 987 auio.uio_iovcnt = 1; 988 } 989 iostart = auio.uio_offset; 990 if (auio.uio_rw == UIO_READ) { 991 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 992 error = VOP_READ(vp, &auio, 0, sc->cred); 993 VOP_UNLOCK(vp, 0); 994 } else { 995 (void) vn_start_write(vp, &mp, V_WAIT); 996 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 997 error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC, 998 sc->cred); 999 VOP_UNLOCK(vp, 0); 1000 vn_finished_write(mp); 1001 if (error == 0) 1002 sc->flags &= ~MD_VERIFY; 1003 } 1004 1005 /* When MD_CACHE is set, try to avoid double-caching the data. 
*/ 1006 if (error == 0 && (sc->flags & MD_CACHE) == 0) 1007 VOP_ADVISE(vp, iostart, auio.uio_offset - 1, 1008 POSIX_FADV_DONTNEED); 1009 1010 if (pb != NULL) { 1011 pmap_qremove((vm_offset_t)pb->b_data, npages); 1012 if (error == 0) { 1013 len -= iolen; 1014 bp->bio_resid -= iolen; 1015 ma_offs += iolen; 1016 if (len > 0) 1017 goto unmapped_step; 1018 } 1019 uma_zfree(md_pbuf_zone, pb); 1020 } 1021 1022 free(piov, M_MD); 1023 if (pb == NULL) 1024 bp->bio_resid = auio.uio_resid; 1025 return (error); 1026 } 1027 1028 static void 1029 md_swap_page_free(vm_page_t m) 1030 { 1031 1032 vm_page_xunbusy(m); 1033 vm_page_lock(m); 1034 vm_page_free(m); 1035 vm_page_unlock(m); 1036 } 1037 1038 static int 1039 mdstart_swap(struct md_s *sc, struct bio *bp) 1040 { 1041 vm_page_t m; 1042 u_char *p; 1043 vm_pindex_t i, lastp; 1044 bus_dma_segment_t *vlist; 1045 int rv, ma_offs, offs, len, lastend; 1046 1047 switch (bp->bio_cmd) { 1048 case BIO_READ: 1049 case BIO_WRITE: 1050 case BIO_DELETE: 1051 break; 1052 default: 1053 return (EOPNOTSUPP); 1054 } 1055 1056 p = bp->bio_data; 1057 ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ? 1058 bp->bio_ma_offset : 0; 1059 vlist = (bp->bio_flags & BIO_VLIST) != 0 ? 1060 (bus_dma_segment_t *)bp->bio_data : NULL; 1061 1062 /* 1063 * offs is the offset at which to start operating on the 1064 * next (ie, first) page. lastp is the last page on 1065 * which we're going to operate. lastend is the ending 1066 * position within that last page (ie, PAGE_SIZE if 1067 * we're operating on complete aligned pages). 1068 */ 1069 offs = bp->bio_offset % PAGE_SIZE; 1070 lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE; 1071 lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1; 1072 1073 rv = VM_PAGER_OK; 1074 VM_OBJECT_WLOCK(sc->object); 1075 vm_object_pip_add(sc->object, 1); 1076 for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) { 1077 len = ((i == lastp) ? lastend : PAGE_SIZE) - offs; 1078 m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM); 1079 if (bp->bio_cmd == BIO_READ) { 1080 if (m->valid == VM_PAGE_BITS_ALL) 1081 rv = VM_PAGER_OK; 1082 else 1083 rv = vm_pager_get_pages(sc->object, &m, 1, 1084 NULL, NULL); 1085 if (rv == VM_PAGER_ERROR) { 1086 md_swap_page_free(m); 1087 break; 1088 } else if (rv == VM_PAGER_FAIL) { 1089 /* 1090 * Pager does not have the page. Zero 1091 * the allocated page, and mark it as 1092 * valid. Do not set dirty, the page 1093 * can be recreated if thrown out. 
1094 */ 1095 pmap_zero_page(m); 1096 m->valid = VM_PAGE_BITS_ALL; 1097 } 1098 if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 1099 pmap_copy_pages(&m, offs, bp->bio_ma, 1100 ma_offs, len); 1101 } else if ((bp->bio_flags & BIO_VLIST) != 0) { 1102 physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs, 1103 vlist, ma_offs, len); 1104 cpu_flush_dcache(p, len); 1105 } else { 1106 physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len); 1107 cpu_flush_dcache(p, len); 1108 } 1109 } else if (bp->bio_cmd == BIO_WRITE) { 1110 if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL) 1111 rv = VM_PAGER_OK; 1112 else 1113 rv = vm_pager_get_pages(sc->object, &m, 1, 1114 NULL, NULL); 1115 if (rv == VM_PAGER_ERROR) { 1116 md_swap_page_free(m); 1117 break; 1118 } else if (rv == VM_PAGER_FAIL) 1119 pmap_zero_page(m); 1120 1121 if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 1122 pmap_copy_pages(bp->bio_ma, ma_offs, &m, 1123 offs, len); 1124 } else if ((bp->bio_flags & BIO_VLIST) != 0) { 1125 physcopyin_vlist(vlist, ma_offs, 1126 VM_PAGE_TO_PHYS(m) + offs, len); 1127 } else { 1128 physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len); 1129 } 1130 1131 m->valid = VM_PAGE_BITS_ALL; 1132 if (m->dirty != VM_PAGE_BITS_ALL) { 1133 vm_page_dirty(m); 1134 vm_pager_page_unswapped(m); 1135 } 1136 } else if (bp->bio_cmd == BIO_DELETE) { 1137 if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL) 1138 rv = VM_PAGER_OK; 1139 else 1140 rv = vm_pager_get_pages(sc->object, &m, 1, 1141 NULL, NULL); 1142 if (rv == VM_PAGER_ERROR) { 1143 md_swap_page_free(m); 1144 break; 1145 } else if (rv == VM_PAGER_FAIL) { 1146 md_swap_page_free(m); 1147 m = NULL; 1148 } else { 1149 /* Page is valid. */ 1150 if (len != PAGE_SIZE) { 1151 pmap_zero_page_area(m, offs, len); 1152 if (m->dirty != VM_PAGE_BITS_ALL) { 1153 vm_page_dirty(m); 1154 vm_pager_page_unswapped(m); 1155 } 1156 } else { 1157 vm_pager_page_unswapped(m); 1158 md_swap_page_free(m); 1159 m = NULL; 1160 } 1161 } 1162 } 1163 if (m != NULL) { 1164 vm_page_xunbusy(m); 1165 vm_page_lock(m); 1166 if (vm_page_active(m)) 1167 vm_page_reference(m); 1168 else 1169 vm_page_activate(m); 1170 vm_page_unlock(m); 1171 } 1172 1173 /* Actions on further pages start at offset 0 */ 1174 p += PAGE_SIZE - offs; 1175 offs = 0; 1176 ma_offs += len; 1177 } 1178 vm_object_pip_wakeup(sc->object); 1179 VM_OBJECT_WUNLOCK(sc->object); 1180 return (rv != VM_PAGER_ERROR ? 
0 : ENOSPC); 1181 } 1182 1183 static int 1184 mdstart_null(struct md_s *sc, struct bio *bp) 1185 { 1186 1187 switch (bp->bio_cmd) { 1188 case BIO_READ: 1189 bzero(bp->bio_data, bp->bio_length); 1190 cpu_flush_dcache(bp->bio_data, bp->bio_length); 1191 break; 1192 case BIO_WRITE: 1193 break; 1194 } 1195 bp->bio_resid = 0; 1196 return (0); 1197 } 1198 1199 static void 1200 md_kthread(void *arg) 1201 { 1202 struct md_s *sc; 1203 struct bio *bp; 1204 int error; 1205 1206 sc = arg; 1207 thread_lock(curthread); 1208 sched_prio(curthread, PRIBIO); 1209 thread_unlock(curthread); 1210 if (sc->type == MD_VNODE) 1211 curthread->td_pflags |= TDP_NORUNNINGBUF; 1212 1213 for (;;) { 1214 mtx_lock(&sc->queue_mtx); 1215 if (sc->flags & MD_SHUTDOWN) { 1216 sc->flags |= MD_EXITING; 1217 mtx_unlock(&sc->queue_mtx); 1218 kproc_exit(0); 1219 } 1220 bp = bioq_takefirst(&sc->bio_queue); 1221 if (!bp) { 1222 msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0); 1223 continue; 1224 } 1225 mtx_unlock(&sc->queue_mtx); 1226 if (bp->bio_cmd == BIO_GETATTR) { 1227 int isv = ((sc->flags & MD_VERIFY) != 0); 1228 1229 if ((sc->fwsectors && sc->fwheads && 1230 (g_handleattr_int(bp, "GEOM::fwsectors", 1231 sc->fwsectors) || 1232 g_handleattr_int(bp, "GEOM::fwheads", 1233 sc->fwheads))) || 1234 g_handleattr_int(bp, "GEOM::candelete", 1)) 1235 error = -1; 1236 else if (sc->ident[0] != '\0' && 1237 g_handleattr_str(bp, "GEOM::ident", sc->ident)) 1238 error = -1; 1239 else if (g_handleattr_int(bp, "MNT::verified", isv)) 1240 error = -1; 1241 else 1242 error = EOPNOTSUPP; 1243 } else { 1244 error = sc->start(sc, bp); 1245 } 1246 1247 if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) { 1248 /* 1249 * Devstat uses (bio_bcount, bio_resid) for 1250 * determining the length of the completed part of 1251 * the i/o. g_io_deliver() will translate from 1252 * bio_completed to that, but it also destroys the 1253 * bio so we must do our own translation. 1254 */ 1255 bp->bio_bcount = bp->bio_length; 1256 bp->bio_resid = (error == -1 ? 
bp->bio_bcount : 0); 1257 devstat_end_transaction_bio(sc->devstat, bp); 1258 } 1259 if (error != -1) { 1260 bp->bio_completed = bp->bio_length; 1261 g_io_deliver(bp, error); 1262 } 1263 } 1264 } 1265 1266 static struct md_s * 1267 mdfind(int unit) 1268 { 1269 struct md_s *sc; 1270 1271 LIST_FOREACH(sc, &md_softc_list, list) { 1272 if (sc->unit == unit) 1273 break; 1274 } 1275 return (sc); 1276 } 1277 1278 static struct md_s * 1279 mdnew(int unit, int *errp, enum md_types type) 1280 { 1281 struct md_s *sc; 1282 int error; 1283 1284 *errp = 0; 1285 if (unit == -1) 1286 unit = alloc_unr(md_uh); 1287 else 1288 unit = alloc_unr_specific(md_uh, unit); 1289 1290 if (unit == -1) { 1291 *errp = EBUSY; 1292 return (NULL); 1293 } 1294 1295 sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO); 1296 sc->type = type; 1297 bioq_init(&sc->bio_queue); 1298 mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF); 1299 mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF); 1300 sc->unit = unit; 1301 sprintf(sc->name, "md%d", unit); 1302 LIST_INSERT_HEAD(&md_softc_list, sc, list); 1303 error = kproc_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name); 1304 if (error == 0) 1305 return (sc); 1306 LIST_REMOVE(sc, list); 1307 mtx_destroy(&sc->stat_mtx); 1308 mtx_destroy(&sc->queue_mtx); 1309 free_unr(md_uh, sc->unit); 1310 free(sc, M_MD); 1311 *errp = error; 1312 return (NULL); 1313 } 1314 1315 static void 1316 mdinit(struct md_s *sc) 1317 { 1318 struct g_geom *gp; 1319 struct g_provider *pp; 1320 1321 g_topology_lock(); 1322 gp = g_new_geomf(&g_md_class, "md%d", sc->unit); 1323 gp->softc = sc; 1324 pp = g_new_providerf(gp, "md%d", sc->unit); 1325 pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; 1326 pp->mediasize = sc->mediasize; 1327 pp->sectorsize = sc->sectorsize; 1328 switch (sc->type) { 1329 case MD_MALLOC: 1330 case MD_VNODE: 1331 case MD_SWAP: 1332 pp->flags |= G_PF_ACCEPT_UNMAPPED; 1333 break; 1334 case MD_PRELOAD: 1335 case MD_NULL: 1336 break; 1337 } 1338 sc->gp = gp; 1339 sc->pp = pp; 1340 g_error_provider(pp, 0); 1341 g_topology_unlock(); 1342 sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize, 1343 DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 1344 } 1345 1346 static int 1347 mdcreate_malloc(struct md_s *sc, struct md_req *mdr) 1348 { 1349 uintptr_t sp; 1350 int error; 1351 off_t u; 1352 1353 error = 0; 1354 if (mdr->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE)) 1355 return (EINVAL); 1356 if (mdr->md_sectorsize != 0 && !powerof2(mdr->md_sectorsize)) 1357 return (EINVAL); 1358 /* Compression doesn't make sense if we have reserved space */ 1359 if (mdr->md_options & MD_RESERVE) 1360 mdr->md_options &= ~MD_COMPRESS; 1361 if (mdr->md_fwsectors != 0) 1362 sc->fwsectors = mdr->md_fwsectors; 1363 if (mdr->md_fwheads != 0) 1364 sc->fwheads = mdr->md_fwheads; 1365 sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE); 1366 sc->indir = dimension(sc->mediasize / sc->sectorsize); 1367 sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL, 1368 0x1ff, 0); 1369 if (mdr->md_options & MD_RESERVE) { 1370 off_t nsectors; 1371 1372 nsectors = sc->mediasize / sc->sectorsize; 1373 for (u = 0; u < nsectors; u++) { 1374 sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ? 
1375 M_WAITOK : M_NOWAIT) | M_ZERO); 1376 if (sp != 0) 1377 error = s_write(sc->indir, u, sp); 1378 else 1379 error = ENOMEM; 1380 if (error != 0) 1381 break; 1382 } 1383 } 1384 return (error); 1385 } 1386 1387 1388 static int 1389 mdsetcred(struct md_s *sc, struct ucred *cred) 1390 { 1391 char *tmpbuf; 1392 int error = 0; 1393 1394 /* 1395 * Set credits in our softc 1396 */ 1397 1398 if (sc->cred) 1399 crfree(sc->cred); 1400 sc->cred = crhold(cred); 1401 1402 /* 1403 * Horrible kludge to establish credentials for NFS XXX. 1404 */ 1405 1406 if (sc->vnode) { 1407 struct uio auio; 1408 struct iovec aiov; 1409 1410 tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK); 1411 bzero(&auio, sizeof(auio)); 1412 1413 aiov.iov_base = tmpbuf; 1414 aiov.iov_len = sc->sectorsize; 1415 auio.uio_iov = &aiov; 1416 auio.uio_iovcnt = 1; 1417 auio.uio_offset = 0; 1418 auio.uio_rw = UIO_READ; 1419 auio.uio_segflg = UIO_SYSSPACE; 1420 auio.uio_resid = aiov.iov_len; 1421 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY); 1422 error = VOP_READ(sc->vnode, &auio, 0, sc->cred); 1423 VOP_UNLOCK(sc->vnode, 0); 1424 free(tmpbuf, M_TEMP); 1425 } 1426 return (error); 1427 } 1428 1429 static int 1430 mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td) 1431 { 1432 struct vattr vattr; 1433 struct nameidata nd; 1434 char *fname; 1435 int error, flags; 1436 1437 fname = mdr->md_file; 1438 if (mdr->md_file_seg == UIO_USERSPACE) { 1439 error = copyinstr(fname, sc->file, sizeof(sc->file), NULL); 1440 if (error != 0) 1441 return (error); 1442 } else if (mdr->md_file_seg == UIO_SYSSPACE) 1443 strlcpy(sc->file, fname, sizeof(sc->file)); 1444 else 1445 return (EDOOFUS); 1446 1447 /* 1448 * If the user specified that this is a read only device, don't 1449 * set the FWRITE mask before trying to open the backing store. 1450 */ 1451 flags = FREAD | ((mdr->md_options & MD_READONLY) ? 0 : FWRITE) \ 1452 | ((mdr->md_options & MD_VERIFY) ? O_VERIFY : 0); 1453 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td); 1454 error = vn_open(&nd, &flags, 0, NULL); 1455 if (error != 0) 1456 return (error); 1457 NDFREE(&nd, NDF_ONLY_PNBUF); 1458 if (nd.ni_vp->v_type != VREG) { 1459 error = EINVAL; 1460 goto bad; 1461 } 1462 error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred); 1463 if (error != 0) 1464 goto bad; 1465 if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) { 1466 vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY); 1467 if (nd.ni_vp->v_iflag & VI_DOOMED) { 1468 /* Forced unmount. 
*/ 1469 error = EBADF; 1470 goto bad; 1471 } 1472 } 1473 nd.ni_vp->v_vflag |= VV_MD; 1474 VOP_UNLOCK(nd.ni_vp, 0); 1475 1476 if (mdr->md_fwsectors != 0) 1477 sc->fwsectors = mdr->md_fwsectors; 1478 if (mdr->md_fwheads != 0) 1479 sc->fwheads = mdr->md_fwheads; 1480 snprintf(sc->ident, sizeof(sc->ident), "MD-DEV%ju-INO%ju", 1481 (uintmax_t)vattr.va_fsid, (uintmax_t)vattr.va_fileid); 1482 sc->flags = mdr->md_options & (MD_ASYNC | MD_CACHE | MD_FORCE | 1483 MD_VERIFY); 1484 if (!(flags & FWRITE)) 1485 sc->flags |= MD_READONLY; 1486 sc->vnode = nd.ni_vp; 1487 1488 error = mdsetcred(sc, td->td_ucred); 1489 if (error != 0) { 1490 sc->vnode = NULL; 1491 vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY); 1492 nd.ni_vp->v_vflag &= ~VV_MD; 1493 goto bad; 1494 } 1495 return (0); 1496 bad: 1497 VOP_UNLOCK(nd.ni_vp, 0); 1498 (void)vn_close(nd.ni_vp, flags, td->td_ucred, td); 1499 return (error); 1500 } 1501 1502 static void 1503 g_md_providergone(struct g_provider *pp) 1504 { 1505 struct md_s *sc = pp->geom->softc; 1506 1507 mtx_lock(&sc->queue_mtx); 1508 sc->flags |= MD_PROVIDERGONE; 1509 wakeup(&sc->flags); 1510 mtx_unlock(&sc->queue_mtx); 1511 } 1512 1513 static int 1514 mddestroy(struct md_s *sc, struct thread *td) 1515 { 1516 1517 if (sc->gp) { 1518 g_topology_lock(); 1519 g_wither_geom(sc->gp, ENXIO); 1520 g_topology_unlock(); 1521 1522 mtx_lock(&sc->queue_mtx); 1523 while (!(sc->flags & MD_PROVIDERGONE)) 1524 msleep(&sc->flags, &sc->queue_mtx, PRIBIO, "mddestroy", 0); 1525 mtx_unlock(&sc->queue_mtx); 1526 } 1527 if (sc->devstat) { 1528 devstat_remove_entry(sc->devstat); 1529 sc->devstat = NULL; 1530 } 1531 mtx_lock(&sc->queue_mtx); 1532 sc->flags |= MD_SHUTDOWN; 1533 wakeup(sc); 1534 while (!(sc->flags & MD_EXITING)) 1535 msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10); 1536 mtx_unlock(&sc->queue_mtx); 1537 mtx_destroy(&sc->stat_mtx); 1538 mtx_destroy(&sc->queue_mtx); 1539 if (sc->vnode != NULL) { 1540 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY); 1541 sc->vnode->v_vflag &= ~VV_MD; 1542 VOP_UNLOCK(sc->vnode, 0); 1543 (void)vn_close(sc->vnode, sc->flags & MD_READONLY ? 
1544 FREAD : (FREAD|FWRITE), sc->cred, td); 1545 } 1546 if (sc->cred != NULL) 1547 crfree(sc->cred); 1548 if (sc->object != NULL) 1549 vm_object_deallocate(sc->object); 1550 if (sc->indir) 1551 destroy_indir(sc, sc->indir); 1552 if (sc->uma) 1553 uma_zdestroy(sc->uma); 1554 1555 LIST_REMOVE(sc, list); 1556 free_unr(md_uh, sc->unit); 1557 free(sc, M_MD); 1558 return (0); 1559 } 1560 1561 static int 1562 mdresize(struct md_s *sc, struct md_req *mdr) 1563 { 1564 int error, res; 1565 vm_pindex_t oldpages, newpages; 1566 1567 switch (sc->type) { 1568 case MD_VNODE: 1569 case MD_NULL: 1570 break; 1571 case MD_SWAP: 1572 if (mdr->md_mediasize <= 0 || 1573 (mdr->md_mediasize % PAGE_SIZE) != 0) 1574 return (EDOM); 1575 oldpages = OFF_TO_IDX(round_page(sc->mediasize)); 1576 newpages = OFF_TO_IDX(round_page(mdr->md_mediasize)); 1577 if (newpages < oldpages) { 1578 VM_OBJECT_WLOCK(sc->object); 1579 vm_object_page_remove(sc->object, newpages, 0, 0); 1580 swap_pager_freespace(sc->object, newpages, 1581 oldpages - newpages); 1582 swap_release_by_cred(IDX_TO_OFF(oldpages - 1583 newpages), sc->cred); 1584 sc->object->charge = IDX_TO_OFF(newpages); 1585 sc->object->size = newpages; 1586 VM_OBJECT_WUNLOCK(sc->object); 1587 } else if (newpages > oldpages) { 1588 res = swap_reserve_by_cred(IDX_TO_OFF(newpages - 1589 oldpages), sc->cred); 1590 if (!res) 1591 return (ENOMEM); 1592 if ((mdr->md_options & MD_RESERVE) || 1593 (sc->flags & MD_RESERVE)) { 1594 error = swap_pager_reserve(sc->object, 1595 oldpages, newpages - oldpages); 1596 if (error < 0) { 1597 swap_release_by_cred( 1598 IDX_TO_OFF(newpages - oldpages), 1599 sc->cred); 1600 return (EDOM); 1601 } 1602 } 1603 VM_OBJECT_WLOCK(sc->object); 1604 sc->object->charge = IDX_TO_OFF(newpages); 1605 sc->object->size = newpages; 1606 VM_OBJECT_WUNLOCK(sc->object); 1607 } 1608 break; 1609 default: 1610 return (EOPNOTSUPP); 1611 } 1612 1613 sc->mediasize = mdr->md_mediasize; 1614 g_topology_lock(); 1615 g_resize_provider(sc->pp, sc->mediasize); 1616 g_topology_unlock(); 1617 return (0); 1618 } 1619 1620 static int 1621 mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td) 1622 { 1623 vm_ooffset_t npage; 1624 int error; 1625 1626 /* 1627 * Range check. Disallow negative sizes and sizes not being 1628 * multiple of page size. 1629 */ 1630 if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0) 1631 return (EDOM); 1632 1633 /* 1634 * Allocate an OBJT_SWAP object. 1635 * 1636 * Note the truncation. 1637 */ 1638 1639 if ((mdr->md_options & MD_VERIFY) != 0) 1640 return (EINVAL); 1641 npage = mdr->md_mediasize / PAGE_SIZE; 1642 if (mdr->md_fwsectors != 0) 1643 sc->fwsectors = mdr->md_fwsectors; 1644 if (mdr->md_fwheads != 0) 1645 sc->fwheads = mdr->md_fwheads; 1646 sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage, 1647 VM_PROT_DEFAULT, 0, td->td_ucred); 1648 if (sc->object == NULL) 1649 return (ENOMEM); 1650 sc->flags = mdr->md_options & (MD_FORCE | MD_RESERVE); 1651 if (mdr->md_options & MD_RESERVE) { 1652 if (swap_pager_reserve(sc->object, 0, npage) < 0) { 1653 error = EDOM; 1654 goto finish; 1655 } 1656 } 1657 error = mdsetcred(sc, td->td_ucred); 1658 finish: 1659 if (error != 0) { 1660 vm_object_deallocate(sc->object); 1661 sc->object = NULL; 1662 } 1663 return (error); 1664 } 1665 1666 static int 1667 mdcreate_null(struct md_s *sc, struct md_req *mdr, struct thread *td) 1668 { 1669 1670 /* 1671 * Range check. Disallow negative sizes and sizes not being 1672 * multiple of page size. 
1673 */ 1674 if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0) 1675 return (EDOM); 1676 1677 return (0); 1678 } 1679 1680 static int 1681 kern_mdattach_locked(struct thread *td, struct md_req *mdr) 1682 { 1683 struct md_s *sc; 1684 unsigned sectsize; 1685 int error, i; 1686 1687 sx_assert(&md_sx, SA_XLOCKED); 1688 1689 switch (mdr->md_type) { 1690 case MD_MALLOC: 1691 case MD_PRELOAD: 1692 case MD_VNODE: 1693 case MD_SWAP: 1694 case MD_NULL: 1695 break; 1696 default: 1697 return (EINVAL); 1698 } 1699 if (mdr->md_sectorsize == 0) 1700 sectsize = DEV_BSIZE; 1701 else 1702 sectsize = mdr->md_sectorsize; 1703 if (sectsize > MAXPHYS || mdr->md_mediasize < sectsize) 1704 return (EINVAL); 1705 if (mdr->md_options & MD_AUTOUNIT) 1706 sc = mdnew(-1, &error, mdr->md_type); 1707 else { 1708 if (mdr->md_unit > INT_MAX) 1709 return (EINVAL); 1710 sc = mdnew(mdr->md_unit, &error, mdr->md_type); 1711 } 1712 if (sc == NULL) 1713 return (error); 1714 if (mdr->md_label != NULL) 1715 error = copyinstr(mdr->md_label, sc->label, 1716 sizeof(sc->label), NULL); 1717 if (error != 0) 1718 goto err_after_new; 1719 if (mdr->md_options & MD_AUTOUNIT) 1720 mdr->md_unit = sc->unit; 1721 sc->mediasize = mdr->md_mediasize; 1722 sc->sectorsize = sectsize; 1723 error = EDOOFUS; 1724 switch (sc->type) { 1725 case MD_MALLOC: 1726 sc->start = mdstart_malloc; 1727 error = mdcreate_malloc(sc, mdr); 1728 break; 1729 case MD_PRELOAD: 1730 /* 1731 * We disallow attaching preloaded memory disks via 1732 * ioctl. Preloaded memory disks are automatically 1733 * attached in g_md_init(). 1734 */ 1735 error = EOPNOTSUPP; 1736 break; 1737 case MD_VNODE: 1738 sc->start = mdstart_vnode; 1739 error = mdcreate_vnode(sc, mdr, td); 1740 break; 1741 case MD_SWAP: 1742 sc->start = mdstart_swap; 1743 error = mdcreate_swap(sc, mdr, td); 1744 break; 1745 case MD_NULL: 1746 sc->start = mdstart_null; 1747 error = mdcreate_null(sc, mdr, td); 1748 break; 1749 } 1750 err_after_new: 1751 if (error != 0) { 1752 mddestroy(sc, td); 1753 return (error); 1754 } 1755 1756 /* Prune off any residual fractional sector */ 1757 i = sc->mediasize % sc->sectorsize; 1758 sc->mediasize -= i; 1759 1760 mdinit(sc); 1761 return (0); 1762 } 1763 1764 static int 1765 kern_mdattach(struct thread *td, struct md_req *mdr) 1766 { 1767 int error; 1768 1769 sx_xlock(&md_sx); 1770 error = kern_mdattach_locked(td, mdr); 1771 sx_xunlock(&md_sx); 1772 return (error); 1773 } 1774 1775 static int 1776 kern_mddetach_locked(struct thread *td, struct md_req *mdr) 1777 { 1778 struct md_s *sc; 1779 1780 sx_assert(&md_sx, SA_XLOCKED); 1781 1782 if (mdr->md_mediasize != 0 || 1783 (mdr->md_options & ~MD_FORCE) != 0) 1784 return (EINVAL); 1785 1786 sc = mdfind(mdr->md_unit); 1787 if (sc == NULL) 1788 return (ENOENT); 1789 if (sc->opencount != 0 && !(sc->flags & MD_FORCE) && 1790 !(mdr->md_options & MD_FORCE)) 1791 return (EBUSY); 1792 return (mddestroy(sc, td)); 1793 } 1794 1795 static int 1796 kern_mddetach(struct thread *td, struct md_req *mdr) 1797 { 1798 int error; 1799 1800 sx_xlock(&md_sx); 1801 error = kern_mddetach_locked(td, mdr); 1802 sx_xunlock(&md_sx); 1803 return (error); 1804 } 1805 1806 static int 1807 kern_mdresize_locked(struct md_req *mdr) 1808 { 1809 struct md_s *sc; 1810 1811 sx_assert(&md_sx, SA_XLOCKED); 1812 1813 if ((mdr->md_options & ~(MD_FORCE | MD_RESERVE)) != 0) 1814 return (EINVAL); 1815 1816 sc = mdfind(mdr->md_unit); 1817 if (sc == NULL) 1818 return (ENOENT); 1819 if (mdr->md_mediasize < sc->sectorsize) 1820 return (EINVAL); 1821 if (mdr->md_mediasize < 
	sc->mediasize &&
	!(sc->flags & MD_FORCE) &&
	!(mdr->md_options & MD_FORCE))
	return (EBUSY);
    return (mdresize(sc, mdr));
}

static int
kern_mdresize(struct md_req *mdr)
{
    int error;

    sx_xlock(&md_sx);
    error = kern_mdresize_locked(mdr);
    sx_xunlock(&md_sx);
    return (error);
}

static int
kern_mdquery_locked(struct md_req *mdr)
{
    struct md_s *sc;
    int error;

    sx_assert(&md_sx, SA_XLOCKED);

    sc = mdfind(mdr->md_unit);
    if (sc == NULL)
	return (ENOENT);
    mdr->md_type = sc->type;
    mdr->md_options = sc->flags;
    mdr->md_mediasize = sc->mediasize;
    mdr->md_sectorsize = sc->sectorsize;
    error = 0;
    if (mdr->md_label != NULL) {
	error = copyout(sc->label, mdr->md_label,
	    strlen(sc->label) + 1);
	if (error != 0)
	    return (error);
    }
    if (sc->type == MD_VNODE ||
	(sc->type == MD_PRELOAD && mdr->md_file != NULL))
	error = copyout(sc->file, mdr->md_file,
	    strlen(sc->file) + 1);
    return (error);
}

static int
kern_mdquery(struct md_req *mdr)
{
    int error;

    sx_xlock(&md_sx);
    error = kern_mdquery_locked(mdr);
    sx_xunlock(&md_sx);
    return (error);
}

static int
kern_mdlist_locked(struct md_req *mdr)
{
    struct md_s *sc;
    int i;

    sx_assert(&md_sx, SA_XLOCKED);

    /*
     * Write the number of md devices to mdr->md_units[0].
     * Write the unit number of the first (mdr->md_units_nitems - 2)
     * units to mdr->md_units[1::(mdr->md_units_nitems - 2)] and terminate
     * the list with -1.
     *
     * XXX: There is currently no mechanism to retrieve unit
     * numbers for more than (MDNPAD - 2) units.
     *
     * XXX: Due to the use of LIST_INSERT_HEAD in mdnew(), the
     * list of visible unit numbers is not stable.
     */
    i = 1;
    LIST_FOREACH(sc, &md_softc_list, list) {
	if (i < mdr->md_units_nitems - 1)
	    mdr->md_units[i] = sc->unit;
	i++;
    }
    mdr->md_units[MIN(i, mdr->md_units_nitems - 1)] = -1;
    mdr->md_units[0] = i - 1;
    return (0);
}

static int
kern_mdlist(struct md_req *mdr)
{
    int error;

    sx_xlock(&md_sx);
    error = kern_mdlist_locked(mdr);
    sx_xunlock(&md_sx);
    return (error);
}

/* Copy members that are not userspace pointers.
 */
#define MD_IOCTL2REQ(mdio, mdr) do {					\
    (mdr)->md_unit = (mdio)->md_unit;					\
    (mdr)->md_type = (mdio)->md_type;					\
    (mdr)->md_mediasize = (mdio)->md_mediasize;				\
    (mdr)->md_sectorsize = (mdio)->md_sectorsize;			\
    (mdr)->md_options = (mdio)->md_options;				\
    (mdr)->md_fwheads = (mdio)->md_fwheads;				\
    (mdr)->md_fwsectors = (mdio)->md_fwsectors;				\
    (mdr)->md_units = &(mdio)->md_pad[0];				\
    (mdr)->md_units_nitems = nitems((mdio)->md_pad);			\
} while(0)

/* Copy members that might have been updated */
#define MD_REQ2IOCTL(mdr, mdio) do {					\
    (mdio)->md_unit = (mdr)->md_unit;					\
    (mdio)->md_type = (mdr)->md_type;					\
    (mdio)->md_mediasize = (mdr)->md_mediasize;				\
    (mdio)->md_sectorsize = (mdr)->md_sectorsize;			\
    (mdio)->md_options = (mdr)->md_options;				\
    (mdio)->md_fwheads = (mdr)->md_fwheads;				\
    (mdio)->md_fwsectors = (mdr)->md_fwsectors;				\
} while(0)

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
    struct md_req mdr;
    int error;

    if (md_debug)
	printf("mdctlioctl(%s %lx %p %x %p)\n",
	    devtoname(dev), cmd, addr, flags, td);

    bzero(&mdr, sizeof(mdr));
    switch (cmd) {
    case MDIOCATTACH:
    case MDIOCDETACH:
    case MDIOCRESIZE:
    case MDIOCQUERY:
    case MDIOCLIST: {
	struct md_ioctl *mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
	    return (EINVAL);
	MD_IOCTL2REQ(mdio, &mdr);
	mdr.md_file = mdio->md_file;
	mdr.md_file_seg = UIO_USERSPACE;
	/* If the file is adjacent to the md_ioctl it's in kernel. */
	if ((void *)mdio->md_file == (void *)(mdio + 1))
	    mdr.md_file_seg = UIO_SYSSPACE;
	mdr.md_label = mdio->md_label;
	break;
    }
#ifdef COMPAT_FREEBSD32
    case MDIOCATTACH_32:
    case MDIOCDETACH_32:
    case MDIOCRESIZE_32:
    case MDIOCQUERY_32:
    case MDIOCLIST_32: {
	struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
	if (mdio->md_version != MDIOVERSION)
	    return (EINVAL);
	MD_IOCTL2REQ(mdio, &mdr);
	mdr.md_file = (void *)(uintptr_t)mdio->md_file;
	mdr.md_file_seg = UIO_USERSPACE;
	mdr.md_label = (void *)(uintptr_t)mdio->md_label;
	break;
    }
#endif
    default:
	/* Fall through to handler switch.
	 */
	break;
    }

    error = 0;
    switch (cmd) {
    case MDIOCATTACH:
#ifdef COMPAT_FREEBSD32
    case MDIOCATTACH_32:
#endif
	error = kern_mdattach(td, &mdr);
	break;
    case MDIOCDETACH:
#ifdef COMPAT_FREEBSD32
    case MDIOCDETACH_32:
#endif
	error = kern_mddetach(td, &mdr);
	break;
    case MDIOCRESIZE:
#ifdef COMPAT_FREEBSD32
    case MDIOCRESIZE_32:
#endif
	error = kern_mdresize(&mdr);
	break;
    case MDIOCQUERY:
#ifdef COMPAT_FREEBSD32
    case MDIOCQUERY_32:
#endif
	error = kern_mdquery(&mdr);
	break;
    case MDIOCLIST:
#ifdef COMPAT_FREEBSD32
    case MDIOCLIST_32:
#endif
	error = kern_mdlist(&mdr);
	break;
    default:
	error = ENOIOCTL;
    }

    switch (cmd) {
    case MDIOCATTACH:
    case MDIOCQUERY: {
	struct md_ioctl *mdio = (struct md_ioctl *)addr;
	MD_REQ2IOCTL(&mdr, mdio);
	break;
    }
#ifdef COMPAT_FREEBSD32
    case MDIOCATTACH_32:
    case MDIOCQUERY_32: {
	struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
	MD_REQ2IOCTL(&mdr, mdio);
	break;
    }
#endif
    default:
	/* Other commands do not alter mdr. */
	break;
    }

    return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
    struct md_s *sc;
    int error;

    sc = mdnew(-1, &error, MD_PRELOAD);
    if (sc == NULL)
	return;
    sc->mediasize = length;
    sc->sectorsize = DEV_BSIZE;
    sc->pl_ptr = image;
    sc->pl_len = length;
    sc->start = mdstart_preload;
    if (name != NULL)
	strlcpy(sc->file, name, sizeof(sc->file));
#ifdef MD_ROOT
    if (sc->unit == 0) {
#ifndef ROOTDEVNAME
	rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
#ifdef MD_ROOT_READONLY
	sc->flags |= MD_READONLY;
#endif
    }
#endif
    mdinit(sc);
    if (name != NULL) {
	printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
	    MD_NAME, sc->unit, name, length, image);
    } else {
	printf("%s%d: Embedded image %zd bytes at %p\n",
	    MD_NAME, sc->unit, length, image);
    }
}

static void
g_md_init(struct g_class *mp __unused)
{
    caddr_t mod;
    u_char *ptr, *name, *type;
    unsigned len;
    int i;

    /* figure out log2(NINDIR) */
    for (i = NINDIR, nshift = -1; i; nshift++)
	i >>= 1;

    mod = NULL;
    sx_init(&md_sx, "MD config lock");
    g_topology_unlock();
    md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT
    if (mfs_root_size != 0) {
	sx_xlock(&md_sx);
#ifdef MD_ROOT_MEM
	md_preloaded(mfs_root, mfs_root_size, NULL);
#else
	md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
	    NULL);
#endif
	sx_xunlock(&md_sx);
    }
#endif
    /* XXX: are preload_* static or do they need Giant ?
     */
    while ((mod = preload_search_next_name(mod)) != NULL) {
	name = (char *)preload_search_info(mod, MODINFO_NAME);
	if (name == NULL)
	    continue;
	type = (char *)preload_search_info(mod, MODINFO_TYPE);
	if (type == NULL)
	    continue;
	if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
	    continue;
	ptr = preload_fetch_addr(mod);
	len = preload_fetch_size(mod);
	if (ptr != NULL && len != 0) {
	    sx_xlock(&md_sx);
	    md_preloaded(ptr, len, name);
	    sx_xunlock(&md_sx);
	}
    }
    md_pbuf_zone = pbuf_zsecond_create("mdpbuf", nswbuf / 10);
    status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	0600, MDCTL_NAME);
    g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
    struct md_s *mp;
    char *type;

    mp = gp->softc;
    if (mp == NULL)
	return;

    switch (mp->type) {
    case MD_MALLOC:
	type = "malloc";
	break;
    case MD_PRELOAD:
	type = "preload";
	break;
    case MD_VNODE:
	type = "vnode";
	break;
    case MD_SWAP:
	type = "swap";
	break;
    case MD_NULL:
	type = "null";
	break;
    default:
	type = "unknown";
	break;
    }

    if (pp != NULL) {
	if (indent == NULL) {
	    sbuf_printf(sb, " u %d", mp->unit);
	    sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
	    sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
	    sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
	    sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
	    sbuf_printf(sb, " t %s", type);
	    if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
		(mp->type == MD_PRELOAD && mp->file[0] != '\0'))
		sbuf_printf(sb, " file %s", mp->file);
	    sbuf_printf(sb, " label %s", mp->label);
	} else {
	    sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
		mp->unit);
	    sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
		indent, (uintmax_t) mp->sectorsize);
	    sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
		indent, (uintmax_t) mp->fwheads);
	    sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
		indent, (uintmax_t) mp->fwsectors);
	    if (mp->ident[0] != '\0') {
		sbuf_printf(sb, "%s<ident>", indent);
		g_conf_printf_escaped(sb, "%s", mp->ident);
		sbuf_printf(sb, "</ident>\n");
	    }
	    sbuf_printf(sb, "%s<length>%ju</length>\n",
		indent, (uintmax_t) mp->mediasize);
	    sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
		(mp->flags & MD_COMPRESS) == 0 ? "off": "on");
	    sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		(mp->flags & MD_READONLY) == 0 ? "read-write":
		"read-only");
	    sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		type);
	    if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
		(mp->type == MD_PRELOAD && mp->file[0] != '\0')) {
		sbuf_printf(sb, "%s<file>", indent);
		g_conf_printf_escaped(sb, "%s", mp->file);
		sbuf_printf(sb, "</file>\n");
	    }
	    if (mp->type == MD_VNODE)
		sbuf_printf(sb, "%s<cache>%s</cache>\n", indent,
		    (mp->flags & MD_CACHE) == 0 ? "off": "on");
	    sbuf_printf(sb, "%s<label>", indent);
	    g_conf_printf_escaped(sb, "%s", mp->label);
	    sbuf_printf(sb, "</label>\n");
	}
    }
}

static void
g_md_fini(struct g_class *mp __unused)
{

    sx_destroy(&md_sx);
    if (status_dev != NULL)
	destroy_dev(status_dev);
    uma_zdestroy(md_pbuf_zone);
    delete_unrhdr(md_uh);
}