/*-
 * SPDX-License-Identifier: (Beerware AND BSD-3-Clause)
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */
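
/*
 * md(4): memory/file-backed disk driver.  A minimal usage sketch via the
 * standard mdconfig(8) front end:
 *
 *	mdconfig -a -t swap -s 1g	 # swap-backed 1 GB disk -> /dev/mdN
 *	mdconfig -a -t vnode -f disk.img # file-backed disk
 *	mdconfig -l			 # list configured units
 *	mdconfig -d -u 0		 # detach md0
 *
 * Each backend type below (malloc, preload, vnode, swap, null) supplies its
 * own mdstart_*() routine, dispatched from the per-unit worker thread.
 */
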
#include "opt_rootdevname.h"
#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/disk.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

struct md_req {
	unsigned	md_unit;	/* unit number */
	enum md_types	md_type;	/* type of disk */
	off_t		md_mediasize;	/* size of disk in bytes */
	unsigned	md_sectorsize;	/* sectorsize */
	unsigned	md_options;	/* options */
	int		md_fwheads;	/* firmware heads */
	int		md_fwsectors;	/* firmware sectors */
	char		*md_file;	/* pathname of file to mount */
	enum uio_seg	md_file_seg;	/* location of md_file */
	char		*md_label;	/* label of the device (userspace) */
	int		*md_units;	/* pointer to units array (kernel) */
	size_t		md_units_nitems; /* items in md_units array */
};

#ifdef COMPAT_FREEBSD32
struct md_ioctl32 {
	unsigned	md_version;
	unsigned	md_unit;
	enum md_types	md_type;
	uint32_t	md_file;
	off_t		md_mediasize;
	unsigned	md_sectorsize;
	unsigned	md_options;
	uint64_t	md_base;
	int		md_fwheads;
	int		md_fwsectors;
	uint32_t	md_label;
	int		md_pad[MDNPAD];
} __attribute__((__packed__));
CTASSERT((sizeof(struct md_ioctl32)) == 436);

#define	MDIOCATTACH_32	_IOC_NEWTYPE(MDIOCATTACH, struct md_ioctl32)
#define	MDIOCDETACH_32	_IOC_NEWTYPE(MDIOCDETACH, struct md_ioctl32)
#define	MDIOCQUERY_32	_IOC_NEWTYPE(MDIOCQUERY, struct md_ioctl32)
#define	MDIOCLIST_32	_IOC_NEWTYPE(MDIOCLIST, struct md_ioctl32)
#define	MDIOCRESIZE_32	_IOC_NEWTYPE(MDIOCRESIZE, struct md_ioctl32)
#endif /* COMPAT_FREEBSD32 */

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * We put the mfs_root symbol into the oldmfs section of the kernel object file.
 * Applications that patch the object with the image can determine
 * the size looking at the oldmfs section size within the kernel.
 */
u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#elif defined(MD_ROOT_MEM)
/* MD region already mapped in the memory */
u_char *mfs_root;
int mfs_root_size;
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static uma_zone_t md_pbuf_zone;
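
/*
 * Sparse backing store for MD_MALLOC devices: a radix tree of "indir"
 * nodes, each holding NINDIR slots.  With 4 KB pages and 64-bit pointers,
 * NINDIR is 512 and nshift (log2(NINDIR), computed in g_md_init()) is 9.
 * Interior slots point at lower-level indir nodes; leaf slots encode a
 * sector as either 0 (unallocated, reads back as zeroes), a value <= 255
 * (a sector uniformly filled with that byte, the MD_COMPRESS trick), or
 * a pointer to a sector-sized buffer from the per-device UMA zone.
 */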
struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	char ident[32];
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	char label[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}
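
/*
 * A worked example of dimension(), assuming NINDIR == 512 (nshift 9):
 * a 1 GB device with 512-byte sectors has 2^21 sectors, so the loop above
 * runs twice (2^21 -> 2^12 -> 2^3) and yields a three-level tree whose top
 * node has shift 18.  s_read()/s_write() below then consume the sector
 * number 9 bits at a time, from bit 18 down to bit 0.
 */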
/*
 * Read a given sector
 */
static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */
static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5

static int
md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
	first = 0;
	first_read = 0;
	uc = ptr;
	mp1 = *mp;
	ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}
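
/*
 * The BIO_VLIST counterpart of md_malloc_move_ma(): it walks a
 * bus_dma_segment_t scatter/gather list instead of an array of vm_page_t,
 * so the segments are already mapped and no sf_buf is needed.
 */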
static int
md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
    unsigned len, void *ptr, u_char fill, int op)
{
	bus_dma_segment_t *vlist;
	uint8_t *p, *end, first;
	off_t *uc;
	int ma_offs, seg_len;

	vlist = *pvlist;
	ma_offs = *pma_offs;
	uc = ptr;

	for (; len != 0; len -= seg_len) {
		seg_len = imin(vlist->ds_len - ma_offs, len);
		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, seg_len);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, seg_len);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, seg_len);
			cpu_flush_dcache(p, seg_len);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, seg_len);
			break;
		case MD_MALLOC_MOVE_CMP:
			end = p + seg_len;
			first = *uc = *p;
			/* Confirm all following bytes match the first */
			while (++p < end) {
				if (*p != first)
					return (EDOOFUS);
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
			break;
		}

		ma_offs += seg_len;
		if (ma_offs == vlist->ds_len) {
			ma_offs = 0;
			vlist++;
		}
		ptr = (uint8_t *)ptr + seg_len;
	}
	*pvlist = vlist;
	*pma_offs = ma_offs;

	return (0);
}
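
/*
 * Per-sector worker for malloc-backed devices.  Reads decode the leaf
 * encoding described above; writes with MD_COMPRESS first scan the sector
 * for a uniform byte value and, if one is found, store just that byte in
 * the tree instead of allocating a buffer.
 */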
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	bus_dma_segment_t *vlist;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
	} else if (vlist != NULL) {
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize,
					    (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move_ma(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else if (vlist != NULL) {
					error1 = md_malloc_move_vlist(&vlist,
					    &ma_offs, sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)sp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move_ma(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else if (vlist != NULL) {
						error = md_malloc_move_vlist(
						    &vlist, &ma_offs,
						    sc->sectorsize, (void *)osp,
						    0, MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped && vlist == NULL)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static void
mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
		    seg_len);
		offset = 0;
		src = (uint8_t *)src + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static void
mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
{
	off_t seg_len;

	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
		vlist++;
	}

	while (len != 0) {
		seg_len = omin(len, vlist->ds_len - offset);
		bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
		    seg_len);
		offset = 0;
		dst = (uint8_t *)dst + seg_len;
		len -= seg_len;
		vlist++;
	}
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{
	uint8_t *p;

	p = sc->pl_ptr + bp->bio_offset;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, bp->bio_length);
		} else {
			bcopy(p, bp->bio_data, bp->bio_length);
		}
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, p, bp->bio_length);
		} else {
			bcopy(bp->bio_data, p, bp->bio_length);
		}
		break;
	}
	bp->bio_resid = 0;
	return (0);
}
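
/*
 * Vnode-backed I/O.  Three request shapes are handled: BIO_DELETE is
 * emulated by writing from the kernel zero_region through a temporary
 * iovec array, BIO_VLIST builds one iovec per scatter/gather segment,
 * and BIO_UNMAPPED maps the bio pages into a pbuf with pmap_qenter()
 * in MAXPHYS-sized steps.
 */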
static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct iovec *piov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	bus_dma_segment_t *vlist;
	struct thread *td;
	off_t iolen, iostart, len, zerosize;
	int ma_offs, npages;

	switch (bp->bio_cmd) {
	case BIO_READ:
		auio.uio_rw = UIO_READ;
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		auio.uio_rw = UIO_WRITE;
		break;
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;
	pb = NULL;
	piov = NULL;
	ma_offs = bp->bio_ma_offset;
	len = bp->bio_length;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * Emulate BIO_DELETE by writing zeros.
		 */
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
		piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		while (len > 0) {
			piov->iov_base = __DECONST(void *, zero_region);
			piov->iov_len = len;
			if (len > zerosize)
				piov->iov_len = zerosize;
			len -= piov->iov_len;
			piov++;
		}
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		auio.uio_iov = piov;
		vlist = (bus_dma_segment_t *)bp->bio_data;
		while (len > 0) {
			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
			    ma_offs);
			piov->iov_len = vlist->ds_len - ma_offs;
			if (piov->iov_len > len)
				piov->iov_len = len;
			len -= piov->iov_len;
			ma_offs = 0;
			vlist++;
			piov++;
		}
		auio.uio_iovcnt = piov - auio.uio_iov;
		piov = auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pb = uma_zalloc(md_pbuf_zone, M_WAITOK);
		bp->bio_resid = len;
unmapped_step:
		npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
		    PAGE_MASK))));
		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
		KASSERT(iolen > 0, ("zero iolen"));
		pmap_qenter((vm_offset_t)pb->b_data,
		    &bp->bio_ma[atop(ma_offs)], npages);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    (ma_offs & PAGE_MASK));
		aiov.iov_len = iolen;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = iolen;
	} else {
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
	}
	iostart = auio.uio_offset;
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, 0, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error == 0)
			sc->flags &= ~MD_VERIFY;
	}

	/* Unless MD_CACHE is set, try to avoid double-caching the data. */
	if (error == 0 && (sc->flags & MD_CACHE) == 0)
		VOP_ADVISE(vp, iostart, auio.uio_offset - 1,
		    POSIX_FADV_DONTNEED);

	if (pb != NULL) {
		pmap_qremove((vm_offset_t)pb->b_data, npages);
		if (error == 0) {
			len -= iolen;
			bp->bio_resid -= iolen;
			ma_offs += iolen;
			if (len > 0)
				goto unmapped_step;
		}
		uma_zfree(md_pbuf_zone, pb);
	}

	free(piov, M_MD);
	if (pb == NULL)
		bp->bio_resid = auio.uio_resid;
	return (error);
}

static void
md_swap_page_free(vm_page_t m)
{

	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_free(m);
	vm_page_unlock(m);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	bus_dma_segment_t *vlist;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
	    bp->bio_ma_offset : 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;

	/*
	 * offs is the offset at which to start operating on the
	 * next (i.e., first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (i.e., PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
				    vlist, ma_offs, len);
				cpu_flush_dcache(p, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL)
				pmap_zero_page(m);

			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyin_vlist(vlist, ma_offs,
				    VM_PAGE_TO_PHYS(m) + offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}

			m->valid = VM_PAGE_BITS_ALL;
			if (m->dirty != VM_PAGE_BITS_ALL) {
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1,
				    NULL, NULL);
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				md_swap_page_free(m);
				m = NULL;
			} else {
				/* Page is valid. */
				if (len != PAGE_SIZE) {
					pmap_zero_page_area(m, offs, len);
					if (m->dirty != VM_PAGE_BITS_ALL) {
						vm_page_dirty(m);
						vm_pager_page_unswapped(m);
					}
				} else {
					vm_pager_page_unswapped(m);
					md_swap_page_free(m);
					m = NULL;
				}
			}
		}
		if (m != NULL) {
			vm_page_xunbusy(m);
			vm_page_lock(m);
			if (vm_page_active(m))
				vm_page_reference(m);
			else
				vm_page_activate(m);
			vm_page_unlock(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
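
/*
 * MD_NULL devices are a bit bucket: reads return zeroes and writes are
 * simply discarded, which makes them handy for exercising the GEOM path
 * without any backing store.
 */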
static int
mdstart_null(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			int isv = ((sc->flags & MD_VERIFY) != 0);

			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else if (sc->ident[0] != '\0' &&
			    g_handleattr_str(bp, "GEOM::ident", sc->ident))
				error = -1;
			else if (g_handleattr_int(bp, "MNT::verified", isv))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
			/*
			 * Devstat uses (bio_bcount, bio_resid) for
			 * determining the length of the completed part of
			 * the i/o.  g_io_deliver() will translate from
			 * bio_completed to that, but it also destroys the
			 * bio so we must do our own translation.
			 */
			bp->bio_bcount = bp->bio_length;
			bp->bio_resid = (error == -1 ? bp->bio_bcount : 0);
			devstat_end_transaction_bio(sc->devstat, bp);
		}
		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
		}
	}
}
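
/* Look up a softc by unit number; callers hold the md_sx config lock. */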
static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
	case MD_NULL:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_req *mdr)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdr->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdr->md_sectorsize != 0 && !powerof2(mdr->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdr->md_options & MD_RESERVE)
		mdr->md_options &= ~MD_COMPRESS;
	if (mdr->md_fwsectors != 0)
		sc->fwsectors = mdr->md_fwsectors;
	if (mdr->md_fwheads != 0)
		sc->fwheads = mdr->md_fwheads;
	sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdr->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */
	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */
	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	fname = mdr->md_file;
	if (mdr->md_file_seg == UIO_USERSPACE) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else if (mdr->md_file_seg == UIO_SYSSPACE)
		strlcpy(sc->file, fname, sizeof(sc->file));
	else
		return (EDOOFUS);

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdr->md_options & MD_READONLY) ? 0 : FWRITE)
	    | ((mdr->md_options & MD_VERIFY) ? O_VERIFY : 0);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdr->md_fwsectors != 0)
		sc->fwsectors = mdr->md_fwsectors;
	if (mdr->md_fwheads != 0)
		sc->fwheads = mdr->md_fwheads;
	snprintf(sc->ident, sizeof(sc->ident), "MD-DEV%ju-INO%ju",
	    (uintmax_t)vattr.va_fsid, (uintmax_t)vattr.va_fileid);
	sc->flags = mdr->md_options & (MD_ASYNC | MD_CACHE | MD_FORCE |
	    MD_VERIFY);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}
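
/*
 * Tear down a unit: wither the GEOM geom first so no new I/O arrives,
 * then tell the worker thread to exit and wait for it, and finally
 * release whatever backend resources (vnode, credentials, VM object,
 * indir tree, UMA zone) the device type had allocated.
 */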
static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_req *mdr)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
	case MD_NULL:
		break;
	case MD_SWAP:
		if (mdr->md_mediasize <= 0 ||
		    (mdr->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdr->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdr->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdr->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */
	if ((mdr->md_options & MD_VERIFY) != 0)
		return (EINVAL);
	npage = mdr->md_mediasize / PAGE_SIZE;
	if (mdr->md_fwsectors != 0)
		sc->fwsectors = mdr->md_fwsectors;
	if (mdr->md_fwheads != 0)
		sc->fwheads = mdr->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdr->md_options & (MD_FORCE | MD_RESERVE);
	if (mdr->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
mdcreate_null(struct md_s *sc, struct md_req *mdr, struct thread *td)
{

	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	return (0);
}

static int
kern_mdattach_locked(struct thread *td, struct md_req *mdr)
{
	struct md_s *sc;
	unsigned sectsize;
	int error, i;

	sx_assert(&md_sx, SA_XLOCKED);

	switch (mdr->md_type) {
	case MD_MALLOC:
	case MD_PRELOAD:
	case MD_VNODE:
	case MD_SWAP:
	case MD_NULL:
		break;
	default:
		return (EINVAL);
	}
	if (mdr->md_sectorsize == 0)
		sectsize = DEV_BSIZE;
	else
		sectsize = mdr->md_sectorsize;
	if (sectsize > MAXPHYS || mdr->md_mediasize < sectsize)
		return (EINVAL);
	if (mdr->md_options & MD_AUTOUNIT)
		sc = mdnew(-1, &error, mdr->md_type);
	else {
		if (mdr->md_unit > INT_MAX)
			return (EINVAL);
		sc = mdnew(mdr->md_unit, &error, mdr->md_type);
	}
	if (sc == NULL)
		return (error);
	if (mdr->md_label != NULL)
		error = copyinstr(mdr->md_label, sc->label,
		    sizeof(sc->label), NULL);
	if (error != 0)
		goto err_after_new;
	if (mdr->md_options & MD_AUTOUNIT)
		mdr->md_unit = sc->unit;
	sc->mediasize = mdr->md_mediasize;
	sc->sectorsize = sectsize;
	error = EDOOFUS;
	switch (sc->type) {
	case MD_MALLOC:
		sc->start = mdstart_malloc;
		error = mdcreate_malloc(sc, mdr);
		break;
	case MD_PRELOAD:
		/*
		 * We disallow attaching preloaded memory disks via
		 * ioctl.  Preloaded memory disks are automatically
		 * attached in g_md_init().
		 */
		error = EOPNOTSUPP;
		break;
	case MD_VNODE:
		sc->start = mdstart_vnode;
		error = mdcreate_vnode(sc, mdr, td);
		break;
	case MD_SWAP:
		sc->start = mdstart_swap;
		error = mdcreate_swap(sc, mdr, td);
		break;
	case MD_NULL:
		sc->start = mdstart_null;
		error = mdcreate_null(sc, mdr, td);
		break;
	}
err_after_new:
	if (error != 0) {
		mddestroy(sc, td);
		return (error);
	}

	/* Prune off any residual fractional sector */
	i = sc->mediasize % sc->sectorsize;
	sc->mediasize -= i;

	mdinit(sc);
	return (0);
}

static int
kern_mdattach(struct thread *td, struct md_req *mdr)
{
	int error;

	sx_xlock(&md_sx);
	error = kern_mdattach_locked(td, mdr);
	sx_xunlock(&md_sx);
	return (error);
}

static int
kern_mddetach_locked(struct thread *td, struct md_req *mdr)
{
	struct md_s *sc;

	sx_assert(&md_sx, SA_XLOCKED);

	if (mdr->md_mediasize != 0 ||
	    (mdr->md_options & ~MD_FORCE) != 0)
		return (EINVAL);

	sc = mdfind(mdr->md_unit);
	if (sc == NULL)
		return (ENOENT);
	if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
	    !(mdr->md_options & MD_FORCE))
		return (EBUSY);
	return (mddestroy(sc, td));
}

static int
kern_mddetach(struct thread *td, struct md_req *mdr)
{
	int error;

	sx_xlock(&md_sx);
	error = kern_mddetach_locked(td, mdr);
	sx_xunlock(&md_sx);
	return (error);
}

static int
kern_mdresize_locked(struct md_req *mdr)
{
	struct md_s *sc;

	sx_assert(&md_sx, SA_XLOCKED);

	if ((mdr->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
		return (EINVAL);

	sc = mdfind(mdr->md_unit);
	if (sc == NULL)
		return (ENOENT);
	if (mdr->md_mediasize < sc->sectorsize)
		return (EINVAL);
	if (mdr->md_mediasize < sc->mediasize &&
	    !(sc->flags & MD_FORCE) &&
	    !(mdr->md_options & MD_FORCE))
		return (EBUSY);
	return (mdresize(sc, mdr));
}

static int
kern_mdresize(struct md_req *mdr)
{
	int error;

	sx_xlock(&md_sx);
	error = kern_mdresize_locked(mdr);
	sx_xunlock(&md_sx);
	return (error);
}

static int
kern_mdquery_locked(struct md_req *mdr)
{
	struct md_s *sc;
	int error;

	sx_assert(&md_sx, SA_XLOCKED);

	sc = mdfind(mdr->md_unit);
	if (sc == NULL)
		return (ENOENT);
	mdr->md_type = sc->type;
	mdr->md_options = sc->flags;
	mdr->md_mediasize = sc->mediasize;
	mdr->md_sectorsize = sc->sectorsize;
	error = 0;
	if (mdr->md_label != NULL) {
		error = copyout(sc->label, mdr->md_label,
		    strlen(sc->label) + 1);
		if (error != 0)
			return (error);
	}
	if (sc->type == MD_VNODE ||
	    (sc->type == MD_PRELOAD && mdr->md_file != NULL))
		error = copyout(sc->file, mdr->md_file,
		    strlen(sc->file) + 1);
	return (error);
}

static int
kern_mdquery(struct md_req *mdr)
{
	int error;

	sx_xlock(&md_sx);
	error = kern_mdquery_locked(mdr);
	sx_xunlock(&md_sx);
	return (error);
}

static int
kern_mdlist_locked(struct md_req *mdr)
{
	struct md_s *sc;
	int i;

	sx_assert(&md_sx, SA_XLOCKED);

	/*
	 * Write the number of md devices to mdr->md_units[0].
	 * Write the unit number of the first (mdr->md_units_nitems - 2)
	 * units to mdr->md_units[1::(mdr->md_units_nitems - 2)] and
	 * terminate the list with -1.
	 *
	 * XXX: There is currently no mechanism to retrieve unit
	 * numbers for more than (MDNPAD - 2) units.
	 *
	 * XXX: Due to the use of LIST_INSERT_HEAD in mdnew(), the
	 * list of visible unit numbers is not stable.
	 */
	i = 1;
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (i < mdr->md_units_nitems - 1)
			mdr->md_units[i] = sc->unit;
		i++;
	}
	mdr->md_units[MIN(i, mdr->md_units_nitems - 1)] = -1;
	mdr->md_units[0] = i - 1;
	return (0);
}

static int
kern_mdlist(struct md_req *mdr)
{
	int error;

	sx_xlock(&md_sx);
	error = kern_mdlist_locked(mdr);
	sx_xunlock(&md_sx);
	return (error);
}

/* Copy members that are not userspace pointers. */
#define	MD_IOCTL2REQ(mdio, mdr) do {					\
	(mdr)->md_unit = (mdio)->md_unit;				\
	(mdr)->md_type = (mdio)->md_type;				\
	(mdr)->md_mediasize = (mdio)->md_mediasize;			\
	(mdr)->md_sectorsize = (mdio)->md_sectorsize;			\
	(mdr)->md_options = (mdio)->md_options;				\
	(mdr)->md_fwheads = (mdio)->md_fwheads;				\
	(mdr)->md_fwsectors = (mdio)->md_fwsectors;			\
	(mdr)->md_units = &(mdio)->md_pad[0];				\
	(mdr)->md_units_nitems = nitems((mdio)->md_pad);		\
} while(0)

/* Copy members that might have been updated */
#define	MD_REQ2IOCTL(mdr, mdio) do {					\
	(mdio)->md_unit = (mdr)->md_unit;				\
	(mdio)->md_type = (mdr)->md_type;				\
	(mdio)->md_mediasize = (mdr)->md_mediasize;			\
	(mdio)->md_sectorsize = (mdr)->md_sectorsize;			\
	(mdio)->md_options = (mdr)->md_options;				\
	(mdio)->md_fwheads = (mdr)->md_fwheads;				\
	(mdio)->md_fwsectors = (mdr)->md_fwsectors;			\
} while(0)

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct md_req mdr;
	int error;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	bzero(&mdr, sizeof(mdr));
	switch (cmd) {
	case MDIOCATTACH:
	case MDIOCDETACH:
	case MDIOCRESIZE:
	case MDIOCQUERY:
	case MDIOCLIST: {
		struct md_ioctl *mdio = (struct md_ioctl *)addr;
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		MD_IOCTL2REQ(mdio, &mdr);
		mdr.md_file = mdio->md_file;
		mdr.md_file_seg = UIO_USERSPACE;
		/* If the file is adjacent to the md_ioctl it's in kernel. */
		if ((void *)mdio->md_file == (void *)(mdio + 1))
			mdr.md_file_seg = UIO_SYSSPACE;
		mdr.md_label = mdio->md_label;
		break;
	}
#ifdef COMPAT_FREEBSD32
	case MDIOCATTACH_32:
	case MDIOCDETACH_32:
	case MDIOCRESIZE_32:
	case MDIOCQUERY_32:
	case MDIOCLIST_32: {
		struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		MD_IOCTL2REQ(mdio, &mdr);
		mdr.md_file = (void *)(uintptr_t)mdio->md_file;
		mdr.md_file_seg = UIO_USERSPACE;
		mdr.md_label = (void *)(uintptr_t)mdio->md_label;
		break;
	}
#endif
	default:
		/* Fall through to handler switch. */
		break;
	}

	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
#ifdef COMPAT_FREEBSD32
	case MDIOCATTACH_32:
#endif
		error = kern_mdattach(td, &mdr);
		break;
	case MDIOCDETACH:
#ifdef COMPAT_FREEBSD32
	case MDIOCDETACH_32:
#endif
		error = kern_mddetach(td, &mdr);
		break;
	case MDIOCRESIZE:
#ifdef COMPAT_FREEBSD32
	case MDIOCRESIZE_32:
#endif
		error = kern_mdresize(&mdr);
		break;
	case MDIOCQUERY:
#ifdef COMPAT_FREEBSD32
	case MDIOCQUERY_32:
#endif
		error = kern_mdquery(&mdr);
		break;
	case MDIOCLIST:
#ifdef COMPAT_FREEBSD32
	case MDIOCLIST_32:
#endif
		error = kern_mdlist(&mdr);
		break;
	default:
		error = ENOIOCTL;
	}

	switch (cmd) {
	case MDIOCATTACH:
	case MDIOCQUERY: {
		struct md_ioctl *mdio = (struct md_ioctl *)addr;
		MD_REQ2IOCTL(&mdr, mdio);
		break;
	}
#ifdef COMPAT_FREEBSD32
	case MDIOCATTACH_32:
	case MDIOCQUERY_32: {
		struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
		MD_REQ2IOCTL(&mdr, mdio);
		break;
	}
#endif
	default:
		/* Other commands do not alter mdr. */
		break;
	}

	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
	if (name != NULL)
		strlcpy(sc->file, name, sizeof(sc->file));
#ifdef MD_ROOT
	if (sc->unit == 0) {
#ifndef ROOTDEVNAME
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
#ifdef MD_ROOT_READONLY
		sc->flags |= MD_READONLY;
#endif
	}
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	} else {
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);
	}
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT
	if (mfs_root_size != 0) {
		sx_xlock(&md_sx);
#ifdef MD_ROOT_MEM
		md_preloaded(mfs_root, mfs_root_size, NULL);
#else
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
		    NULL);
#endif
		sx_xunlock(&md_sx);
	}
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_pbuf_zone = pbuf_zsecond_create("mdpbuf", nswbuf / 10);
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}
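
/*
 * Dump a unit's configuration: the terse one-line form (indent == NULL)
 * and the XML form feed the GEOM conftxt and confxml sysctls,
 * respectively.
 */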
"off": "on"); 2203 sbuf_printf(sb, "%s<label>", indent); 2204 g_conf_printf_escaped(sb, "%s", mp->label); 2205 sbuf_printf(sb, "</label>\n"); 2206 } 2207 } 2208 } 2209 2210 static void 2211 g_md_fini(struct g_class *mp __unused) 2212 { 2213 2214 sx_destroy(&md_sx); 2215 if (status_dev != NULL) 2216 destroy_dev(status_dev); 2217 uma_zdestroy(md_pbuf_zone); 2218 delete_unrhdr(md_uh); 2219 } 2220