1 /*- 2 * SPDX-License-Identifier: (Beerware AND BSD-3-Clause) 3 * 4 * ---------------------------------------------------------------------------- 5 * "THE BEER-WARE LICENSE" (Revision 42): 6 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you 7 * can do whatever you want with this stuff. If we meet some day, and you think 8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 9 * ---------------------------------------------------------------------------- 10 * 11 */ 12 13 /*- 14 * The following functions are based on the historical vn(4) driver: 15 * mdstart_swap(), mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() 16 * and mddestroy(), and as such under the following copyright: 17 * 18 * Copyright (c) 1988 University of Utah. 19 * Copyright (c) 1990, 1993 20 * The Regents of the University of California. All rights reserved. 21 * Copyright (c) 2013 The FreeBSD Foundation 22 * All rights reserved. 23 * 24 * This code is derived from software contributed to Berkeley by 25 * the Systems Programming Group of the University of Utah Computer 26 * Science Department. 27 * 28 * Portions of this software were developed by Konstantin Belousov 29 * under sponsorship from the FreeBSD Foundation. 30 * 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 1. Redistributions of source code must retain the above copyright 35 * notice, this list of conditions and the following disclaimer. 36 * 2. Redistributions in binary form must reproduce the above copyright 37 * notice, this list of conditions and the following disclaimer in the 38 * documentation and/or other materials provided with the distribution. 39 * 3. Neither the name of the University nor the names of its contributors 40 * may be used to endorse or promote products derived from this software 41 * without specific prior written permission. 42 * 43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 53 * SUCH DAMAGE. 
54 * 55 * from: Utah Hdr: vn.c 1.13 94/04/02 56 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03 57 */ 58 59 #include "opt_rootdevname.h" 60 #include "opt_geom.h" 61 #include "opt_md.h" 62 63 #include <sys/param.h> 64 #include <sys/systm.h> 65 #include <sys/bio.h> 66 #include <sys/buf.h> 67 #include <sys/conf.h> 68 #include <sys/devicestat.h> 69 #include <sys/fcntl.h> 70 #include <sys/kernel.h> 71 #include <sys/kthread.h> 72 #include <sys/limits.h> 73 #include <sys/linker.h> 74 #include <sys/lock.h> 75 #include <sys/malloc.h> 76 #include <sys/mdioctl.h> 77 #include <sys/mount.h> 78 #include <sys/mutex.h> 79 #include <sys/sx.h> 80 #include <sys/namei.h> 81 #include <sys/proc.h> 82 #include <sys/queue.h> 83 #include <sys/rwlock.h> 84 #include <sys/sbuf.h> 85 #include <sys/sched.h> 86 #include <sys/sf_buf.h> 87 #include <sys/sysctl.h> 88 #include <sys/uio.h> 89 #include <sys/unistd.h> 90 #include <sys/vnode.h> 91 #include <sys/disk.h> 92 93 #include <geom/geom.h> 94 #include <geom/geom_int.h> 95 96 #include <vm/vm.h> 97 #include <vm/vm_extern.h> 98 #include <vm/vm_param.h> 99 #include <vm/vm_object.h> 100 #include <vm/vm_page.h> 101 #include <vm/vm_pager.h> 102 #include <vm/swap_pager.h> 103 #include <vm/uma.h> 104 105 #include <machine/bus.h> 106 107 #define MD_MODVER 1 108 109 #define MD_SHUTDOWN 0x10000 /* Tell worker thread to terminate. */ 110 #define MD_EXITING 0x20000 /* Worker thread is exiting. */ 111 #define MD_PROVIDERGONE 0x40000 /* Safe to free the softc */ 112 113 #ifndef MD_NSECT 114 #define MD_NSECT (10000 * 2) 115 #endif 116 117 struct md_req { 118 unsigned md_unit; /* unit number */ 119 enum md_types md_type; /* type of disk */ 120 off_t md_mediasize; /* size of disk in bytes */ 121 unsigned md_sectorsize; /* sectorsize */ 122 unsigned md_options; /* options */ 123 int md_fwheads; /* firmware heads */ 124 int md_fwsectors; /* firmware sectors */ 125 char *md_file; /* pathname of file to mount */ 126 enum uio_seg md_file_seg; /* location of md_file */ 127 char *md_label; /* label of the device (userspace) */ 128 int *md_units; /* pointer to units array (kernel) */ 129 size_t md_units_nitems; /* items in md_units array */ 130 }; 131 132 #ifdef COMPAT_FREEBSD32 133 struct md_ioctl32 { 134 unsigned md_version; 135 unsigned md_unit; 136 enum md_types md_type; 137 uint32_t md_file; 138 off_t md_mediasize; 139 unsigned md_sectorsize; 140 unsigned md_options; 141 uint64_t md_base; 142 int md_fwheads; 143 int md_fwsectors; 144 uint32_t md_label; 145 int md_pad[MDNPAD]; 146 } 147 #ifdef __amd64__ 148 __attribute__((__packed__)) 149 #endif 150 ; 151 #ifndef __amd64__ 152 CTASSERT((sizeof(struct md_ioctl32)) == 440); 153 #else 154 CTASSERT((sizeof(struct md_ioctl32)) == 436); 155 #endif 156 157 #define MDIOCATTACH_32 _IOC_NEWTYPE(MDIOCATTACH, struct md_ioctl32) 158 #define MDIOCDETACH_32 _IOC_NEWTYPE(MDIOCDETACH, struct md_ioctl32) 159 #define MDIOCQUERY_32 _IOC_NEWTYPE(MDIOCQUERY, struct md_ioctl32) 160 #define MDIOCRESIZE_32 _IOC_NEWTYPE(MDIOCRESIZE, struct md_ioctl32) 161 #endif /* COMPAT_FREEBSD32 */ 162 163 static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk"); 164 static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors"); 165 166 static int md_debug; 167 SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, 168 "Enable md(4) debug messages"); 169 static int md_malloc_wait; 170 SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0, 171 "Allow malloc to wait for memory allocations"); 172 173 #if defined(MD_ROOT) && 
!defined(MD_ROOT_FSTYPE) 174 #define MD_ROOT_FSTYPE "ufs" 175 #endif 176 177 #if defined(MD_ROOT) 178 /* 179 * Preloaded image gets put here. 180 */ 181 #if defined(MD_ROOT_SIZE) 182 /* 183 * We put the mfs_root symbol into the oldmfs section of the kernel object file. 184 * Applications that patch the object with the image can determine 185 * the size looking at the oldmfs section size within the kernel. 186 */ 187 u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs"))); 188 const int mfs_root_size = sizeof(mfs_root); 189 #elif defined(MD_ROOT_MEM) 190 /* MD region already mapped in the memory */ 191 u_char *mfs_root; 192 int mfs_root_size; 193 #else 194 extern volatile u_char __weak_symbol mfs_root; 195 extern volatile u_char __weak_symbol mfs_root_end; 196 #define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root)) 197 #endif 198 #endif 199 200 static g_init_t g_md_init; 201 static g_fini_t g_md_fini; 202 static g_start_t g_md_start; 203 static g_access_t g_md_access; 204 static void g_md_dumpconf(struct sbuf *sb, const char *indent, 205 struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp); 206 static g_provgone_t g_md_providergone; 207 208 static struct cdev *status_dev = NULL; 209 static struct sx md_sx; 210 static struct unrhdr *md_uh; 211 212 static d_ioctl_t mdctlioctl; 213 214 static struct cdevsw mdctl_cdevsw = { 215 .d_version = D_VERSION, 216 .d_ioctl = mdctlioctl, 217 .d_name = MD_NAME, 218 }; 219 220 struct g_class g_md_class = { 221 .name = "MD", 222 .version = G_VERSION, 223 .init = g_md_init, 224 .fini = g_md_fini, 225 .start = g_md_start, 226 .access = g_md_access, 227 .dumpconf = g_md_dumpconf, 228 .providergone = g_md_providergone, 229 }; 230 231 DECLARE_GEOM_CLASS(g_md_class, g_md); 232 MODULE_VERSION(geom_md, 0); 233 234 static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list); 235 236 #define NINDIR (PAGE_SIZE / sizeof(uintptr_t)) 237 #define NMASK (NINDIR-1) 238 static int nshift; 239 240 struct indir { 241 uintptr_t *array; 242 u_int total; 243 u_int used; 244 u_int shift; 245 }; 246 247 struct md_s { 248 int unit; 249 LIST_ENTRY(md_s) list; 250 struct bio_queue_head bio_queue; 251 struct mtx queue_mtx; 252 struct cdev *dev; 253 enum md_types type; 254 off_t mediasize; 255 unsigned sectorsize; 256 unsigned opencount; 257 unsigned fwheads; 258 unsigned fwsectors; 259 char ident[DISK_IDENT_SIZE]; 260 unsigned flags; 261 char name[20]; 262 struct proc *procp; 263 struct g_geom *gp; 264 struct g_provider *pp; 265 int (*start)(struct md_s *sc, struct bio *bp); 266 struct devstat *devstat; 267 struct ucred *cred; 268 char label[PATH_MAX]; 269 bool candelete; 270 271 union { 272 /* MD_MALLOC related fields */ 273 struct { 274 struct indir *indir; 275 uma_zone_t uma; 276 } s_malloc; 277 278 /* MD_PRELOAD related fields */ 279 struct { 280 u_char *pl_ptr; 281 size_t pl_len; 282 char name[PATH_MAX]; 283 } s_preload; 284 285 /* MD_VNODE related fields */ 286 struct { 287 struct vnode *vnode; 288 char file[PATH_MAX]; 289 vm_offset_t kva; 290 } s_vnode; 291 292 /* MD_SWAP related fields */ 293 struct { 294 vm_object_t object; 295 } s_swap; 296 297 /* MD_NULL */ 298 struct { 299 } s_null; 300 }; 301 }; 302 303 static struct indir * 304 new_indir(u_int shift) 305 { 306 struct indir *ip; 307 308 ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT) 309 | M_ZERO); 310 if (ip == NULL) 311 return (NULL); 312 ip->array = malloc(sizeof(uintptr_t) * NINDIR, 313 M_MDSECT, (md_malloc_wait ? 
M_WAITOK : M_NOWAIT) | M_ZERO); 314 if (ip->array == NULL) { 315 free(ip, M_MD); 316 return (NULL); 317 } 318 ip->total = NINDIR; 319 ip->shift = shift; 320 return (ip); 321 } 322 323 static void 324 del_indir(struct indir *ip) 325 { 326 327 free(ip->array, M_MDSECT); 328 free(ip, M_MD); 329 } 330 331 static void 332 destroy_indir(struct md_s *sc, struct indir *ip) 333 { 334 int i; 335 336 for (i = 0; i < NINDIR; i++) { 337 if (!ip->array[i]) 338 continue; 339 if (ip->shift) 340 destroy_indir(sc, (struct indir*)(ip->array[i])); 341 else if (ip->array[i] > 255) 342 uma_zfree(sc->s_malloc.uma, (void *)(ip->array[i])); 343 } 344 del_indir(ip); 345 } 346 347 /* 348 * This function does the math and allocates the top level "indir" structure 349 * for a device of "size" sectors. 350 */ 351 352 static struct indir * 353 dimension(off_t size) 354 { 355 off_t rcnt; 356 struct indir *ip; 357 int layer; 358 359 rcnt = size; 360 layer = 0; 361 while (rcnt > NINDIR) { 362 rcnt /= NINDIR; 363 layer++; 364 } 365 366 /* 367 * XXX: the top layer is probably not fully populated, so we allocate 368 * too much space for ip->array in here. 369 */ 370 ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO); 371 ip->array = malloc(sizeof(uintptr_t) * NINDIR, 372 M_MDSECT, M_WAITOK | M_ZERO); 373 ip->total = NINDIR; 374 ip->shift = layer * nshift; 375 return (ip); 376 } 377 378 /* 379 * Read a given sector 380 */ 381 382 static uintptr_t 383 s_read(struct indir *ip, off_t offset) 384 { 385 struct indir *cip; 386 int idx; 387 uintptr_t up; 388 389 if (md_debug > 1) 390 printf("s_read(%jd)\n", (intmax_t)offset); 391 up = 0; 392 for (cip = ip; cip != NULL;) { 393 if (cip->shift) { 394 idx = (offset >> cip->shift) & NMASK; 395 up = cip->array[idx]; 396 cip = (struct indir *)up; 397 continue; 398 } 399 idx = offset & NMASK; 400 return (cip->array[idx]); 401 } 402 return (0); 403 } 404 405 /* 406 * Write a given sector, prune the tree if the value is 0 407 */ 408 409 static int 410 s_write(struct indir *ip, off_t offset, uintptr_t ptr) 411 { 412 struct indir *cip, *lip[10]; 413 int idx, li; 414 uintptr_t up; 415 416 if (md_debug > 1) 417 printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr); 418 up = 0; 419 li = 0; 420 cip = ip; 421 for (;;) { 422 lip[li++] = cip; 423 if (cip->shift) { 424 idx = (offset >> cip->shift) & NMASK; 425 up = cip->array[idx]; 426 if (up != 0) { 427 cip = (struct indir *)up; 428 continue; 429 } 430 /* Allocate branch */ 431 cip->array[idx] = 432 (uintptr_t)new_indir(cip->shift - nshift); 433 if (cip->array[idx] == 0) 434 return (ENOSPC); 435 cip->used++; 436 up = cip->array[idx]; 437 cip = (struct indir *)up; 438 continue; 439 } 440 /* leafnode */ 441 idx = offset & NMASK; 442 up = cip->array[idx]; 443 if (up != 0) 444 cip->used--; 445 cip->array[idx] = ptr; 446 if (ptr != 0) 447 cip->used++; 448 break; 449 } 450 if (cip->used != 0 || li == 1) 451 return (0); 452 li--; 453 while (cip->used == 0 && cip != ip) { 454 li--; 455 idx = (offset >> lip[li]->shift) & NMASK; 456 up = lip[li]->array[idx]; 457 KASSERT(up == (uintptr_t)cip, ("md screwed up")); 458 del_indir(cip); 459 lip[li]->array[idx] = 0; 460 lip[li]->used--; 461 cip = lip[li]; 462 } 463 return (0); 464 } 465 466 static int 467 g_md_access(struct g_provider *pp, int r, int w, int e) 468 { 469 struct md_s *sc; 470 471 sc = pp->geom->softc; 472 if (sc == NULL) { 473 if (r <= 0 && w <= 0 && e <= 0) 474 return (0); 475 return (ENXIO); 476 } 477 r += pp->acr; 478 w += pp->acw; 479 e += pp->ace; 480 if ((sc->flags & MD_READONLY) != 0 && w > 0) 
481 return (EROFS); 482 if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) { 483 sc->opencount = 1; 484 } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) { 485 sc->opencount = 0; 486 } 487 return (0); 488 } 489 490 static void 491 g_md_start(struct bio *bp) 492 { 493 struct md_s *sc; 494 495 sc = bp->bio_to->geom->softc; 496 if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) { 497 devstat_start_transaction_bio(sc->devstat, bp); 498 } 499 mtx_lock(&sc->queue_mtx); 500 bioq_disksort(&sc->bio_queue, bp); 501 wakeup(sc); 502 mtx_unlock(&sc->queue_mtx); 503 } 504 505 #define MD_MALLOC_MOVE_ZERO 1 506 #define MD_MALLOC_MOVE_FILL 2 507 #define MD_MALLOC_MOVE_READ 3 508 #define MD_MALLOC_MOVE_WRITE 4 509 #define MD_MALLOC_MOVE_CMP 5 510 511 static int 512 md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize, 513 void *ptr, u_char fill, int op) 514 { 515 struct sf_buf *sf; 516 vm_page_t m, *mp1; 517 char *p, first; 518 off_t *uc; 519 unsigned n; 520 int error, i, ma_offs1, sz, first_read; 521 522 m = NULL; 523 error = 0; 524 sf = NULL; 525 /* if (op == MD_MALLOC_MOVE_CMP) { gcc */ 526 first = 0; 527 first_read = 0; 528 uc = ptr; 529 mp1 = *mp; 530 ma_offs1 = *ma_offs; 531 /* } */ 532 sched_pin(); 533 for (n = sectorsize; n != 0; n -= sz) { 534 sz = imin(PAGE_SIZE - *ma_offs, n); 535 if (m != **mp) { 536 if (sf != NULL) 537 sf_buf_free(sf); 538 m = **mp; 539 sf = sf_buf_alloc(m, SFB_CPUPRIVATE | 540 (md_malloc_wait ? 0 : SFB_NOWAIT)); 541 if (sf == NULL) { 542 error = ENOMEM; 543 break; 544 } 545 } 546 p = (char *)sf_buf_kva(sf) + *ma_offs; 547 switch (op) { 548 case MD_MALLOC_MOVE_ZERO: 549 bzero(p, sz); 550 break; 551 case MD_MALLOC_MOVE_FILL: 552 memset(p, fill, sz); 553 break; 554 case MD_MALLOC_MOVE_READ: 555 bcopy(ptr, p, sz); 556 cpu_flush_dcache(p, sz); 557 break; 558 case MD_MALLOC_MOVE_WRITE: 559 bcopy(p, ptr, sz); 560 break; 561 case MD_MALLOC_MOVE_CMP: 562 for (i = 0; i < sz; i++, p++) { 563 if (!first_read) { 564 *uc = (u_char)*p; 565 first = *p; 566 first_read = 1; 567 } else if (*p != first) { 568 error = EDOOFUS; 569 break; 570 } 571 } 572 break; 573 default: 574 KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op)); 575 break; 576 } 577 if (error != 0) 578 break; 579 *ma_offs += sz; 580 *ma_offs %= PAGE_SIZE; 581 if (*ma_offs == 0) 582 (*mp)++; 583 ptr = (char *)ptr + sz; 584 } 585 586 if (sf != NULL) 587 sf_buf_free(sf); 588 sched_unpin(); 589 if (op == MD_MALLOC_MOVE_CMP && error != 0) { 590 *mp = mp1; 591 *ma_offs = ma_offs1; 592 } 593 return (error); 594 } 595 596 static int 597 md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs, 598 unsigned len, void *ptr, u_char fill, int op) 599 { 600 bus_dma_segment_t *vlist; 601 uint8_t *p, *end, first; 602 off_t *uc; 603 int ma_offs, seg_len; 604 605 vlist = *pvlist; 606 ma_offs = *pma_offs; 607 uc = ptr; 608 609 for (; len != 0; len -= seg_len) { 610 seg_len = imin(vlist->ds_len - ma_offs, len); 611 p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs; 612 switch (op) { 613 case MD_MALLOC_MOVE_ZERO: 614 bzero(p, seg_len); 615 break; 616 case MD_MALLOC_MOVE_FILL: 617 memset(p, fill, seg_len); 618 break; 619 case MD_MALLOC_MOVE_READ: 620 bcopy(ptr, p, seg_len); 621 cpu_flush_dcache(p, seg_len); 622 break; 623 case MD_MALLOC_MOVE_WRITE: 624 bcopy(p, ptr, seg_len); 625 break; 626 case MD_MALLOC_MOVE_CMP: 627 end = p + seg_len; 628 first = *uc = *p; 629 /* Confirm all following bytes match the first */ 630 while (++p < end) { 631 if (*p != first) 632 return (EDOOFUS); 633 } 634 break; 
635 default: 636 KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op)); 637 break; 638 } 639 640 ma_offs += seg_len; 641 if (ma_offs == vlist->ds_len) { 642 ma_offs = 0; 643 vlist++; 644 } 645 ptr = (uint8_t *)ptr + seg_len; 646 } 647 *pvlist = vlist; 648 *pma_offs = ma_offs; 649 650 return (0); 651 } 652 653 static int 654 mdstart_malloc(struct md_s *sc, struct bio *bp) 655 { 656 u_char *dst; 657 vm_page_t *m; 658 bus_dma_segment_t *vlist; 659 int i, error, error1, ma_offs, notmapped; 660 off_t secno, nsec, uc; 661 uintptr_t sp, osp; 662 663 switch (bp->bio_cmd) { 664 case BIO_READ: 665 case BIO_WRITE: 666 case BIO_DELETE: 667 break; 668 case BIO_FLUSH: 669 return (0); 670 default: 671 return (EOPNOTSUPP); 672 } 673 674 notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0; 675 vlist = (bp->bio_flags & BIO_VLIST) != 0 ? 676 (bus_dma_segment_t *)bp->bio_data : NULL; 677 if (notmapped) { 678 m = bp->bio_ma; 679 ma_offs = bp->bio_ma_offset; 680 dst = NULL; 681 KASSERT(vlist == NULL, ("vlists cannot be unmapped")); 682 } else if (vlist != NULL) { 683 ma_offs = bp->bio_ma_offset; 684 dst = NULL; 685 } else { 686 dst = bp->bio_data; 687 } 688 689 nsec = bp->bio_length / sc->sectorsize; 690 secno = bp->bio_offset / sc->sectorsize; 691 error = 0; 692 while (nsec--) { 693 osp = s_read(sc->s_malloc.indir, secno); 694 if (bp->bio_cmd == BIO_DELETE) { 695 if (osp != 0) 696 error = s_write(sc->s_malloc.indir, secno, 0); 697 } else if (bp->bio_cmd == BIO_READ) { 698 if (osp == 0) { 699 if (notmapped) { 700 error = md_malloc_move_ma(&m, &ma_offs, 701 sc->sectorsize, NULL, 0, 702 MD_MALLOC_MOVE_ZERO); 703 } else if (vlist != NULL) { 704 error = md_malloc_move_vlist(&vlist, 705 &ma_offs, sc->sectorsize, NULL, 0, 706 MD_MALLOC_MOVE_ZERO); 707 } else 708 bzero(dst, sc->sectorsize); 709 } else if (osp <= 255) { 710 if (notmapped) { 711 error = md_malloc_move_ma(&m, &ma_offs, 712 sc->sectorsize, NULL, osp, 713 MD_MALLOC_MOVE_FILL); 714 } else if (vlist != NULL) { 715 error = md_malloc_move_vlist(&vlist, 716 &ma_offs, sc->sectorsize, NULL, osp, 717 MD_MALLOC_MOVE_FILL); 718 } else 719 memset(dst, osp, sc->sectorsize); 720 } else { 721 if (notmapped) { 722 error = md_malloc_move_ma(&m, &ma_offs, 723 sc->sectorsize, (void *)osp, 0, 724 MD_MALLOC_MOVE_READ); 725 } else if (vlist != NULL) { 726 error = md_malloc_move_vlist(&vlist, 727 &ma_offs, sc->sectorsize, 728 (void *)osp, 0, 729 MD_MALLOC_MOVE_READ); 730 } else { 731 bcopy((void *)osp, dst, sc->sectorsize); 732 cpu_flush_dcache(dst, sc->sectorsize); 733 } 734 } 735 osp = 0; 736 } else if (bp->bio_cmd == BIO_WRITE) { 737 if (sc->flags & MD_COMPRESS) { 738 if (notmapped) { 739 error1 = md_malloc_move_ma(&m, &ma_offs, 740 sc->sectorsize, &uc, 0, 741 MD_MALLOC_MOVE_CMP); 742 i = error1 == 0 ? sc->sectorsize : 0; 743 } else if (vlist != NULL) { 744 error1 = md_malloc_move_vlist(&vlist, 745 &ma_offs, sc->sectorsize, &uc, 0, 746 MD_MALLOC_MOVE_CMP); 747 i = error1 == 0 ? sc->sectorsize : 0; 748 } else { 749 uc = dst[0]; 750 for (i = 1; i < sc->sectorsize; i++) { 751 if (dst[i] != uc) 752 break; 753 } 754 } 755 } else { 756 i = 0; 757 uc = 0; 758 } 759 if (i == sc->sectorsize) { 760 if (osp != uc) 761 error = s_write(sc->s_malloc.indir, 762 secno, uc); 763 } else { 764 if (osp <= 255) { 765 sp = (uintptr_t)uma_zalloc( 766 sc->s_malloc.uma, 767 md_malloc_wait ? 
M_WAITOK : 768 M_NOWAIT); 769 if (sp == 0) { 770 error = ENOSPC; 771 break; 772 } 773 if (notmapped) { 774 error = md_malloc_move_ma(&m, 775 &ma_offs, sc->sectorsize, 776 (void *)sp, 0, 777 MD_MALLOC_MOVE_WRITE); 778 } else if (vlist != NULL) { 779 error = md_malloc_move_vlist( 780 &vlist, &ma_offs, 781 sc->sectorsize, (void *)sp, 782 0, MD_MALLOC_MOVE_WRITE); 783 } else { 784 bcopy(dst, (void *)sp, 785 sc->sectorsize); 786 } 787 error = s_write(sc->s_malloc.indir, 788 secno, sp); 789 } else { 790 if (notmapped) { 791 error = md_malloc_move_ma(&m, 792 &ma_offs, sc->sectorsize, 793 (void *)osp, 0, 794 MD_MALLOC_MOVE_WRITE); 795 } else if (vlist != NULL) { 796 error = md_malloc_move_vlist( 797 &vlist, &ma_offs, 798 sc->sectorsize, (void *)osp, 799 0, MD_MALLOC_MOVE_WRITE); 800 } else { 801 bcopy(dst, (void *)osp, 802 sc->sectorsize); 803 } 804 osp = 0; 805 } 806 } 807 } else { 808 error = EOPNOTSUPP; 809 } 810 if (osp > 255) 811 uma_zfree(sc->s_malloc.uma, (void*)osp); 812 if (error != 0) 813 break; 814 secno++; 815 if (!notmapped && vlist == NULL) 816 dst += sc->sectorsize; 817 } 818 bp->bio_resid = 0; 819 return (error); 820 } 821 822 static void 823 mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len) 824 { 825 off_t seg_len; 826 827 while (offset >= vlist->ds_len) { 828 offset -= vlist->ds_len; 829 vlist++; 830 } 831 832 while (len != 0) { 833 seg_len = omin(len, vlist->ds_len - offset); 834 bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset), 835 seg_len); 836 offset = 0; 837 src = (uint8_t *)src + seg_len; 838 len -= seg_len; 839 vlist++; 840 } 841 } 842 843 static void 844 mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len) 845 { 846 off_t seg_len; 847 848 while (offset >= vlist->ds_len) { 849 offset -= vlist->ds_len; 850 vlist++; 851 } 852 853 while (len != 0) { 854 seg_len = omin(len, vlist->ds_len - offset); 855 bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst, 856 seg_len); 857 offset = 0; 858 dst = (uint8_t *)dst + seg_len; 859 len -= seg_len; 860 vlist++; 861 } 862 } 863 864 static int 865 mdstart_preload(struct md_s *sc, struct bio *bp) 866 { 867 uint8_t *p; 868 869 p = sc->s_preload.pl_ptr + bp->bio_offset; 870 switch (bp->bio_cmd) { 871 case BIO_READ: 872 if ((bp->bio_flags & BIO_VLIST) != 0) { 873 mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data, 874 bp->bio_ma_offset, bp->bio_length); 875 } else { 876 bcopy(p, bp->bio_data, bp->bio_length); 877 } 878 cpu_flush_dcache(bp->bio_data, bp->bio_length); 879 break; 880 case BIO_WRITE: 881 if ((bp->bio_flags & BIO_VLIST) != 0) { 882 mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data, 883 bp->bio_ma_offset, p, bp->bio_length); 884 } else { 885 bcopy(bp->bio_data, p, bp->bio_length); 886 } 887 break; 888 } 889 bp->bio_resid = 0; 890 return (0); 891 } 892 893 static int 894 mdstart_vnode(struct md_s *sc, struct bio *bp) 895 { 896 int error; 897 struct uio auio; 898 struct iovec aiov; 899 struct iovec *piov; 900 struct mount *mp; 901 struct vnode *vp; 902 bus_dma_segment_t *vlist; 903 struct thread *td; 904 off_t iolen, iostart, off, len; 905 int ma_offs, npages; 906 bool mapped; 907 908 td = curthread; 909 vp = sc->s_vnode.vnode; 910 piov = NULL; 911 ma_offs = bp->bio_ma_offset; 912 off = bp->bio_offset; 913 len = bp->bio_length; 914 mapped = false; 915 916 /* 917 * VNODE I/O 918 * 919 * If an error occurs, we set BIO_ERROR but we do not set 920 * B_INVAL because (for a write anyway), the buffer is 921 * still valid. 
922 */ 923 924 switch (bp->bio_cmd) { 925 case BIO_READ: 926 auio.uio_rw = UIO_READ; 927 break; 928 case BIO_WRITE: 929 auio.uio_rw = UIO_WRITE; 930 break; 931 case BIO_FLUSH: 932 do { 933 (void)vn_start_write(vp, &mp, V_WAIT); 934 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 935 error = VOP_FSYNC(vp, MNT_WAIT, td); 936 VOP_UNLOCK(vp); 937 vn_finished_write(mp); 938 } while (error == ERELOOKUP); 939 return (error); 940 case BIO_DELETE: 941 if (sc->candelete) { 942 error = vn_deallocate(vp, &off, &len, 0, 943 sc->flags & MD_ASYNC ? 0 : IO_SYNC, 944 sc->cred, NOCRED); 945 bp->bio_resid = len; 946 return (error); 947 } 948 /* FALLTHROUGH */ 949 default: 950 return (EOPNOTSUPP); 951 } 952 953 auio.uio_offset = (vm_ooffset_t)bp->bio_offset; 954 auio.uio_resid = bp->bio_length; 955 auio.uio_segflg = UIO_SYSSPACE; 956 auio.uio_td = td; 957 958 if ((bp->bio_flags & BIO_VLIST) != 0) { 959 piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK); 960 auio.uio_iov = piov; 961 vlist = (bus_dma_segment_t *)bp->bio_data; 962 while (len > 0) { 963 piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr + 964 ma_offs); 965 piov->iov_len = vlist->ds_len - ma_offs; 966 if (piov->iov_len > len) 967 piov->iov_len = len; 968 len -= piov->iov_len; 969 ma_offs = 0; 970 vlist++; 971 piov++; 972 } 973 auio.uio_iovcnt = piov - auio.uio_iov; 974 piov = auio.uio_iov; 975 } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 976 bp->bio_resid = len; 977 unmapped_step: 978 npages = atop(min(maxphys, round_page(len + (ma_offs & 979 PAGE_MASK)))); 980 iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len); 981 KASSERT(iolen > 0, ("zero iolen")); 982 KASSERT(npages <= atop(maxphys + PAGE_SIZE), 983 ("npages %d too large", npages)); 984 pmap_qenter(sc->s_vnode.kva, &bp->bio_ma[atop(ma_offs)], 985 npages); 986 aiov.iov_base = (void *)(sc->s_vnode.kva + (ma_offs & 987 PAGE_MASK)); 988 aiov.iov_len = iolen; 989 auio.uio_iov = &aiov; 990 auio.uio_iovcnt = 1; 991 auio.uio_resid = iolen; 992 mapped = true; 993 } else { 994 aiov.iov_base = bp->bio_data; 995 aiov.iov_len = bp->bio_length; 996 auio.uio_iov = &aiov; 997 auio.uio_iovcnt = 1; 998 } 999 iostart = auio.uio_offset; 1000 if (bp->bio_cmd == BIO_READ) { 1001 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1002 error = VOP_READ(vp, &auio, 0, sc->cred); 1003 VOP_UNLOCK(vp); 1004 } else { 1005 (void) vn_start_write(vp, &mp, V_WAIT); 1006 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1007 error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC, 1008 sc->cred); 1009 VOP_UNLOCK(vp); 1010 vn_finished_write(mp); 1011 if (error == 0) 1012 sc->flags &= ~MD_VERIFY; 1013 } 1014 1015 /* When MD_CACHE is set, try to avoid double-caching the data. 
*/ 1016 if (error == 0 && (sc->flags & MD_CACHE) == 0) 1017 VOP_ADVISE(vp, iostart, auio.uio_offset - 1, 1018 POSIX_FADV_DONTNEED); 1019 1020 if (mapped) { 1021 pmap_qremove(sc->s_vnode.kva, npages); 1022 if (error == 0) { 1023 len -= iolen; 1024 bp->bio_resid -= iolen; 1025 ma_offs += iolen; 1026 if (len > 0) 1027 goto unmapped_step; 1028 } 1029 } else { 1030 bp->bio_resid = auio.uio_resid; 1031 } 1032 1033 free(piov, M_MD); 1034 return (error); 1035 } 1036 1037 static int 1038 mdstart_swap(struct md_s *sc, struct bio *bp) 1039 { 1040 vm_page_t m; 1041 u_char *p; 1042 vm_pindex_t i, lastp; 1043 bus_dma_segment_t *vlist; 1044 int rv, ma_offs, offs, len, lastend; 1045 1046 switch (bp->bio_cmd) { 1047 case BIO_READ: 1048 case BIO_WRITE: 1049 case BIO_DELETE: 1050 break; 1051 case BIO_FLUSH: 1052 return (0); 1053 default: 1054 return (EOPNOTSUPP); 1055 } 1056 1057 p = bp->bio_data; 1058 ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ? 1059 bp->bio_ma_offset : 0; 1060 vlist = (bp->bio_flags & BIO_VLIST) != 0 ? 1061 (bus_dma_segment_t *)bp->bio_data : NULL; 1062 1063 /* 1064 * offs is the offset at which to start operating on the 1065 * next (ie, first) page. lastp is the last page on 1066 * which we're going to operate. lastend is the ending 1067 * position within that last page (ie, PAGE_SIZE if 1068 * we're operating on complete aligned pages). 1069 */ 1070 offs = bp->bio_offset % PAGE_SIZE; 1071 lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE; 1072 lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1; 1073 1074 rv = VM_PAGER_OK; 1075 vm_object_pip_add(sc->s_swap.object, 1); 1076 for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) { 1077 len = ((i == lastp) ? lastend : PAGE_SIZE) - offs; 1078 m = vm_page_grab_unlocked(sc->s_swap.object, i, 1079 VM_ALLOC_SYSTEM); 1080 if (bp->bio_cmd == BIO_READ) { 1081 if (vm_page_all_valid(m)) 1082 rv = VM_PAGER_OK; 1083 else 1084 rv = vm_pager_get_pages(sc->s_swap.object, 1085 &m, 1, NULL, NULL); 1086 if (rv == VM_PAGER_ERROR) { 1087 VM_OBJECT_WLOCK(sc->s_swap.object); 1088 vm_page_free(m); 1089 VM_OBJECT_WUNLOCK(sc->s_swap.object); 1090 break; 1091 } else if (rv == VM_PAGER_FAIL) { 1092 /* 1093 * Pager does not have the page. Zero 1094 * the allocated page, and mark it as 1095 * valid. Do not set dirty, the page 1096 * can be recreated if thrown out. 
1097 */ 1098 pmap_zero_page(m); 1099 vm_page_valid(m); 1100 } 1101 if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 1102 pmap_copy_pages(&m, offs, bp->bio_ma, 1103 ma_offs, len); 1104 } else if ((bp->bio_flags & BIO_VLIST) != 0) { 1105 physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs, 1106 vlist, ma_offs, len); 1107 cpu_flush_dcache(p, len); 1108 } else { 1109 physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len); 1110 cpu_flush_dcache(p, len); 1111 } 1112 } else if (bp->bio_cmd == BIO_WRITE) { 1113 if (len == PAGE_SIZE || vm_page_all_valid(m)) 1114 rv = VM_PAGER_OK; 1115 else 1116 rv = vm_pager_get_pages(sc->s_swap.object, 1117 &m, 1, NULL, NULL); 1118 if (rv == VM_PAGER_ERROR) { 1119 VM_OBJECT_WLOCK(sc->s_swap.object); 1120 vm_page_free(m); 1121 VM_OBJECT_WUNLOCK(sc->s_swap.object); 1122 break; 1123 } else if (rv == VM_PAGER_FAIL) 1124 pmap_zero_page(m); 1125 1126 if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 1127 pmap_copy_pages(bp->bio_ma, ma_offs, &m, 1128 offs, len); 1129 } else if ((bp->bio_flags & BIO_VLIST) != 0) { 1130 physcopyin_vlist(vlist, ma_offs, 1131 VM_PAGE_TO_PHYS(m) + offs, len); 1132 } else { 1133 physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len); 1134 } 1135 1136 vm_page_valid(m); 1137 vm_page_set_dirty(m); 1138 } else if (bp->bio_cmd == BIO_DELETE) { 1139 if (len == PAGE_SIZE || vm_page_all_valid(m)) 1140 rv = VM_PAGER_OK; 1141 else 1142 rv = vm_pager_get_pages(sc->s_swap.object, 1143 &m, 1, NULL, NULL); 1144 VM_OBJECT_WLOCK(sc->s_swap.object); 1145 if (rv == VM_PAGER_ERROR) { 1146 vm_page_free(m); 1147 VM_OBJECT_WUNLOCK(sc->s_swap.object); 1148 break; 1149 } else if (rv == VM_PAGER_FAIL) { 1150 vm_page_free(m); 1151 m = NULL; 1152 } else { 1153 /* Page is valid. */ 1154 if (len != PAGE_SIZE) { 1155 pmap_zero_page_area(m, offs, len); 1156 vm_page_set_dirty(m); 1157 } else { 1158 vm_pager_page_unswapped(m); 1159 vm_page_free(m); 1160 m = NULL; 1161 } 1162 } 1163 VM_OBJECT_WUNLOCK(sc->s_swap.object); 1164 } 1165 if (m != NULL) { 1166 /* 1167 * The page may be deactivated prior to setting 1168 * PGA_REFERENCED, but in this case it will be 1169 * reactivated by the page daemon. 1170 */ 1171 if (vm_page_active(m)) 1172 vm_page_reference(m); 1173 else 1174 vm_page_activate(m); 1175 vm_page_xunbusy(m); 1176 } 1177 1178 /* Actions on further pages start at offset 0 */ 1179 p += PAGE_SIZE - offs; 1180 offs = 0; 1181 ma_offs += len; 1182 } 1183 vm_object_pip_wakeup(sc->s_swap.object); 1184 return (rv != VM_PAGER_ERROR ? 
0 : ENOSPC); 1185 } 1186 1187 static int 1188 mdstart_null(struct md_s *sc, struct bio *bp) 1189 { 1190 1191 switch (bp->bio_cmd) { 1192 case BIO_READ: 1193 bzero(bp->bio_data, bp->bio_length); 1194 cpu_flush_dcache(bp->bio_data, bp->bio_length); 1195 break; 1196 case BIO_WRITE: 1197 break; 1198 } 1199 bp->bio_resid = 0; 1200 return (0); 1201 } 1202 1203 static void 1204 md_handleattr(struct md_s *sc, struct bio *bp) 1205 { 1206 if (sc->fwsectors && sc->fwheads && 1207 (g_handleattr_int(bp, "GEOM::fwsectors", sc->fwsectors) != 0 || 1208 g_handleattr_int(bp, "GEOM::fwheads", sc->fwheads) != 0)) 1209 return; 1210 if (g_handleattr_int(bp, "GEOM::candelete", sc->candelete) != 0) 1211 return; 1212 if (sc->ident[0] != '\0' && 1213 g_handleattr_str(bp, "GEOM::ident", sc->ident) != 0) 1214 return; 1215 if (g_handleattr_int(bp, "MNT::verified", (sc->flags & MD_VERIFY) != 0)) 1216 return; 1217 g_io_deliver(bp, EOPNOTSUPP); 1218 } 1219 1220 static void 1221 md_kthread(void *arg) 1222 { 1223 struct md_s *sc; 1224 struct bio *bp; 1225 int error; 1226 1227 sc = arg; 1228 thread_lock(curthread); 1229 sched_prio(curthread, PRIBIO); 1230 thread_unlock(curthread); 1231 if (sc->type == MD_VNODE) 1232 curthread->td_pflags |= TDP_NORUNNINGBUF; 1233 1234 for (;;) { 1235 mtx_lock(&sc->queue_mtx); 1236 if (sc->flags & MD_SHUTDOWN) { 1237 sc->flags |= MD_EXITING; 1238 mtx_unlock(&sc->queue_mtx); 1239 kproc_exit(0); 1240 } 1241 bp = bioq_takefirst(&sc->bio_queue); 1242 if (!bp) { 1243 msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0); 1244 continue; 1245 } 1246 mtx_unlock(&sc->queue_mtx); 1247 if (bp->bio_cmd == BIO_GETATTR) { 1248 md_handleattr(sc, bp); 1249 } else { 1250 error = sc->start(sc, bp); 1251 if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) { 1252 /* 1253 * Devstat uses (bio_bcount, bio_resid) for 1254 * determining the length of the completed part 1255 * of the i/o. g_io_deliver() will translate 1256 * from bio_completed to that, but it also 1257 * destroys the bio so we must do our own 1258 * translation. 
1259 */ 1260 bp->bio_bcount = bp->bio_length; 1261 devstat_end_transaction_bio(sc->devstat, bp); 1262 } 1263 bp->bio_completed = bp->bio_length - bp->bio_resid; 1264 g_io_deliver(bp, error); 1265 } 1266 } 1267 } 1268 1269 static struct md_s * 1270 mdfind(int unit) 1271 { 1272 struct md_s *sc; 1273 1274 LIST_FOREACH(sc, &md_softc_list, list) { 1275 if (sc->unit == unit) 1276 break; 1277 } 1278 return (sc); 1279 } 1280 1281 static struct md_s * 1282 mdnew(int unit, int *errp, enum md_types type) 1283 { 1284 struct md_s *sc; 1285 int error; 1286 1287 *errp = 0; 1288 if (unit == -1) 1289 unit = alloc_unr(md_uh); 1290 else 1291 unit = alloc_unr_specific(md_uh, unit); 1292 1293 if (unit == -1) { 1294 *errp = EBUSY; 1295 return (NULL); 1296 } 1297 1298 sc = malloc(sizeof(*sc), M_MD, M_WAITOK | M_ZERO); 1299 sc->type = type; 1300 bioq_init(&sc->bio_queue); 1301 mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF); 1302 sc->unit = unit; 1303 sprintf(sc->name, "md%d", unit); 1304 LIST_INSERT_HEAD(&md_softc_list, sc, list); 1305 error = kproc_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name); 1306 if (error == 0) 1307 return (sc); 1308 LIST_REMOVE(sc, list); 1309 mtx_destroy(&sc->queue_mtx); 1310 free_unr(md_uh, sc->unit); 1311 free(sc, M_MD); 1312 *errp = error; 1313 return (NULL); 1314 } 1315 1316 static void 1317 mdinit(struct md_s *sc) 1318 { 1319 struct g_geom *gp; 1320 struct g_provider *pp; 1321 unsigned remn; 1322 1323 g_topology_lock(); 1324 gp = g_new_geomf(&g_md_class, "md%d", sc->unit); 1325 gp->softc = sc; 1326 pp = g_new_providerf(gp, "md%d", sc->unit); 1327 devstat_remove_entry(pp->stat); 1328 pp->stat = NULL; 1329 pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; 1330 /* Prune off any residual fractional sector. */ 1331 remn = sc->mediasize % sc->sectorsize; 1332 if (remn != 0) { 1333 printf("md%d: truncating fractional last sector by %u bytes\n", 1334 sc->unit, remn); 1335 sc->mediasize -= remn; 1336 } 1337 pp->mediasize = sc->mediasize; 1338 pp->sectorsize = sc->sectorsize; 1339 switch (sc->type) { 1340 case MD_MALLOC: 1341 case MD_VNODE: 1342 case MD_SWAP: 1343 pp->flags |= G_PF_ACCEPT_UNMAPPED; 1344 break; 1345 case MD_PRELOAD: 1346 case MD_NULL: 1347 break; 1348 } 1349 sc->gp = gp; 1350 sc->pp = pp; 1351 sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize, 1352 DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 1353 sc->devstat->id = pp; 1354 g_error_provider(pp, 0); 1355 g_topology_unlock(); 1356 } 1357 1358 static int 1359 mdcreate_malloc(struct md_s *sc, struct md_req *mdr) 1360 { 1361 uintptr_t sp; 1362 int error; 1363 off_t u; 1364 1365 error = 0; 1366 if (mdr->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE)) 1367 return (EINVAL); 1368 if (mdr->md_sectorsize != 0 && !powerof2(mdr->md_sectorsize)) 1369 return (EINVAL); 1370 /* Compression doesn't make sense if we have reserved space */ 1371 if (mdr->md_options & MD_RESERVE) 1372 mdr->md_options &= ~MD_COMPRESS; 1373 if (mdr->md_fwsectors != 0) 1374 sc->fwsectors = mdr->md_fwsectors; 1375 if (mdr->md_fwheads != 0) 1376 sc->fwheads = mdr->md_fwheads; 1377 sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE | MD_RESERVE); 1378 sc->s_malloc.indir = dimension(sc->mediasize / sc->sectorsize); 1379 sc->s_malloc.uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, 1380 NULL, NULL, 0x1ff, 0); 1381 if (mdr->md_options & MD_RESERVE) { 1382 off_t nsectors; 1383 1384 nsectors = sc->mediasize / sc->sectorsize; 1385 for (u = 0; u < nsectors; u++) { 1386 sp = 
(uintptr_t)uma_zalloc(sc->s_malloc.uma, 1387 (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO); 1388 if (sp != 0) 1389 error = s_write(sc->s_malloc.indir, u, sp); 1390 else 1391 error = ENOMEM; 1392 if (error != 0) 1393 break; 1394 } 1395 } 1396 return (error); 1397 } 1398 1399 static int 1400 mdsetcred(struct md_s *sc, struct ucred *cred) 1401 { 1402 char *tmpbuf; 1403 int error = 0; 1404 1405 /* 1406 * Set credits in our softc 1407 */ 1408 1409 if (sc->cred) 1410 crfree(sc->cred); 1411 sc->cred = crhold(cred); 1412 1413 /* 1414 * Horrible kludge to establish credentials for NFS XXX. 1415 */ 1416 1417 if (sc->type == MD_VNODE && sc->s_vnode.vnode != NULL) { 1418 struct uio auio; 1419 struct iovec aiov; 1420 1421 tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK); 1422 bzero(&auio, sizeof(auio)); 1423 1424 aiov.iov_base = tmpbuf; 1425 aiov.iov_len = sc->sectorsize; 1426 auio.uio_iov = &aiov; 1427 auio.uio_iovcnt = 1; 1428 auio.uio_offset = 0; 1429 auio.uio_rw = UIO_READ; 1430 auio.uio_segflg = UIO_SYSSPACE; 1431 auio.uio_resid = aiov.iov_len; 1432 vn_lock(sc->s_vnode.vnode, LK_EXCLUSIVE | LK_RETRY); 1433 error = VOP_READ(sc->s_vnode.vnode, &auio, 0, sc->cred); 1434 VOP_UNLOCK(sc->s_vnode.vnode); 1435 free(tmpbuf, M_TEMP); 1436 } 1437 return (error); 1438 } 1439 1440 static int 1441 mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td) 1442 { 1443 struct vattr vattr; 1444 struct nameidata nd; 1445 char *fname; 1446 int error, flags; 1447 long v; 1448 1449 fname = mdr->md_file; 1450 if (mdr->md_file_seg == UIO_USERSPACE) { 1451 error = copyinstr(fname, sc->s_vnode.file, 1452 sizeof(sc->s_vnode.file), NULL); 1453 if (error != 0) 1454 return (error); 1455 } else if (mdr->md_file_seg == UIO_SYSSPACE) 1456 strlcpy(sc->s_vnode.file, fname, sizeof(sc->s_vnode.file)); 1457 else 1458 return (EDOOFUS); 1459 1460 /* 1461 * If the user specified that this is a read only device, don't 1462 * set the FWRITE mask before trying to open the backing store. 1463 */ 1464 flags = FREAD | ((mdr->md_options & MD_READONLY) ? 0 : FWRITE) \ 1465 | ((mdr->md_options & MD_VERIFY) ? O_VERIFY : 0); 1466 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->s_vnode.file); 1467 error = vn_open(&nd, &flags, 0, NULL); 1468 if (error != 0) 1469 return (error); 1470 NDFREE_PNBUF(&nd); 1471 if (nd.ni_vp->v_type != VREG) { 1472 error = EINVAL; 1473 goto bad; 1474 } 1475 error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred); 1476 if (error != 0) 1477 goto bad; 1478 if ((mdr->md_options & MD_MUSTDEALLOC) != 0) { 1479 error = VOP_PATHCONF(nd.ni_vp, _PC_DEALLOC_PRESENT, &v); 1480 if (error != 0) 1481 goto bad; 1482 if (v == 0) 1483 sc->candelete = false; 1484 } 1485 if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) { 1486 vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY); 1487 if (VN_IS_DOOMED(nd.ni_vp)) { 1488 /* Forced unmount. 
*/ 1489 error = EBADF; 1490 goto bad; 1491 } 1492 } 1493 nd.ni_vp->v_vflag |= VV_MD; 1494 VOP_UNLOCK(nd.ni_vp); 1495 1496 if (mdr->md_fwsectors != 0) 1497 sc->fwsectors = mdr->md_fwsectors; 1498 if (mdr->md_fwheads != 0) 1499 sc->fwheads = mdr->md_fwheads; 1500 snprintf(sc->ident, sizeof(sc->ident), "MD-DEV%ju-INO%ju", 1501 (uintmax_t)vattr.va_fsid, (uintmax_t)vattr.va_fileid); 1502 sc->flags = mdr->md_options & (MD_ASYNC | MD_CACHE | MD_FORCE | 1503 MD_VERIFY | MD_MUSTDEALLOC); 1504 if (!(flags & FWRITE)) 1505 sc->flags |= MD_READONLY; 1506 sc->s_vnode.vnode = nd.ni_vp; 1507 1508 error = mdsetcred(sc, td->td_ucred); 1509 if (error != 0) { 1510 sc->s_vnode.vnode = NULL; 1511 vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY); 1512 nd.ni_vp->v_vflag &= ~VV_MD; 1513 goto bad; 1514 } 1515 1516 sc->s_vnode.kva = kva_alloc(maxphys + PAGE_SIZE); 1517 return (0); 1518 bad: 1519 VOP_UNLOCK(nd.ni_vp); 1520 (void)vn_close(nd.ni_vp, flags, td->td_ucred, td); 1521 return (error); 1522 } 1523 1524 static void 1525 g_md_providergone(struct g_provider *pp) 1526 { 1527 struct md_s *sc = pp->geom->softc; 1528 1529 mtx_lock(&sc->queue_mtx); 1530 sc->flags |= MD_PROVIDERGONE; 1531 wakeup(&sc->flags); 1532 mtx_unlock(&sc->queue_mtx); 1533 } 1534 1535 static int 1536 mddestroy(struct md_s *sc, struct thread *td) 1537 { 1538 1539 if (sc->gp) { 1540 g_topology_lock(); 1541 g_wither_geom(sc->gp, ENXIO); 1542 g_topology_unlock(); 1543 1544 mtx_lock(&sc->queue_mtx); 1545 while (!(sc->flags & MD_PROVIDERGONE)) 1546 msleep(&sc->flags, &sc->queue_mtx, PRIBIO, "mddestroy", 0); 1547 mtx_unlock(&sc->queue_mtx); 1548 } 1549 if (sc->devstat) { 1550 devstat_remove_entry(sc->devstat); 1551 sc->devstat = NULL; 1552 } 1553 mtx_lock(&sc->queue_mtx); 1554 sc->flags |= MD_SHUTDOWN; 1555 wakeup(sc); 1556 while (!(sc->flags & MD_EXITING)) 1557 msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10); 1558 mtx_unlock(&sc->queue_mtx); 1559 mtx_destroy(&sc->queue_mtx); 1560 switch (sc->type) { 1561 case MD_VNODE: 1562 if (sc->s_vnode.vnode != NULL) { 1563 vn_lock(sc->s_vnode.vnode, LK_EXCLUSIVE | LK_RETRY); 1564 sc->s_vnode.vnode->v_vflag &= ~VV_MD; 1565 VOP_UNLOCK(sc->s_vnode.vnode); 1566 (void)vn_close(sc->s_vnode.vnode, 1567 sc->flags & MD_READONLY ? 
FREAD : (FREAD|FWRITE), 1568 sc->cred, td); 1569 } 1570 if (sc->s_vnode.kva != 0) 1571 kva_free(sc->s_vnode.kva, maxphys + PAGE_SIZE); 1572 break; 1573 case MD_SWAP: 1574 if (sc->s_swap.object != NULL) 1575 vm_object_deallocate(sc->s_swap.object); 1576 break; 1577 case MD_MALLOC: 1578 if (sc->s_malloc.indir != NULL) 1579 destroy_indir(sc, sc->s_malloc.indir); 1580 if (sc->s_malloc.uma != NULL) 1581 uma_zdestroy(sc->s_malloc.uma); 1582 break; 1583 case MD_PRELOAD: 1584 case MD_NULL: 1585 break; 1586 default: 1587 __assert_unreachable(); 1588 } 1589 if (sc->cred != NULL) 1590 crfree(sc->cred); 1591 1592 LIST_REMOVE(sc, list); 1593 free_unr(md_uh, sc->unit); 1594 free(sc, M_MD); 1595 return (0); 1596 } 1597 1598 static int 1599 mdresize(struct md_s *sc, struct md_req *mdr) 1600 { 1601 int error, res; 1602 vm_pindex_t oldpages, newpages; 1603 1604 switch (sc->type) { 1605 case MD_VNODE: 1606 case MD_NULL: 1607 break; 1608 case MD_SWAP: 1609 if (mdr->md_mediasize <= 0 || 1610 (mdr->md_mediasize % PAGE_SIZE) != 0) 1611 return (EDOM); 1612 oldpages = OFF_TO_IDX(sc->mediasize); 1613 newpages = OFF_TO_IDX(mdr->md_mediasize); 1614 if (newpages < oldpages) { 1615 VM_OBJECT_WLOCK(sc->s_swap.object); 1616 vm_object_page_remove(sc->s_swap.object, newpages, 1617 0, 0); 1618 swap_release_by_cred(IDX_TO_OFF(oldpages - 1619 newpages), sc->cred); 1620 sc->s_swap.object->charge = IDX_TO_OFF(newpages); 1621 sc->s_swap.object->size = newpages; 1622 VM_OBJECT_WUNLOCK(sc->s_swap.object); 1623 } else if (newpages > oldpages) { 1624 res = swap_reserve_by_cred(IDX_TO_OFF(newpages - 1625 oldpages), sc->cred); 1626 if (!res) 1627 return (ENOMEM); 1628 if ((mdr->md_options & MD_RESERVE) || 1629 (sc->flags & MD_RESERVE)) { 1630 error = swap_pager_reserve(sc->s_swap.object, 1631 oldpages, newpages - oldpages); 1632 if (error < 0) { 1633 swap_release_by_cred( 1634 IDX_TO_OFF(newpages - oldpages), 1635 sc->cred); 1636 return (EDOM); 1637 } 1638 } 1639 VM_OBJECT_WLOCK(sc->s_swap.object); 1640 sc->s_swap.object->charge = IDX_TO_OFF(newpages); 1641 sc->s_swap.object->size = newpages; 1642 VM_OBJECT_WUNLOCK(sc->s_swap.object); 1643 } 1644 break; 1645 default: 1646 return (EOPNOTSUPP); 1647 } 1648 1649 sc->mediasize = mdr->md_mediasize; 1650 1651 g_topology_lock(); 1652 g_resize_provider(sc->pp, sc->mediasize); 1653 g_topology_unlock(); 1654 return (0); 1655 } 1656 1657 static int 1658 mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td) 1659 { 1660 vm_ooffset_t npage; 1661 int error; 1662 1663 /* 1664 * Range check. Disallow negative sizes and sizes not being 1665 * multiple of page size. 1666 */ 1667 if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0) 1668 return (EDOM); 1669 1670 /* 1671 * Allocate an OBJT_SWAP object. 1672 * 1673 * Note the truncation. 
1674 */ 1675 1676 if ((mdr->md_options & MD_VERIFY) != 0) 1677 return (EINVAL); 1678 npage = mdr->md_mediasize / PAGE_SIZE; 1679 if (mdr->md_fwsectors != 0) 1680 sc->fwsectors = mdr->md_fwsectors; 1681 if (mdr->md_fwheads != 0) 1682 sc->fwheads = mdr->md_fwheads; 1683 sc->s_swap.object = vm_pager_allocate(OBJT_SWAP, NULL, 1684 PAGE_SIZE * npage, VM_PROT_DEFAULT, 0, td->td_ucred); 1685 if (sc->s_swap.object == NULL) 1686 return (ENOMEM); 1687 sc->flags = mdr->md_options & (MD_FORCE | MD_RESERVE); 1688 if (mdr->md_options & MD_RESERVE) { 1689 if (swap_pager_reserve(sc->s_swap.object, 0, npage) < 0) { 1690 error = EDOM; 1691 goto finish; 1692 } 1693 } 1694 error = mdsetcred(sc, td->td_ucred); 1695 finish: 1696 if (error != 0) { 1697 vm_object_deallocate(sc->s_swap.object); 1698 sc->s_swap.object = NULL; 1699 } 1700 return (error); 1701 } 1702 1703 static int 1704 mdcreate_null(struct md_s *sc, struct md_req *mdr, struct thread *td) 1705 { 1706 1707 /* 1708 * Range check. Disallow negative sizes and sizes not being 1709 * multiple of page size. 1710 */ 1711 if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0) 1712 return (EDOM); 1713 1714 return (0); 1715 } 1716 1717 static int 1718 kern_mdattach_locked(struct thread *td, struct md_req *mdr) 1719 { 1720 struct md_s *sc; 1721 unsigned sectsize; 1722 int error; 1723 1724 sx_assert(&md_sx, SA_XLOCKED); 1725 1726 switch (mdr->md_type) { 1727 case MD_MALLOC: 1728 case MD_PRELOAD: 1729 case MD_VNODE: 1730 case MD_SWAP: 1731 case MD_NULL: 1732 break; 1733 default: 1734 return (EINVAL); 1735 } 1736 if (mdr->md_sectorsize == 0) 1737 sectsize = DEV_BSIZE; 1738 else 1739 sectsize = mdr->md_sectorsize; 1740 if (sectsize > maxphys || mdr->md_mediasize < sectsize) 1741 return (EINVAL); 1742 if (mdr->md_options & MD_AUTOUNIT) 1743 sc = mdnew(-1, &error, mdr->md_type); 1744 else { 1745 if (mdr->md_unit > INT_MAX) 1746 return (EINVAL); 1747 sc = mdnew(mdr->md_unit, &error, mdr->md_type); 1748 } 1749 if (sc == NULL) 1750 return (error); 1751 if (mdr->md_label != NULL) 1752 error = copyinstr(mdr->md_label, sc->label, 1753 sizeof(sc->label), NULL); 1754 if (error != 0) 1755 goto err_after_new; 1756 if (mdr->md_options & MD_AUTOUNIT) 1757 mdr->md_unit = sc->unit; 1758 sc->mediasize = mdr->md_mediasize; 1759 sc->sectorsize = sectsize; 1760 sc->candelete = true; 1761 error = EDOOFUS; 1762 switch (sc->type) { 1763 case MD_MALLOC: 1764 sc->start = mdstart_malloc; 1765 error = mdcreate_malloc(sc, mdr); 1766 break; 1767 case MD_PRELOAD: 1768 /* 1769 * We disallow attaching preloaded memory disks via 1770 * ioctl. Preloaded memory disks are automatically 1771 * attached in g_md_init(). 
1772 */ 1773 error = EOPNOTSUPP; 1774 break; 1775 case MD_VNODE: 1776 sc->start = mdstart_vnode; 1777 error = mdcreate_vnode(sc, mdr, td); 1778 break; 1779 case MD_SWAP: 1780 sc->start = mdstart_swap; 1781 error = mdcreate_swap(sc, mdr, td); 1782 break; 1783 case MD_NULL: 1784 sc->start = mdstart_null; 1785 error = mdcreate_null(sc, mdr, td); 1786 break; 1787 } 1788 err_after_new: 1789 if (error != 0) { 1790 mddestroy(sc, td); 1791 return (error); 1792 } 1793 1794 mdinit(sc); 1795 return (0); 1796 } 1797 1798 static int 1799 kern_mdattach(struct thread *td, struct md_req *mdr) 1800 { 1801 int error; 1802 1803 sx_xlock(&md_sx); 1804 error = kern_mdattach_locked(td, mdr); 1805 sx_xunlock(&md_sx); 1806 return (error); 1807 } 1808 1809 static int 1810 kern_mddetach_locked(struct thread *td, struct md_req *mdr) 1811 { 1812 struct md_s *sc; 1813 1814 sx_assert(&md_sx, SA_XLOCKED); 1815 1816 if (mdr->md_mediasize != 0 || 1817 (mdr->md_options & ~MD_FORCE) != 0) 1818 return (EINVAL); 1819 1820 sc = mdfind(mdr->md_unit); 1821 if (sc == NULL) 1822 return (ENOENT); 1823 if (sc->opencount != 0 && !(sc->flags & MD_FORCE) && 1824 !(mdr->md_options & MD_FORCE)) 1825 return (EBUSY); 1826 return (mddestroy(sc, td)); 1827 } 1828 1829 static int 1830 kern_mddetach(struct thread *td, struct md_req *mdr) 1831 { 1832 int error; 1833 1834 sx_xlock(&md_sx); 1835 error = kern_mddetach_locked(td, mdr); 1836 sx_xunlock(&md_sx); 1837 return (error); 1838 } 1839 1840 static int 1841 kern_mdresize_locked(struct md_req *mdr) 1842 { 1843 struct md_s *sc; 1844 1845 sx_assert(&md_sx, SA_XLOCKED); 1846 1847 if ((mdr->md_options & ~(MD_FORCE | MD_RESERVE)) != 0) 1848 return (EINVAL); 1849 1850 sc = mdfind(mdr->md_unit); 1851 if (sc == NULL) 1852 return (ENOENT); 1853 if (mdr->md_mediasize < sc->sectorsize) 1854 return (EINVAL); 1855 mdr->md_mediasize -= mdr->md_mediasize % sc->sectorsize; 1856 if (mdr->md_mediasize < sc->mediasize && 1857 !(sc->flags & MD_FORCE) && 1858 !(mdr->md_options & MD_FORCE)) 1859 return (EBUSY); 1860 return (mdresize(sc, mdr)); 1861 } 1862 1863 static int 1864 kern_mdresize(struct md_req *mdr) 1865 { 1866 int error; 1867 1868 sx_xlock(&md_sx); 1869 error = kern_mdresize_locked(mdr); 1870 sx_xunlock(&md_sx); 1871 return (error); 1872 } 1873 1874 static int 1875 kern_mdquery_locked(struct md_req *mdr) 1876 { 1877 struct md_s *sc; 1878 int error; 1879 1880 sx_assert(&md_sx, SA_XLOCKED); 1881 1882 sc = mdfind(mdr->md_unit); 1883 if (sc == NULL) 1884 return (ENOENT); 1885 mdr->md_type = sc->type; 1886 mdr->md_options = sc->flags; 1887 mdr->md_mediasize = sc->mediasize; 1888 mdr->md_sectorsize = sc->sectorsize; 1889 error = 0; 1890 if (mdr->md_label != NULL) { 1891 error = copyout(sc->label, mdr->md_label, 1892 strlen(sc->label) + 1); 1893 if (error != 0) 1894 return (error); 1895 } 1896 if (sc->type == MD_VNODE) { 1897 error = copyout(sc->s_vnode.file, mdr->md_file, 1898 strlen(sc->s_vnode.file) + 1); 1899 } else if (sc->type == MD_PRELOAD && mdr->md_file != NULL) { 1900 error = copyout(sc->s_preload.name, mdr->md_file, 1901 strlen(sc->s_preload.name) + 1); 1902 } 1903 return (error); 1904 } 1905 1906 static int 1907 kern_mdquery(struct md_req *mdr) 1908 { 1909 int error; 1910 1911 sx_xlock(&md_sx); 1912 error = kern_mdquery_locked(mdr); 1913 sx_xunlock(&md_sx); 1914 return (error); 1915 } 1916 1917 /* Copy members that are not userspace pointers. 
*/ 1918 #define MD_IOCTL2REQ(mdio, mdr) do { \ 1919 (mdr)->md_unit = (mdio)->md_unit; \ 1920 (mdr)->md_type = (mdio)->md_type; \ 1921 (mdr)->md_mediasize = (mdio)->md_mediasize; \ 1922 (mdr)->md_sectorsize = (mdio)->md_sectorsize; \ 1923 (mdr)->md_options = (mdio)->md_options; \ 1924 (mdr)->md_fwheads = (mdio)->md_fwheads; \ 1925 (mdr)->md_fwsectors = (mdio)->md_fwsectors; \ 1926 (mdr)->md_units = &(mdio)->md_pad[0]; \ 1927 (mdr)->md_units_nitems = nitems((mdio)->md_pad); \ 1928 } while(0) 1929 1930 /* Copy members that might have been updated */ 1931 #define MD_REQ2IOCTL(mdr, mdio) do { \ 1932 (mdio)->md_unit = (mdr)->md_unit; \ 1933 (mdio)->md_type = (mdr)->md_type; \ 1934 (mdio)->md_mediasize = (mdr)->md_mediasize; \ 1935 (mdio)->md_sectorsize = (mdr)->md_sectorsize; \ 1936 (mdio)->md_options = (mdr)->md_options; \ 1937 (mdio)->md_fwheads = (mdr)->md_fwheads; \ 1938 (mdio)->md_fwsectors = (mdr)->md_fwsectors; \ 1939 } while(0) 1940 1941 static int 1942 mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, 1943 struct thread *td) 1944 { 1945 struct md_req mdr; 1946 int error; 1947 1948 if (md_debug) 1949 printf("mdctlioctl(%s %lx %p %x %p)\n", 1950 devtoname(dev), cmd, addr, flags, td); 1951 1952 bzero(&mdr, sizeof(mdr)); 1953 switch (cmd) { 1954 case MDIOCATTACH: 1955 case MDIOCDETACH: 1956 case MDIOCRESIZE: 1957 case MDIOCQUERY: { 1958 struct md_ioctl *mdio = (struct md_ioctl *)addr; 1959 if (mdio->md_version != MDIOVERSION) 1960 return (EINVAL); 1961 MD_IOCTL2REQ(mdio, &mdr); 1962 mdr.md_file = mdio->md_file; 1963 mdr.md_file_seg = UIO_USERSPACE; 1964 /* If the file is adjacent to the md_ioctl it's in kernel. */ 1965 if ((void *)mdio->md_file == (void *)(mdio + 1)) 1966 mdr.md_file_seg = UIO_SYSSPACE; 1967 mdr.md_label = mdio->md_label; 1968 break; 1969 } 1970 #ifdef COMPAT_FREEBSD32 1971 case MDIOCATTACH_32: 1972 case MDIOCDETACH_32: 1973 case MDIOCRESIZE_32: 1974 case MDIOCQUERY_32: { 1975 struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr; 1976 if (mdio->md_version != MDIOVERSION) 1977 return (EINVAL); 1978 MD_IOCTL2REQ(mdio, &mdr); 1979 mdr.md_file = (void *)(uintptr_t)mdio->md_file; 1980 mdr.md_file_seg = UIO_USERSPACE; 1981 mdr.md_label = (void *)(uintptr_t)mdio->md_label; 1982 break; 1983 } 1984 #endif 1985 default: 1986 /* Fall through to handler switch. */ 1987 break; 1988 } 1989 1990 error = 0; 1991 switch (cmd) { 1992 case MDIOCATTACH: 1993 #ifdef COMPAT_FREEBSD32 1994 case MDIOCATTACH_32: 1995 #endif 1996 error = kern_mdattach(td, &mdr); 1997 break; 1998 case MDIOCDETACH: 1999 #ifdef COMPAT_FREEBSD32 2000 case MDIOCDETACH_32: 2001 #endif 2002 error = kern_mddetach(td, &mdr); 2003 break; 2004 case MDIOCRESIZE: 2005 #ifdef COMPAT_FREEBSD32 2006 case MDIOCRESIZE_32: 2007 #endif 2008 error = kern_mdresize(&mdr); 2009 break; 2010 case MDIOCQUERY: 2011 #ifdef COMPAT_FREEBSD32 2012 case MDIOCQUERY_32: 2013 #endif 2014 error = kern_mdquery(&mdr); 2015 break; 2016 default: 2017 error = ENOIOCTL; 2018 } 2019 2020 switch (cmd) { 2021 case MDIOCATTACH: 2022 case MDIOCQUERY: { 2023 struct md_ioctl *mdio = (struct md_ioctl *)addr; 2024 MD_REQ2IOCTL(&mdr, mdio); 2025 break; 2026 } 2027 #ifdef COMPAT_FREEBSD32 2028 case MDIOCATTACH_32: 2029 case MDIOCQUERY_32: { 2030 struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr; 2031 MD_REQ2IOCTL(&mdr, mdio); 2032 break; 2033 } 2034 #endif 2035 default: 2036 /* Other commands to not alter mdr. 
*/ 2037 break; 2038 } 2039 2040 return (error); 2041 } 2042 2043 static void 2044 md_preloaded(u_char *image, size_t length, const char *name) 2045 { 2046 struct md_s *sc; 2047 int error; 2048 2049 sc = mdnew(-1, &error, MD_PRELOAD); 2050 if (sc == NULL) 2051 return; 2052 sc->mediasize = length; 2053 sc->sectorsize = DEV_BSIZE; 2054 sc->s_preload.pl_ptr = image; 2055 sc->s_preload.pl_len = length; 2056 sc->start = mdstart_preload; 2057 if (name != NULL) 2058 strlcpy(sc->s_preload.name, name, 2059 sizeof(sc->s_preload.name)); 2060 #ifdef MD_ROOT 2061 if (sc->unit == 0) { 2062 #ifndef ROOTDEVNAME 2063 rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0"; 2064 #endif 2065 #ifdef MD_ROOT_READONLY 2066 sc->flags |= MD_READONLY; 2067 #endif 2068 } 2069 #endif 2070 mdinit(sc); 2071 if (name != NULL) { 2072 printf("%s%d: Preloaded image <%s> %zd bytes at %p\n", 2073 MD_NAME, sc->unit, name, length, image); 2074 } else { 2075 printf("%s%d: Embedded image %zd bytes at %p\n", 2076 MD_NAME, sc->unit, length, image); 2077 } 2078 } 2079 2080 static void 2081 g_md_init(struct g_class *mp __unused) 2082 { 2083 caddr_t mod; 2084 u_char *ptr, *name, *type; 2085 unsigned len; 2086 int i; 2087 2088 /* figure out log2(NINDIR) */ 2089 for (i = NINDIR, nshift = -1; i; nshift++) 2090 i >>= 1; 2091 2092 mod = NULL; 2093 sx_init(&md_sx, "MD config lock"); 2094 g_topology_unlock(); 2095 md_uh = new_unrhdr(0, INT_MAX, NULL); 2096 #ifdef MD_ROOT 2097 if (mfs_root_size != 0) { 2098 sx_xlock(&md_sx); 2099 #ifdef MD_ROOT_MEM 2100 md_preloaded(mfs_root, mfs_root_size, NULL); 2101 #else 2102 md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size, 2103 NULL); 2104 #endif 2105 sx_xunlock(&md_sx); 2106 } 2107 #endif 2108 /* XXX: are preload_* static or do they need Giant ? */ 2109 while ((mod = preload_search_next_name(mod)) != NULL) { 2110 name = (char *)preload_search_info(mod, MODINFO_NAME); 2111 if (name == NULL) 2112 continue; 2113 type = (char *)preload_search_info(mod, MODINFO_TYPE); 2114 if (type == NULL) 2115 continue; 2116 if (strcmp(type, "md_image") && strcmp(type, "mfs_root")) 2117 continue; 2118 ptr = preload_fetch_addr(mod); 2119 len = preload_fetch_size(mod); 2120 if (ptr != NULL && len != 0) { 2121 sx_xlock(&md_sx); 2122 md_preloaded(ptr, len, name); 2123 sx_xunlock(&md_sx); 2124 } 2125 } 2126 status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL, 2127 0600, MDCTL_NAME); 2128 g_topology_lock(); 2129 } 2130 2131 static void 2132 g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 2133 struct g_consumer *cp __unused, struct g_provider *pp) 2134 { 2135 struct md_s *mp; 2136 char *type; 2137 2138 mp = gp->softc; 2139 if (mp == NULL) 2140 return; 2141 2142 switch (mp->type) { 2143 case MD_MALLOC: 2144 type = "malloc"; 2145 break; 2146 case MD_PRELOAD: 2147 type = "preload"; 2148 break; 2149 case MD_VNODE: 2150 type = "vnode"; 2151 break; 2152 case MD_SWAP: 2153 type = "swap"; 2154 break; 2155 case MD_NULL: 2156 type = "null"; 2157 break; 2158 default: 2159 type = "unknown"; 2160 break; 2161 } 2162 2163 if (pp != NULL) { 2164 if (indent == NULL) { 2165 sbuf_printf(sb, " u %d", mp->unit); 2166 sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize); 2167 sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads); 2168 sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors); 2169 sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize); 2170 sbuf_printf(sb, " t %s", type); 2171 if (mp->type == MD_VNODE && 2172 mp->s_vnode.vnode != NULL) 2173 sbuf_printf(sb, " file %s", mp->s_vnode.file); 2174 if 
(mp->type == MD_PRELOAD && 2175 mp->s_preload.name[0] != '\0') { 2176 sbuf_printf(sb, " file %s", 2177 mp->s_preload.name); 2178 } 2179 sbuf_printf(sb, " label %s", mp->label); 2180 } else { 2181 sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, 2182 mp->unit); 2183 sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n", 2184 indent, (uintmax_t) mp->sectorsize); 2185 sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n", 2186 indent, (uintmax_t) mp->fwheads); 2187 sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n", 2188 indent, (uintmax_t) mp->fwsectors); 2189 if (mp->ident[0] != '\0') { 2190 sbuf_printf(sb, "%s<ident>", indent); 2191 g_conf_printf_escaped(sb, "%s", mp->ident); 2192 sbuf_printf(sb, "</ident>\n"); 2193 } 2194 sbuf_printf(sb, "%s<length>%ju</length>\n", 2195 indent, (uintmax_t) mp->mediasize); 2196 sbuf_printf(sb, "%s<compression>%s</compression>\n", indent, 2197 (mp->flags & MD_COMPRESS) == 0 ? "off": "on"); 2198 sbuf_printf(sb, "%s<access>%s</access>\n", indent, 2199 (mp->flags & MD_READONLY) == 0 ? "read-write": 2200 "read-only"); 2201 sbuf_printf(sb, "%s<type>%s</type>\n", indent, 2202 type); 2203 if (mp->type == MD_VNODE) { 2204 if (mp->s_vnode.vnode != NULL) { 2205 sbuf_printf(sb, "%s<file>", indent); 2206 g_conf_printf_escaped(sb, "%s", 2207 mp->s_vnode.file); 2208 sbuf_printf(sb, "</file>\n"); 2209 } 2210 sbuf_printf(sb, "%s<cache>%s</cache>\n", indent, 2211 (mp->flags & MD_CACHE) == 0 ? "off": "on"); 2212 } 2213 if (mp->type == MD_PRELOAD && 2214 mp->s_preload.name[0] != '\0') { 2215 sbuf_printf(sb, "%s<file>", indent); 2216 g_conf_printf_escaped(sb, "%s", 2217 mp->s_preload.name); 2218 sbuf_printf(sb, "</file>\n"); 2219 } 2220 sbuf_printf(sb, "%s<label>", indent); 2221 g_conf_printf_escaped(sb, "%s", mp->label); 2222 sbuf_printf(sb, "</label>\n"); 2223 } 2224 } 2225 } 2226 2227 static void 2228 g_md_fini(struct g_class *mp __unused) 2229 { 2230 2231 sx_destroy(&md_sx); 2232 if (status_dev != NULL) 2233 destroy_dev(status_dev); 2234 delete_unrhdr(md_uh); 2235 } 2236
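/*
 * Illustrative usage sketch, not part of the driver: the control path above
 * (mdctlioctl() into kern_mdattach() and then mdcreate_swap() or
 * mdcreate_vnode()) is normally driven by mdconfig(8), but a minimal
 * userspace program can exercise it directly through /dev/mdctl.  The hedged
 * example below assumes only the public definitions in <sys/mdioctl.h>
 * (MDIOVERSION, MDIOCATTACH, MD_SWAP, MD_AUTOUNIT, MDCTL_NAME and
 * struct md_ioctl) and mirrors the checks made in kern_mdattach_locked()
 * and mdcreate_swap(): md_version must equal MDIOVERSION, a zero
 * md_sectorsize defaults to DEV_BSIZE, a swap-backed disk's mediasize must
 * be a multiple of PAGE_SIZE, and on success the kernel writes the
 * allocated unit number back into md_unit.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mdioctl.h>
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct md_ioctl mdio;
 *		int fd;
 *
 *		fd = open("/dev/" MDCTL_NAME, O_RDWR);
 *		if (fd < 0)
 *			err(1, "open /dev/%s", MDCTL_NAME);
 *
 *		memset(&mdio, 0, sizeof(mdio));
 *		mdio.md_version = MDIOVERSION;		// checked by mdctlioctl()
 *		mdio.md_type = MD_SWAP;			// swap-object backed disk
 *		mdio.md_options = MD_AUTOUNIT;		// let the kernel pick the unit
 *		mdio.md_mediasize = 64 * 1024 * 1024;	// multiple of PAGE_SIZE
 *		mdio.md_sectorsize = 0;			// 0 means DEV_BSIZE
 *
 *		if (ioctl(fd, MDIOCATTACH, &mdio) == -1)
 *			err(1, "MDIOCATTACH");
 *		printf("attached md%d\n", mdio.md_unit);
 *		close(fd);
 *		return (0);
 *	}
 */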