1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * lofi (loopback file) driver - allows you to attach a file to a device, 28 * which can then be accessed through that device. The simple model is that 29 * you tell lofi to open a file, and then use the block device you get as 30 * you would any block device. lofi translates access to the block device 31 * into I/O on the underlying file. This is mostly useful for 32 * mounting images of filesystems. 33 * 34 * lofi is controlled through /dev/lofictl - this is the only device exported 35 * during attach, and is minor number 0. lofiadm communicates with lofi through 36 * ioctls on this device. When a file is attached to lofi, block and character 37 * devices are exported in /dev/lofi and /dev/rlofi. Currently, these devices 38 * are identified by their minor number, and the minor number is also used 39 * as the name in /dev/lofi. If we ever decide to support virtual disks, 40 * we'll have to divide the minor number space to identify fdisk partitions 41 * and slices, and the name will then be the minor number shifted down a 42 * few bits. Minor devices are tracked with state structures handled with 43 * ddi_soft_state(9F) for simplicity. 44 * 45 * A file attached to lofi is opened when attached and not closed until 46 * explicitly detached from lofi. This seems more sensible than deferring 47 * the open until the /dev/lofi device is opened, for a number of reasons. 48 * One is that any failure is likely to be noticed by the person (or script) 49 * running lofiadm. Another is that it would be a security problem if the 50 * file was replaced by another one after being added but before being opened. 51 * 52 * The only hard part about lofi is the ioctls. In order to support things 53 * like 'newfs' on a lofi device, it needs to support certain disk ioctls. 54 * So it has to fake disk geometry and partition information. More may need 55 * to be faked if your favorite utility doesn't work and you think it should 56 * (fdformat doesn't work because it really wants to know the type of floppy 57 * controller to talk to, and that didn't seem easy to fake. Or possibly even 58 * necessary, since we have mkfs_pcfs now). 59 * 60 * Normally, a lofi device cannot be detached if it is open (i.e. busy). To 61 * support simulation of hotplug events, an optional force flag is provided. 62 * If a lofi device is open when a force detach is requested, then the 63 * underlying file is closed and any subsequent operations return EIO. When the 64 * device is closed for the last time, it will be cleaned up at that time. In 65 * addition, the DKIOCSTATE ioctl will return DKIO_DEV_GONE when the device is 66 * detached but not removed. 67 * 68 * Known problems: 69 * 70 * UFS logging. Mounting a UFS filesystem image "logging" 71 * works for basic copy testing but wedges during a build of ON through 72 * that image. Some deadlock in lufs holding the log mutex and then 73 * getting stuck on a buf. So for now, don't do that. 74 * 75 * Direct I/O. Since the filesystem data is being cached in the buffer 76 * cache, _and_ again in the underlying filesystem, it's tempting to 77 * enable direct I/O on the underlying file. Don't, because that deadlocks. 78 * I think to fix the cache-twice problem we might need filesystem support. 79 * 80 * lofi on itself. The simple lock strategy (lofi_lock) precludes this 81 * because you'll be in lofi_ioctl, holding the lock when you open the 82 * file, which, if it's lofi, will grab lofi_lock. We prevent this for 83 * now, though not using ddi_soft_state(9F) would make it possible to 84 * do. Though it would still be silly. 85 * 86 * Interesting things to do: 87 * 88 * Allow multiple files for each device. A poor-man's metadisk, basically. 89 * 90 * Pass-through ioctls on block devices. You can (though it's not 91 * documented), give lofi a block device as a file name. Then we shouldn't 92 * need to fake a geometry, however, it may be relevant if you're replacing 93 * metadisk, or using lofi to get crypto. 94 * It makes sense to do lofiadm -c aes -a /dev/dsk/c0t0d0s4 /dev/lofi/1 95 * and then in /etc/vfstab have an entry for /dev/lofi/1 as /export/home. 96 * In fact this even makes sense if you have lofi "above" metadisk. 97 * 98 * Encryption: 99 * Each lofi device can have its own symmetric key and cipher. 100 * They are passed to us by lofiadm(1m) in the correct format for use 101 * with the misc/kcf crypto_* routines. 102 * 103 * Each block has its own IV, that is calculated in lofi_blk_mech(), based 104 * on the "master" key held in the lsp and the block number of the buffer. 105 */ 106 107 #include <sys/types.h> 108 #include <netinet/in.h> 109 #include <sys/sysmacros.h> 110 #include <sys/uio.h> 111 #include <sys/kmem.h> 112 #include <sys/cred.h> 113 #include <sys/mman.h> 114 #include <sys/errno.h> 115 #include <sys/aio_req.h> 116 #include <sys/stat.h> 117 #include <sys/file.h> 118 #include <sys/modctl.h> 119 #include <sys/conf.h> 120 #include <sys/debug.h> 121 #include <sys/vnode.h> 122 #include <sys/lofi.h> 123 #include <sys/fcntl.h> 124 #include <sys/pathname.h> 125 #include <sys/filio.h> 126 #include <sys/fdio.h> 127 #include <sys/open.h> 128 #include <sys/disp.h> 129 #include <vm/seg_map.h> 130 #include <sys/ddi.h> 131 #include <sys/sunddi.h> 132 #include <sys/zmod.h> 133 #include <sys/crypto/common.h> 134 #include <sys/crypto/api.h> 135 #include <LzmaDec.h> 136 137 /* 138 * The basis for CRYOFF is derived from usr/src/uts/common/sys/fs/ufs_fs.h. 139 * Crypto metadata, if it exists, is located at the end of the boot block 140 * (BBOFF + BBSIZE, which is SBOFF). The super block and everything after 141 * is offset by the size of the crypto metadata which is handled by 142 * lsp->ls_crypto_offset. 143 */ 144 #define CRYOFF ((off_t)8192) 145 146 #define NBLOCKS_PROP_NAME "Nblocks" 147 #define SIZE_PROP_NAME "Size" 148 149 #define SETUP_C_DATA(cd, buf, len) \ 150 (cd).cd_format = CRYPTO_DATA_RAW; \ 151 (cd).cd_offset = 0; \ 152 (cd).cd_miscdata = NULL; \ 153 (cd).cd_length = (len); \ 154 (cd).cd_raw.iov_base = (buf); \ 155 (cd).cd_raw.iov_len = (len); 156 157 #define UIO_CHECK(uio) \ 158 if (((uio)->uio_loffset % DEV_BSIZE) != 0 || \ 159 ((uio)->uio_resid % DEV_BSIZE) != 0) { \ 160 return (EINVAL); \ 161 } 162 163 static dev_info_t *lofi_dip = NULL; 164 static void *lofi_statep = NULL; 165 static kmutex_t lofi_lock; /* state lock */ 166 167 /* 168 * Because lofi_taskq_nthreads limits the actual swamping of the device, the 169 * maxalloc parameter (lofi_taskq_maxalloc) should be tuned conservatively 170 * high. If we want to be assured that the underlying device is always busy, 171 * we must be sure that the number of bytes enqueued when the number of 172 * enqueued tasks exceeds maxalloc is sufficient to keep the device busy for 173 * the duration of the sleep time in taskq_ent_alloc(). That is, lofi should 174 * set maxalloc to be the maximum throughput (in bytes per second) of the 175 * underlying device divided by the minimum I/O size. We assume a realistic 176 * maximum throughput of one hundred megabytes per second; we set maxalloc on 177 * the lofi task queue to be 104857600 divided by DEV_BSIZE. 178 */ 179 static int lofi_taskq_maxalloc = 104857600 / DEV_BSIZE; 180 static int lofi_taskq_nthreads = 4; /* # of taskq threads per device */ 181 182 uint32_t lofi_max_files = LOFI_MAX_FILES; 183 const char lofi_crypto_magic[6] = LOFI_CRYPTO_MAGIC; 184 185 /* 186 * To avoid decompressing data in a compressed segment multiple times 187 * when accessing small parts of a segment's data, we cache and reuse 188 * the uncompressed segment's data. 189 * 190 * A single cached segment is sufficient to avoid lots of duplicate 191 * segment decompress operations. A small cache size also reduces the 192 * memory footprint. 193 * 194 * lofi_max_comp_cache is the maximum number of decompressed data segments 195 * cached for each compressed lofi image. It can be set to 0 to disable 196 * caching. 197 */ 198 199 uint32_t lofi_max_comp_cache = 1; 200 201 static int gzip_decompress(void *src, size_t srclen, void *dst, 202 size_t *destlen, int level); 203 204 static int lzma_decompress(void *src, size_t srclen, void *dst, 205 size_t *dstlen, int level); 206 207 lofi_compress_info_t lofi_compress_table[LOFI_COMPRESS_FUNCTIONS] = { 208 {gzip_decompress, NULL, 6, "gzip"}, /* default */ 209 {gzip_decompress, NULL, 6, "gzip-6"}, 210 {gzip_decompress, NULL, 9, "gzip-9"}, 211 {lzma_decompress, NULL, 0, "lzma"} 212 }; 213 214 /*ARGSUSED*/ 215 static void 216 *SzAlloc(void *p, size_t size) 217 { 218 return (kmem_alloc(size, KM_SLEEP)); 219 } 220 221 /*ARGSUSED*/ 222 static void 223 SzFree(void *p, void *address, size_t size) 224 { 225 kmem_free(address, size); 226 } 227 228 static ISzAlloc g_Alloc = { SzAlloc, SzFree }; 229 230 /* 231 * Free data referenced by the linked list of cached uncompressed 232 * segments. 233 */ 234 static void 235 lofi_free_comp_cache(struct lofi_state *lsp) 236 { 237 struct lofi_comp_cache *lc; 238 239 while ((lc = list_remove_head(&lsp->ls_comp_cache)) != NULL) { 240 kmem_free(lc->lc_data, lsp->ls_uncomp_seg_sz); 241 kmem_free(lc, sizeof (struct lofi_comp_cache)); 242 lsp->ls_comp_cache_count--; 243 } 244 ASSERT(lsp->ls_comp_cache_count == 0); 245 } 246 247 static int 248 lofi_busy(void) 249 { 250 minor_t minor; 251 252 /* 253 * We need to make sure no mappings exist - mod_remove won't 254 * help because the device isn't open. 255 */ 256 mutex_enter(&lofi_lock); 257 for (minor = 1; minor <= lofi_max_files; minor++) { 258 if (ddi_get_soft_state(lofi_statep, minor) != NULL) { 259 mutex_exit(&lofi_lock); 260 return (EBUSY); 261 } 262 } 263 mutex_exit(&lofi_lock); 264 return (0); 265 } 266 267 static int 268 is_opened(struct lofi_state *lsp) 269 { 270 ASSERT(mutex_owned(&lofi_lock)); 271 return (lsp->ls_chr_open || lsp->ls_blk_open || lsp->ls_lyr_open_count); 272 } 273 274 static int 275 mark_opened(struct lofi_state *lsp, int otyp) 276 { 277 ASSERT(mutex_owned(&lofi_lock)); 278 switch (otyp) { 279 case OTYP_CHR: 280 lsp->ls_chr_open = 1; 281 break; 282 case OTYP_BLK: 283 lsp->ls_blk_open = 1; 284 break; 285 case OTYP_LYR: 286 lsp->ls_lyr_open_count++; 287 break; 288 default: 289 return (-1); 290 } 291 return (0); 292 } 293 294 static void 295 mark_closed(struct lofi_state *lsp, int otyp) 296 { 297 ASSERT(mutex_owned(&lofi_lock)); 298 switch (otyp) { 299 case OTYP_CHR: 300 lsp->ls_chr_open = 0; 301 break; 302 case OTYP_BLK: 303 lsp->ls_blk_open = 0; 304 break; 305 case OTYP_LYR: 306 lsp->ls_lyr_open_count--; 307 break; 308 default: 309 break; 310 } 311 } 312 313 static void 314 lofi_free_crypto(struct lofi_state *lsp) 315 { 316 ASSERT(mutex_owned(&lofi_lock)); 317 318 if (lsp->ls_crypto_enabled) { 319 /* 320 * Clean up the crypto state so that it doesn't hang around 321 * in memory after we are done with it. 322 */ 323 bzero(lsp->ls_key.ck_data, 324 CRYPTO_BITS2BYTES(lsp->ls_key.ck_length)); 325 kmem_free(lsp->ls_key.ck_data, 326 CRYPTO_BITS2BYTES(lsp->ls_key.ck_length)); 327 lsp->ls_key.ck_data = NULL; 328 lsp->ls_key.ck_length = 0; 329 330 if (lsp->ls_mech.cm_param != NULL) { 331 kmem_free(lsp->ls_mech.cm_param, 332 lsp->ls_mech.cm_param_len); 333 lsp->ls_mech.cm_param = NULL; 334 lsp->ls_mech.cm_param_len = 0; 335 } 336 337 if (lsp->ls_iv_mech.cm_param != NULL) { 338 kmem_free(lsp->ls_iv_mech.cm_param, 339 lsp->ls_iv_mech.cm_param_len); 340 lsp->ls_iv_mech.cm_param = NULL; 341 lsp->ls_iv_mech.cm_param_len = 0; 342 } 343 344 mutex_destroy(&lsp->ls_crypto_lock); 345 } 346 } 347 348 static void 349 lofi_free_handle(dev_t dev, minor_t minor, struct lofi_state *lsp, 350 cred_t *credp) 351 { 352 dev_t newdev; 353 char namebuf[50]; 354 355 ASSERT(mutex_owned(&lofi_lock)); 356 357 lofi_free_crypto(lsp); 358 359 if (lsp->ls_vp) { 360 (void) VOP_CLOSE(lsp->ls_vp, lsp->ls_openflag, 361 1, 0, credp, NULL); 362 VN_RELE(lsp->ls_vp); 363 lsp->ls_vp = NULL; 364 } 365 366 newdev = makedevice(getmajor(dev), minor); 367 (void) ddi_prop_remove(newdev, lofi_dip, SIZE_PROP_NAME); 368 (void) ddi_prop_remove(newdev, lofi_dip, NBLOCKS_PROP_NAME); 369 370 (void) snprintf(namebuf, sizeof (namebuf), "%d", minor); 371 ddi_remove_minor_node(lofi_dip, namebuf); 372 (void) snprintf(namebuf, sizeof (namebuf), "%d,raw", minor); 373 ddi_remove_minor_node(lofi_dip, namebuf); 374 375 kmem_free(lsp->ls_filename, lsp->ls_filename_sz); 376 taskq_destroy(lsp->ls_taskq); 377 if (lsp->ls_kstat) { 378 kstat_delete(lsp->ls_kstat); 379 mutex_destroy(&lsp->ls_kstat_lock); 380 } 381 382 /* 383 * Free cached decompressed segment data 384 */ 385 lofi_free_comp_cache(lsp); 386 list_destroy(&lsp->ls_comp_cache); 387 mutex_destroy(&lsp->ls_comp_cache_lock); 388 389 if (lsp->ls_uncomp_seg_sz > 0) { 390 kmem_free(lsp->ls_comp_index_data, lsp->ls_comp_index_data_sz); 391 lsp->ls_uncomp_seg_sz = 0; 392 } 393 394 mutex_destroy(&lsp->ls_vp_lock); 395 396 ddi_soft_state_free(lofi_statep, minor); 397 } 398 399 /*ARGSUSED*/ 400 static int 401 lofi_open(dev_t *devp, int flag, int otyp, struct cred *credp) 402 { 403 minor_t minor; 404 struct lofi_state *lsp; 405 406 mutex_enter(&lofi_lock); 407 minor = getminor(*devp); 408 if (minor == 0) { 409 /* master control device */ 410 /* must be opened exclusively */ 411 if (((flag & FEXCL) != FEXCL) || (otyp != OTYP_CHR)) { 412 mutex_exit(&lofi_lock); 413 return (EINVAL); 414 } 415 lsp = ddi_get_soft_state(lofi_statep, 0); 416 if (lsp == NULL) { 417 mutex_exit(&lofi_lock); 418 return (ENXIO); 419 } 420 if (is_opened(lsp)) { 421 mutex_exit(&lofi_lock); 422 return (EBUSY); 423 } 424 (void) mark_opened(lsp, OTYP_CHR); 425 mutex_exit(&lofi_lock); 426 return (0); 427 } 428 429 /* otherwise, the mapping should already exist */ 430 lsp = ddi_get_soft_state(lofi_statep, minor); 431 if (lsp == NULL) { 432 mutex_exit(&lofi_lock); 433 return (EINVAL); 434 } 435 436 if (lsp->ls_vp == NULL) { 437 mutex_exit(&lofi_lock); 438 return (ENXIO); 439 } 440 441 if (mark_opened(lsp, otyp) == -1) { 442 mutex_exit(&lofi_lock); 443 return (EINVAL); 444 } 445 446 mutex_exit(&lofi_lock); 447 return (0); 448 } 449 450 /*ARGSUSED*/ 451 static int 452 lofi_close(dev_t dev, int flag, int otyp, struct cred *credp) 453 { 454 minor_t minor; 455 struct lofi_state *lsp; 456 457 mutex_enter(&lofi_lock); 458 minor = getminor(dev); 459 lsp = ddi_get_soft_state(lofi_statep, minor); 460 if (lsp == NULL) { 461 mutex_exit(&lofi_lock); 462 return (EINVAL); 463 } 464 mark_closed(lsp, otyp); 465 466 /* 467 * If we forcibly closed the underlying device (li_force), or 468 * asked for cleanup (li_cleanup), finish up if we're the last 469 * out of the door. 470 */ 471 if (minor != 0 && !is_opened(lsp) && 472 (lsp->ls_cleanup || lsp->ls_vp == NULL)) 473 lofi_free_handle(dev, minor, lsp, credp); 474 475 mutex_exit(&lofi_lock); 476 return (0); 477 } 478 479 /* 480 * Sets the mechanism's initialization vector (IV) if one is needed. 481 * The IV is computed from the data block number. lsp->ls_mech is 482 * altered so that: 483 * lsp->ls_mech.cm_param_len is set to the IV len. 484 * lsp->ls_mech.cm_param is set to the IV. 485 */ 486 static int 487 lofi_blk_mech(struct lofi_state *lsp, longlong_t lblkno) 488 { 489 int ret; 490 crypto_data_t cdata; 491 char *iv; 492 size_t iv_len; 493 size_t min; 494 void *data; 495 size_t datasz; 496 497 ASSERT(mutex_owned(&lsp->ls_crypto_lock)); 498 499 if (lsp == NULL) 500 return (CRYPTO_DEVICE_ERROR); 501 502 /* lsp->ls_mech.cm_param{_len} has already been set for static iv */ 503 if (lsp->ls_iv_type == IVM_NONE) { 504 return (CRYPTO_SUCCESS); 505 } 506 507 /* 508 * if kmem already alloced from previous call and it's the same size 509 * we need now, just recycle it; allocate new kmem only if we have to 510 */ 511 if (lsp->ls_mech.cm_param == NULL || 512 lsp->ls_mech.cm_param_len != lsp->ls_iv_len) { 513 iv_len = lsp->ls_iv_len; 514 iv = kmem_zalloc(iv_len, KM_SLEEP); 515 } else { 516 iv_len = lsp->ls_mech.cm_param_len; 517 iv = lsp->ls_mech.cm_param; 518 bzero(iv, iv_len); 519 } 520 521 switch (lsp->ls_iv_type) { 522 case IVM_ENC_BLKNO: 523 /* iv is not static, lblkno changes each time */ 524 data = &lblkno; 525 datasz = sizeof (lblkno); 526 break; 527 default: 528 data = 0; 529 datasz = 0; 530 break; 531 } 532 533 /* 534 * write blkno into the iv buffer padded on the left in case 535 * blkno ever grows bigger than its current longlong_t size 536 * or a variation other than blkno is used for the iv data 537 */ 538 min = MIN(datasz, iv_len); 539 bcopy(data, iv + (iv_len - min), min); 540 541 /* encrypt the data in-place to get the IV */ 542 SETUP_C_DATA(cdata, iv, iv_len); 543 544 ret = crypto_encrypt(&lsp->ls_iv_mech, &cdata, &lsp->ls_key, 545 NULL, NULL, NULL); 546 if (ret != CRYPTO_SUCCESS) { 547 cmn_err(CE_WARN, "failed to create iv for block %lld: (0x%x)", 548 lblkno, ret); 549 if (lsp->ls_mech.cm_param != iv) 550 kmem_free(iv, iv_len); 551 552 return (ret); 553 } 554 555 /* clean up the iv from the last computation */ 556 if (lsp->ls_mech.cm_param != NULL && lsp->ls_mech.cm_param != iv) 557 kmem_free(lsp->ls_mech.cm_param, lsp->ls_mech.cm_param_len); 558 559 lsp->ls_mech.cm_param_len = iv_len; 560 lsp->ls_mech.cm_param = iv; 561 562 return (CRYPTO_SUCCESS); 563 } 564 565 /* 566 * Performs encryption and decryption of a chunk of data of size "len", 567 * one DEV_BSIZE block at a time. "len" is assumed to be a multiple of 568 * DEV_BSIZE. 569 */ 570 static int 571 lofi_crypto(struct lofi_state *lsp, struct buf *bp, caddr_t plaintext, 572 caddr_t ciphertext, size_t len, boolean_t op_encrypt) 573 { 574 crypto_data_t cdata; 575 crypto_data_t wdata; 576 int ret; 577 longlong_t lblkno = bp->b_lblkno; 578 579 mutex_enter(&lsp->ls_crypto_lock); 580 581 /* 582 * though we could encrypt/decrypt entire "len" chunk of data, we need 583 * to break it into DEV_BSIZE pieces to capture blkno incrementing 584 */ 585 SETUP_C_DATA(cdata, plaintext, len); 586 cdata.cd_length = DEV_BSIZE; 587 if (ciphertext != NULL) { /* not in-place crypto */ 588 SETUP_C_DATA(wdata, ciphertext, len); 589 wdata.cd_length = DEV_BSIZE; 590 } 591 592 do { 593 ret = lofi_blk_mech(lsp, lblkno); 594 if (ret != CRYPTO_SUCCESS) 595 continue; 596 597 if (op_encrypt) { 598 ret = crypto_encrypt(&lsp->ls_mech, &cdata, 599 &lsp->ls_key, NULL, 600 ((ciphertext != NULL) ? &wdata : NULL), NULL); 601 } else { 602 ret = crypto_decrypt(&lsp->ls_mech, &cdata, 603 &lsp->ls_key, NULL, 604 ((ciphertext != NULL) ? &wdata : NULL), NULL); 605 } 606 607 cdata.cd_offset += DEV_BSIZE; 608 if (ciphertext != NULL) 609 wdata.cd_offset += DEV_BSIZE; 610 lblkno++; 611 } while (ret == CRYPTO_SUCCESS && cdata.cd_offset < len); 612 613 mutex_exit(&lsp->ls_crypto_lock); 614 615 if (ret != CRYPTO_SUCCESS) { 616 cmn_err(CE_WARN, "%s failed for block %lld: (0x%x)", 617 op_encrypt ? "crypto_encrypt()" : "crypto_decrypt()", 618 lblkno, ret); 619 } 620 621 return (ret); 622 } 623 624 #define RDWR_RAW 1 625 #define RDWR_BCOPY 2 626 627 static int 628 lofi_rdwr(caddr_t bufaddr, offset_t offset, struct buf *bp, 629 struct lofi_state *lsp, size_t len, int method, caddr_t bcopy_locn) 630 { 631 ssize_t resid; 632 int isread; 633 int error; 634 635 /* 636 * Handles reads/writes for both plain and encrypted lofi 637 * Note: offset is already shifted by lsp->ls_crypto_offset 638 * when it gets here. 639 */ 640 641 isread = bp->b_flags & B_READ; 642 if (isread) { 643 if (method == RDWR_BCOPY) { 644 /* DO NOT update bp->b_resid for bcopy */ 645 bcopy(bcopy_locn, bufaddr, len); 646 error = 0; 647 } else { /* RDWR_RAW */ 648 error = vn_rdwr(UIO_READ, lsp->ls_vp, bufaddr, len, 649 offset, UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, 650 &resid); 651 bp->b_resid = resid; 652 } 653 if (lsp->ls_crypto_enabled && error == 0) { 654 if (lofi_crypto(lsp, bp, bufaddr, NULL, len, 655 B_FALSE) != CRYPTO_SUCCESS) { 656 /* 657 * XXX: original code didn't set residual 658 * back to len because no error was expected 659 * from bcopy() if encryption is not enabled 660 */ 661 if (method != RDWR_BCOPY) 662 bp->b_resid = len; 663 error = EIO; 664 } 665 } 666 return (error); 667 } else { 668 void *iobuf = bufaddr; 669 670 if (lsp->ls_crypto_enabled) { 671 /* don't do in-place crypto to keep bufaddr intact */ 672 iobuf = kmem_alloc(len, KM_SLEEP); 673 if (lofi_crypto(lsp, bp, bufaddr, iobuf, len, 674 B_TRUE) != CRYPTO_SUCCESS) { 675 kmem_free(iobuf, len); 676 if (method != RDWR_BCOPY) 677 bp->b_resid = len; 678 return (EIO); 679 } 680 } 681 if (method == RDWR_BCOPY) { 682 /* DO NOT update bp->b_resid for bcopy */ 683 bcopy(iobuf, bcopy_locn, len); 684 error = 0; 685 } else { /* RDWR_RAW */ 686 error = vn_rdwr(UIO_WRITE, lsp->ls_vp, iobuf, len, 687 offset, UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, 688 &resid); 689 bp->b_resid = resid; 690 } 691 if (lsp->ls_crypto_enabled) { 692 kmem_free(iobuf, len); 693 } 694 return (error); 695 } 696 } 697 698 static int 699 lofi_mapped_rdwr(caddr_t bufaddr, offset_t offset, struct buf *bp, 700 struct lofi_state *lsp) 701 { 702 int error; 703 offset_t alignedoffset, mapoffset; 704 size_t xfersize; 705 int isread; 706 int smflags; 707 caddr_t mapaddr; 708 size_t len; 709 enum seg_rw srw; 710 int save_error; 711 712 /* 713 * Note: offset is already shifted by lsp->ls_crypto_offset 714 * when it gets here. 715 */ 716 if (lsp->ls_crypto_enabled) 717 ASSERT(lsp->ls_vp_comp_size == lsp->ls_vp_size); 718 719 /* 720 * segmap always gives us an 8K (MAXBSIZE) chunk, aligned on 721 * an 8K boundary, but the buf transfer address may not be 722 * aligned on more than a 512-byte boundary (we don't enforce 723 * that even though we could). This matters since the initial 724 * part of the transfer may not start at offset 0 within the 725 * segmap'd chunk. So we have to compensate for that with 726 * 'mapoffset'. Subsequent chunks always start off at the 727 * beginning, and the last is capped by b_resid 728 * 729 * Visually, where "|" represents page map boundaries: 730 * alignedoffset (mapaddr begins at this segmap boundary) 731 * | offset (from beginning of file) 732 * | | len 733 * v v v 734 * ===|====X========|====...======|========X====|==== 735 * /-------------...---------------/ 736 * ^ bp->b_bcount/bp->b_resid at start 737 * /----/--------/----...------/--------/ 738 * ^ ^ ^ ^ ^ 739 * | | | | nth xfersize (<= MAXBSIZE) 740 * | | 2nd thru n-1st xfersize (= MAXBSIZE) 741 * | 1st xfersize (<= MAXBSIZE) 742 * mapoffset (offset into 1st segmap, non-0 1st time, 0 thereafter) 743 * 744 * Notes: "alignedoffset" is "offset" rounded down to nearest 745 * MAXBSIZE boundary. "len" is next page boundary of size 746 * PAGESIZE after "alignedoffset". 747 */ 748 mapoffset = offset & MAXBOFFSET; 749 alignedoffset = offset - mapoffset; 750 bp->b_resid = bp->b_bcount; 751 isread = bp->b_flags & B_READ; 752 srw = isread ? S_READ : S_WRITE; 753 do { 754 xfersize = MIN(lsp->ls_vp_comp_size - offset, 755 MIN(MAXBSIZE - mapoffset, bp->b_resid)); 756 len = roundup(mapoffset + xfersize, PAGESIZE); 757 mapaddr = segmap_getmapflt(segkmap, lsp->ls_vp, 758 alignedoffset, MAXBSIZE, 1, srw); 759 /* 760 * Now fault in the pages. This lets us check 761 * for errors before we reference mapaddr and 762 * try to resolve the fault in bcopy (which would 763 * panic instead). And this can easily happen, 764 * particularly if you've lofi'd a file over NFS 765 * and someone deletes the file on the server. 766 */ 767 error = segmap_fault(kas.a_hat, segkmap, mapaddr, 768 len, F_SOFTLOCK, srw); 769 if (error) { 770 (void) segmap_release(segkmap, mapaddr, 0); 771 if (FC_CODE(error) == FC_OBJERR) 772 error = FC_ERRNO(error); 773 else 774 error = EIO; 775 break; 776 } 777 /* error may be non-zero for encrypted lofi */ 778 error = lofi_rdwr(bufaddr, 0, bp, lsp, xfersize, 779 RDWR_BCOPY, mapaddr + mapoffset); 780 if (error == 0) { 781 bp->b_resid -= xfersize; 782 bufaddr += xfersize; 783 offset += xfersize; 784 } 785 smflags = 0; 786 if (isread) { 787 smflags |= SM_FREE; 788 /* 789 * If we're reading an entire page starting 790 * at a page boundary, there's a good chance 791 * we won't need it again. Put it on the 792 * head of the freelist. 793 */ 794 if (mapoffset == 0 && xfersize == MAXBSIZE) 795 smflags |= SM_DONTNEED; 796 } else { 797 if (error == 0) /* write back good pages */ 798 smflags |= SM_WRITE; 799 } 800 (void) segmap_fault(kas.a_hat, segkmap, mapaddr, 801 len, F_SOFTUNLOCK, srw); 802 save_error = segmap_release(segkmap, mapaddr, smflags); 803 if (error == 0) 804 error = save_error; 805 /* only the first map may start partial */ 806 mapoffset = 0; 807 alignedoffset += MAXBSIZE; 808 } while ((error == 0) && (bp->b_resid > 0) && 809 (offset < lsp->ls_vp_comp_size)); 810 811 return (error); 812 } 813 814 /* 815 * Check if segment seg_index is present in the decompressed segment 816 * data cache. 817 * 818 * Returns a pointer to the decompressed segment data cache entry if 819 * found, and NULL when decompressed data for this segment is not yet 820 * cached. 821 */ 822 static struct lofi_comp_cache * 823 lofi_find_comp_data(struct lofi_state *lsp, uint64_t seg_index) 824 { 825 struct lofi_comp_cache *lc; 826 827 ASSERT(mutex_owned(&lsp->ls_comp_cache_lock)); 828 829 for (lc = list_head(&lsp->ls_comp_cache); lc != NULL; 830 lc = list_next(&lsp->ls_comp_cache, lc)) { 831 if (lc->lc_index == seg_index) { 832 /* 833 * Decompressed segment data was found in the 834 * cache. 835 * 836 * The cache uses an LRU replacement strategy; 837 * move the entry to head of list. 838 */ 839 list_remove(&lsp->ls_comp_cache, lc); 840 list_insert_head(&lsp->ls_comp_cache, lc); 841 return (lc); 842 } 843 } 844 return (NULL); 845 } 846 847 /* 848 * Add the data for a decompressed segment at segment index 849 * seg_index to the cache of the decompressed segments. 850 * 851 * Returns a pointer to the cache element structure in case 852 * the data was added to the cache; returns NULL when the data 853 * wasn't cached. 854 */ 855 static struct lofi_comp_cache * 856 lofi_add_comp_data(struct lofi_state *lsp, uint64_t seg_index, 857 uchar_t *data) 858 { 859 struct lofi_comp_cache *lc; 860 861 ASSERT(mutex_owned(&lsp->ls_comp_cache_lock)); 862 863 while (lsp->ls_comp_cache_count > lofi_max_comp_cache) { 864 lc = list_remove_tail(&lsp->ls_comp_cache); 865 ASSERT(lc != NULL); 866 kmem_free(lc->lc_data, lsp->ls_uncomp_seg_sz); 867 kmem_free(lc, sizeof (struct lofi_comp_cache)); 868 lsp->ls_comp_cache_count--; 869 } 870 871 /* 872 * Do not cache when disabled by tunable variable 873 */ 874 if (lofi_max_comp_cache == 0) 875 return (NULL); 876 877 /* 878 * When the cache has not yet reached the maximum allowed 879 * number of segments, allocate a new cache element. 880 * Otherwise the cache is full; reuse the last list element 881 * (LRU) for caching the decompressed segment data. 882 * 883 * The cache element for the new decompressed segment data is 884 * added to the head of the list. 885 */ 886 if (lsp->ls_comp_cache_count < lofi_max_comp_cache) { 887 lc = kmem_alloc(sizeof (struct lofi_comp_cache), KM_SLEEP); 888 lc->lc_data = NULL; 889 list_insert_head(&lsp->ls_comp_cache, lc); 890 lsp->ls_comp_cache_count++; 891 } else { 892 lc = list_remove_tail(&lsp->ls_comp_cache); 893 if (lc == NULL) 894 return (NULL); 895 list_insert_head(&lsp->ls_comp_cache, lc); 896 } 897 898 /* 899 * Free old uncompressed segment data when reusing a cache 900 * entry. 901 */ 902 if (lc->lc_data != NULL) 903 kmem_free(lc->lc_data, lsp->ls_uncomp_seg_sz); 904 905 lc->lc_data = data; 906 lc->lc_index = seg_index; 907 return (lc); 908 } 909 910 911 /*ARGSUSED*/ 912 static int 913 gzip_decompress(void *src, size_t srclen, void *dst, 914 size_t *dstlen, int level) 915 { 916 ASSERT(*dstlen >= srclen); 917 918 if (z_uncompress(dst, dstlen, src, srclen) != Z_OK) 919 return (-1); 920 return (0); 921 } 922 923 #define LZMA_HEADER_SIZE (LZMA_PROPS_SIZE + 8) 924 /*ARGSUSED*/ 925 static int 926 lzma_decompress(void *src, size_t srclen, void *dst, 927 size_t *dstlen, int level) 928 { 929 size_t insizepure; 930 void *actual_src; 931 ELzmaStatus status; 932 933 insizepure = srclen - LZMA_HEADER_SIZE; 934 actual_src = (void *)((Byte *)src + LZMA_HEADER_SIZE); 935 936 if (LzmaDecode((Byte *)dst, (size_t *)dstlen, 937 (const Byte *)actual_src, &insizepure, 938 (const Byte *)src, LZMA_PROPS_SIZE, LZMA_FINISH_ANY, &status, 939 &g_Alloc) != SZ_OK) { 940 return (-1); 941 } 942 return (0); 943 } 944 945 /* 946 * This is basically what strategy used to be before we found we 947 * needed task queues. 948 */ 949 static void 950 lofi_strategy_task(void *arg) 951 { 952 struct buf *bp = (struct buf *)arg; 953 int error; 954 struct lofi_state *lsp; 955 offset_t offset; 956 caddr_t bufaddr; 957 size_t len; 958 size_t xfersize; 959 boolean_t bufinited = B_FALSE; 960 961 lsp = ddi_get_soft_state(lofi_statep, getminor(bp->b_edev)); 962 if (lsp == NULL) { 963 error = ENXIO; 964 goto errout; 965 } 966 if (lsp->ls_kstat) { 967 mutex_enter(lsp->ls_kstat->ks_lock); 968 kstat_waitq_to_runq(KSTAT_IO_PTR(lsp->ls_kstat)); 969 mutex_exit(lsp->ls_kstat->ks_lock); 970 } 971 bp_mapin(bp); 972 bufaddr = bp->b_un.b_addr; 973 offset = bp->b_lblkno * DEV_BSIZE; /* offset within file */ 974 if (lsp->ls_crypto_enabled) { 975 /* encrypted data really begins after crypto header */ 976 offset += lsp->ls_crypto_offset; 977 } 978 len = bp->b_bcount; 979 bufinited = B_TRUE; 980 981 if (lsp->ls_vp == NULL || lsp->ls_vp_closereq) { 982 error = EIO; 983 goto errout; 984 } 985 986 /* 987 * We used to always use vn_rdwr here, but we cannot do that because 988 * we might decide to read or write from the the underlying 989 * file during this call, which would be a deadlock because 990 * we have the rw_lock. So instead we page, unless it's not 991 * mapable or it's a character device or it's an encrypted lofi. 992 */ 993 if ((lsp->ls_vp->v_flag & VNOMAP) || (lsp->ls_vp->v_type == VCHR) || 994 lsp->ls_crypto_enabled) { 995 error = lofi_rdwr(bufaddr, offset, bp, lsp, len, RDWR_RAW, 996 NULL); 997 } else if (lsp->ls_uncomp_seg_sz == 0) { 998 error = lofi_mapped_rdwr(bufaddr, offset, bp, lsp); 999 } else { 1000 uchar_t *compressed_seg = NULL, *cmpbuf; 1001 uchar_t *uncompressed_seg = NULL; 1002 lofi_compress_info_t *li; 1003 size_t oblkcount; 1004 ulong_t seglen; 1005 uint64_t sblkno, eblkno, cmpbytes; 1006 uint64_t uncompressed_seg_index; 1007 struct lofi_comp_cache *lc; 1008 offset_t sblkoff, eblkoff; 1009 u_offset_t salign, ealign; 1010 u_offset_t sdiff; 1011 uint32_t comp_data_sz; 1012 uint64_t i; 1013 1014 /* 1015 * From here on we're dealing primarily with compressed files 1016 */ 1017 ASSERT(!lsp->ls_crypto_enabled); 1018 1019 /* 1020 * Compressed files can only be read from and 1021 * not written to 1022 */ 1023 if (!(bp->b_flags & B_READ)) { 1024 bp->b_resid = bp->b_bcount; 1025 error = EROFS; 1026 goto done; 1027 } 1028 1029 ASSERT(lsp->ls_comp_algorithm_index >= 0); 1030 li = &lofi_compress_table[lsp->ls_comp_algorithm_index]; 1031 /* 1032 * Compute starting and ending compressed segment numbers 1033 * We use only bitwise operations avoiding division and 1034 * modulus because we enforce the compression segment size 1035 * to a power of 2 1036 */ 1037 sblkno = offset >> lsp->ls_comp_seg_shift; 1038 sblkoff = offset & (lsp->ls_uncomp_seg_sz - 1); 1039 eblkno = (offset + bp->b_bcount) >> lsp->ls_comp_seg_shift; 1040 eblkoff = (offset + bp->b_bcount) & (lsp->ls_uncomp_seg_sz - 1); 1041 1042 /* 1043 * Check the decompressed segment cache. 1044 * 1045 * The cache is used only when the requested data 1046 * is within a segment. Requests that cross 1047 * segment boundaries bypass the cache. 1048 */ 1049 if (sblkno == eblkno || 1050 (sblkno + 1 == eblkno && eblkoff == 0)) { 1051 /* 1052 * Request doesn't cross a segment boundary, 1053 * now check the cache. 1054 */ 1055 mutex_enter(&lsp->ls_comp_cache_lock); 1056 lc = lofi_find_comp_data(lsp, sblkno); 1057 if (lc != NULL) { 1058 /* 1059 * We've found the decompressed segment 1060 * data in the cache; reuse it. 1061 */ 1062 bcopy(lc->lc_data + sblkoff, bufaddr, 1063 bp->b_bcount); 1064 mutex_exit(&lsp->ls_comp_cache_lock); 1065 bp->b_resid = 0; 1066 error = 0; 1067 goto done; 1068 } 1069 mutex_exit(&lsp->ls_comp_cache_lock); 1070 } 1071 1072 /* 1073 * Align start offset to block boundary for segmap 1074 */ 1075 salign = lsp->ls_comp_seg_index[sblkno]; 1076 sdiff = salign & (DEV_BSIZE - 1); 1077 salign -= sdiff; 1078 if (eblkno >= (lsp->ls_comp_index_sz - 1)) { 1079 /* 1080 * We're dealing with the last segment of 1081 * the compressed file -- the size of this 1082 * segment *may not* be the same as the 1083 * segment size for the file 1084 */ 1085 eblkoff = (offset + bp->b_bcount) & 1086 (lsp->ls_uncomp_last_seg_sz - 1); 1087 ealign = lsp->ls_vp_comp_size; 1088 } else { 1089 ealign = lsp->ls_comp_seg_index[eblkno + 1]; 1090 } 1091 1092 /* 1093 * Preserve original request paramaters 1094 */ 1095 oblkcount = bp->b_bcount; 1096 1097 /* 1098 * Assign the calculated parameters 1099 */ 1100 comp_data_sz = ealign - salign; 1101 bp->b_bcount = comp_data_sz; 1102 1103 /* 1104 * Allocate fixed size memory blocks to hold compressed 1105 * segments and one uncompressed segment since we 1106 * uncompress segments one at a time 1107 */ 1108 compressed_seg = kmem_alloc(bp->b_bcount, KM_SLEEP); 1109 uncompressed_seg = kmem_alloc(lsp->ls_uncomp_seg_sz, KM_SLEEP); 1110 /* 1111 * Map in the calculated number of blocks 1112 */ 1113 error = lofi_mapped_rdwr((caddr_t)compressed_seg, salign, 1114 bp, lsp); 1115 1116 bp->b_bcount = oblkcount; 1117 bp->b_resid = oblkcount; 1118 if (error != 0) 1119 goto done; 1120 1121 /* 1122 * We have the compressed blocks, now uncompress them 1123 */ 1124 cmpbuf = compressed_seg + sdiff; 1125 for (i = sblkno; i <= eblkno; i++) { 1126 ASSERT(i < lsp->ls_comp_index_sz - 1); 1127 1128 /* 1129 * The last segment is special in that it is 1130 * most likely not going to be the same 1131 * (uncompressed) size as the other segments. 1132 */ 1133 if (i == (lsp->ls_comp_index_sz - 2)) { 1134 seglen = lsp->ls_uncomp_last_seg_sz; 1135 } else { 1136 seglen = lsp->ls_uncomp_seg_sz; 1137 } 1138 1139 /* 1140 * Each of the segment index entries contains 1141 * the starting block number for that segment. 1142 * The number of compressed bytes in a segment 1143 * is thus the difference between the starting 1144 * block number of this segment and the starting 1145 * block number of the next segment. 1146 */ 1147 cmpbytes = lsp->ls_comp_seg_index[i + 1] - 1148 lsp->ls_comp_seg_index[i]; 1149 1150 /* 1151 * The first byte in a compressed segment is a flag 1152 * that indicates whether this segment is compressed 1153 * at all 1154 */ 1155 if (*cmpbuf == UNCOMPRESSED) { 1156 bcopy((cmpbuf + SEGHDR), uncompressed_seg, 1157 (cmpbytes - SEGHDR)); 1158 } else { 1159 if (li->l_decompress((cmpbuf + SEGHDR), 1160 (cmpbytes - SEGHDR), uncompressed_seg, 1161 &seglen, li->l_level) != 0) { 1162 error = EIO; 1163 goto done; 1164 } 1165 } 1166 1167 uncompressed_seg_index = i; 1168 1169 /* 1170 * Determine how much uncompressed data we 1171 * have to copy and copy it 1172 */ 1173 xfersize = lsp->ls_uncomp_seg_sz - sblkoff; 1174 if (i == eblkno) 1175 xfersize -= (lsp->ls_uncomp_seg_sz - eblkoff); 1176 1177 bcopy((uncompressed_seg + sblkoff), bufaddr, xfersize); 1178 1179 cmpbuf += cmpbytes; 1180 bufaddr += xfersize; 1181 bp->b_resid -= xfersize; 1182 sblkoff = 0; 1183 1184 if (bp->b_resid == 0) 1185 break; 1186 } 1187 1188 /* 1189 * Add the data for the last decopressed segment to 1190 * the cache. 1191 * 1192 * In case the uncompressed segment data was added to (and 1193 * is referenced by) the cache, make sure we don't free it 1194 * here. 1195 */ 1196 mutex_enter(&lsp->ls_comp_cache_lock); 1197 if ((lc = lofi_add_comp_data(lsp, uncompressed_seg_index, 1198 uncompressed_seg)) != NULL) { 1199 uncompressed_seg = NULL; 1200 } 1201 mutex_exit(&lsp->ls_comp_cache_lock); 1202 1203 done: 1204 if (compressed_seg != NULL) 1205 kmem_free(compressed_seg, comp_data_sz); 1206 if (uncompressed_seg != NULL) 1207 kmem_free(uncompressed_seg, lsp->ls_uncomp_seg_sz); 1208 } /* end of handling compressed files */ 1209 1210 errout: 1211 if (bufinited && lsp->ls_kstat) { 1212 size_t n_done = bp->b_bcount - bp->b_resid; 1213 kstat_io_t *kioptr; 1214 1215 mutex_enter(lsp->ls_kstat->ks_lock); 1216 kioptr = KSTAT_IO_PTR(lsp->ls_kstat); 1217 if (bp->b_flags & B_READ) { 1218 kioptr->nread += n_done; 1219 kioptr->reads++; 1220 } else { 1221 kioptr->nwritten += n_done; 1222 kioptr->writes++; 1223 } 1224 kstat_runq_exit(kioptr); 1225 mutex_exit(lsp->ls_kstat->ks_lock); 1226 } 1227 1228 mutex_enter(&lsp->ls_vp_lock); 1229 if (--lsp->ls_vp_iocount == 0) 1230 cv_broadcast(&lsp->ls_vp_cv); 1231 mutex_exit(&lsp->ls_vp_lock); 1232 1233 bioerror(bp, error); 1234 biodone(bp); 1235 } 1236 1237 static int 1238 lofi_strategy(struct buf *bp) 1239 { 1240 struct lofi_state *lsp; 1241 offset_t offset; 1242 1243 /* 1244 * We cannot just do I/O here, because the current thread 1245 * _might_ end up back in here because the underlying filesystem 1246 * wants a buffer, which eventually gets into bio_recycle and 1247 * might call into lofi to write out a delayed-write buffer. 1248 * This is bad if the filesystem above lofi is the same as below. 1249 * 1250 * We could come up with a complex strategy using threads to 1251 * do the I/O asynchronously, or we could use task queues. task 1252 * queues were incredibly easy so they win. 1253 */ 1254 lsp = ddi_get_soft_state(lofi_statep, getminor(bp->b_edev)); 1255 if (lsp == NULL) { 1256 bioerror(bp, ENXIO); 1257 biodone(bp); 1258 return (0); 1259 } 1260 1261 mutex_enter(&lsp->ls_vp_lock); 1262 if (lsp->ls_vp == NULL || lsp->ls_vp_closereq) { 1263 bioerror(bp, EIO); 1264 biodone(bp); 1265 mutex_exit(&lsp->ls_vp_lock); 1266 return (0); 1267 } 1268 1269 offset = bp->b_lblkno * DEV_BSIZE; /* offset within file */ 1270 if (lsp->ls_crypto_enabled) { 1271 /* encrypted data really begins after crypto header */ 1272 offset += lsp->ls_crypto_offset; 1273 } 1274 if (offset == lsp->ls_vp_size) { 1275 /* EOF */ 1276 if ((bp->b_flags & B_READ) != 0) { 1277 bp->b_resid = bp->b_bcount; 1278 bioerror(bp, 0); 1279 } else { 1280 /* writes should fail */ 1281 bioerror(bp, ENXIO); 1282 } 1283 biodone(bp); 1284 mutex_exit(&lsp->ls_vp_lock); 1285 return (0); 1286 } 1287 if (offset > lsp->ls_vp_size) { 1288 bioerror(bp, ENXIO); 1289 biodone(bp); 1290 mutex_exit(&lsp->ls_vp_lock); 1291 return (0); 1292 } 1293 lsp->ls_vp_iocount++; 1294 mutex_exit(&lsp->ls_vp_lock); 1295 1296 if (lsp->ls_kstat) { 1297 mutex_enter(lsp->ls_kstat->ks_lock); 1298 kstat_waitq_enter(KSTAT_IO_PTR(lsp->ls_kstat)); 1299 mutex_exit(lsp->ls_kstat->ks_lock); 1300 } 1301 (void) taskq_dispatch(lsp->ls_taskq, lofi_strategy_task, bp, KM_SLEEP); 1302 return (0); 1303 } 1304 1305 /*ARGSUSED2*/ 1306 static int 1307 lofi_read(dev_t dev, struct uio *uio, struct cred *credp) 1308 { 1309 if (getminor(dev) == 0) 1310 return (EINVAL); 1311 UIO_CHECK(uio); 1312 return (physio(lofi_strategy, NULL, dev, B_READ, minphys, uio)); 1313 } 1314 1315 /*ARGSUSED2*/ 1316 static int 1317 lofi_write(dev_t dev, struct uio *uio, struct cred *credp) 1318 { 1319 if (getminor(dev) == 0) 1320 return (EINVAL); 1321 UIO_CHECK(uio); 1322 return (physio(lofi_strategy, NULL, dev, B_WRITE, minphys, uio)); 1323 } 1324 1325 /*ARGSUSED2*/ 1326 static int 1327 lofi_aread(dev_t dev, struct aio_req *aio, struct cred *credp) 1328 { 1329 if (getminor(dev) == 0) 1330 return (EINVAL); 1331 UIO_CHECK(aio->aio_uio); 1332 return (aphysio(lofi_strategy, anocancel, dev, B_READ, minphys, aio)); 1333 } 1334 1335 /*ARGSUSED2*/ 1336 static int 1337 lofi_awrite(dev_t dev, struct aio_req *aio, struct cred *credp) 1338 { 1339 if (getminor(dev) == 0) 1340 return (EINVAL); 1341 UIO_CHECK(aio->aio_uio); 1342 return (aphysio(lofi_strategy, anocancel, dev, B_WRITE, minphys, aio)); 1343 } 1344 1345 /*ARGSUSED*/ 1346 static int 1347 lofi_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 1348 { 1349 switch (infocmd) { 1350 case DDI_INFO_DEVT2DEVINFO: 1351 *result = lofi_dip; 1352 return (DDI_SUCCESS); 1353 case DDI_INFO_DEVT2INSTANCE: 1354 *result = 0; 1355 return (DDI_SUCCESS); 1356 } 1357 return (DDI_FAILURE); 1358 } 1359 1360 static int 1361 lofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 1362 { 1363 int error; 1364 1365 if (cmd != DDI_ATTACH) 1366 return (DDI_FAILURE); 1367 error = ddi_soft_state_zalloc(lofi_statep, 0); 1368 if (error == DDI_FAILURE) { 1369 return (DDI_FAILURE); 1370 } 1371 error = ddi_create_minor_node(dip, LOFI_CTL_NODE, S_IFCHR, 0, 1372 DDI_PSEUDO, NULL); 1373 if (error == DDI_FAILURE) { 1374 ddi_soft_state_free(lofi_statep, 0); 1375 return (DDI_FAILURE); 1376 } 1377 /* driver handles kernel-issued IOCTLs */ 1378 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 1379 DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) { 1380 ddi_remove_minor_node(dip, NULL); 1381 ddi_soft_state_free(lofi_statep, 0); 1382 return (DDI_FAILURE); 1383 } 1384 lofi_dip = dip; 1385 ddi_report_dev(dip); 1386 return (DDI_SUCCESS); 1387 } 1388 1389 static int 1390 lofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1391 { 1392 if (cmd != DDI_DETACH) 1393 return (DDI_FAILURE); 1394 if (lofi_busy()) 1395 return (DDI_FAILURE); 1396 lofi_dip = NULL; 1397 ddi_remove_minor_node(dip, NULL); 1398 ddi_prop_remove_all(dip); 1399 ddi_soft_state_free(lofi_statep, 0); 1400 return (DDI_SUCCESS); 1401 } 1402 1403 /* 1404 * With addition of encryption, be careful that encryption key is wiped before 1405 * kernel memory structures are freed, and also that key is not accidentally 1406 * passed out into userland structures. 1407 */ 1408 static void 1409 free_lofi_ioctl(struct lofi_ioctl *klip) 1410 { 1411 /* Make sure this encryption key doesn't stick around */ 1412 bzero(klip->li_key, sizeof (klip->li_key)); 1413 kmem_free(klip, sizeof (struct lofi_ioctl)); 1414 } 1415 1416 /* 1417 * These two just simplify the rest of the ioctls that need to copyin/out 1418 * the lofi_ioctl structure. 1419 */ 1420 struct lofi_ioctl * 1421 copy_in_lofi_ioctl(const struct lofi_ioctl *ulip, int flag) 1422 { 1423 struct lofi_ioctl *klip; 1424 int error; 1425 1426 klip = kmem_alloc(sizeof (struct lofi_ioctl), KM_SLEEP); 1427 error = ddi_copyin(ulip, klip, sizeof (struct lofi_ioctl), flag); 1428 if (error) { 1429 free_lofi_ioctl(klip); 1430 return (NULL); 1431 } 1432 1433 /* make sure filename is always null-terminated */ 1434 klip->li_filename[MAXPATHLEN-1] = '\0'; 1435 1436 /* validate minor number */ 1437 if (klip->li_minor > lofi_max_files) { 1438 free_lofi_ioctl(klip); 1439 cmn_err(CE_WARN, "attempt to map more than lofi_max_files (%d)", 1440 lofi_max_files); 1441 return (NULL); 1442 } 1443 return (klip); 1444 } 1445 1446 int 1447 copy_out_lofi_ioctl(const struct lofi_ioctl *klip, struct lofi_ioctl *ulip, 1448 int flag) 1449 { 1450 int error; 1451 1452 /* 1453 * NOTE: Do NOT copy the crypto_key_t "back" to userland. 1454 * This ensures that an attacker can't trivially find the 1455 * key for a mapping just by issuing the ioctl. 1456 * 1457 * It can still be found by poking around in kmem with mdb(1), 1458 * but there is no point in making it easy when the info isn't 1459 * of any use in this direction anyway. 1460 * 1461 * Either way we don't actually have the raw key stored in 1462 * a form that we can get it anyway, since we just used it 1463 * to create a ctx template and didn't keep "the original". 1464 */ 1465 error = ddi_copyout(klip, ulip, sizeof (struct lofi_ioctl), flag); 1466 if (error) 1467 return (EFAULT); 1468 return (0); 1469 } 1470 1471 /* 1472 * Return the minor number 'filename' is mapped to, if it is. 1473 */ 1474 static int 1475 file_to_minor(char *filename) 1476 { 1477 minor_t minor; 1478 struct lofi_state *lsp; 1479 1480 ASSERT(mutex_owned(&lofi_lock)); 1481 for (minor = 1; minor <= lofi_max_files; minor++) { 1482 lsp = ddi_get_soft_state(lofi_statep, minor); 1483 if (lsp == NULL) 1484 continue; 1485 if (strcmp(lsp->ls_filename, filename) == 0) 1486 return (minor); 1487 } 1488 return (0); 1489 } 1490 1491 /* 1492 * lofiadm does some validation, but since Joe Random (or crashme) could 1493 * do our ioctls, we need to do some validation too. 1494 */ 1495 static int 1496 valid_filename(const char *filename) 1497 { 1498 static char *blkprefix = "/dev/" LOFI_BLOCK_NAME "/"; 1499 static char *charprefix = "/dev/" LOFI_CHAR_NAME "/"; 1500 1501 /* must be absolute path */ 1502 if (filename[0] != '/') 1503 return (0); 1504 /* must not be lofi */ 1505 if (strncmp(filename, blkprefix, strlen(blkprefix)) == 0) 1506 return (0); 1507 if (strncmp(filename, charprefix, strlen(charprefix)) == 0) 1508 return (0); 1509 return (1); 1510 } 1511 1512 /* 1513 * Fakes up a disk geometry, and one big partition, based on the size 1514 * of the file. This is needed because we allow newfs'ing the device, 1515 * and newfs will do several disk ioctls to figure out the geometry and 1516 * partition information. It uses that information to determine the parameters 1517 * to pass to mkfs. Geometry is pretty much irrelevant these days, but we 1518 * have to support it. 1519 */ 1520 static void 1521 fake_disk_geometry(struct lofi_state *lsp) 1522 { 1523 u_offset_t dsize = lsp->ls_vp_size - lsp->ls_crypto_offset; 1524 1525 /* dk_geom - see dkio(7I) */ 1526 /* 1527 * dkg_ncyl _could_ be set to one here (one big cylinder with gobs 1528 * of sectors), but that breaks programs like fdisk which want to 1529 * partition a disk by cylinder. With one cylinder, you can't create 1530 * an fdisk partition and put pcfs on it for testing (hard to pick 1531 * a number between one and one). 1532 * 1533 * The cheezy floppy test is an attempt to not have too few cylinders 1534 * for a small file, or so many on a big file that you waste space 1535 * for backup superblocks or cylinder group structures. 1536 */ 1537 if (dsize < (2 * 1024 * 1024)) /* floppy? */ 1538 lsp->ls_dkg.dkg_ncyl = dsize / (100 * 1024); 1539 else 1540 lsp->ls_dkg.dkg_ncyl = dsize / (300 * 1024); 1541 /* in case file file is < 100k */ 1542 if (lsp->ls_dkg.dkg_ncyl == 0) 1543 lsp->ls_dkg.dkg_ncyl = 1; 1544 lsp->ls_dkg.dkg_acyl = 0; 1545 lsp->ls_dkg.dkg_bcyl = 0; 1546 lsp->ls_dkg.dkg_nhead = 1; 1547 lsp->ls_dkg.dkg_obs1 = 0; 1548 lsp->ls_dkg.dkg_intrlv = 0; 1549 lsp->ls_dkg.dkg_obs2 = 0; 1550 lsp->ls_dkg.dkg_obs3 = 0; 1551 lsp->ls_dkg.dkg_apc = 0; 1552 lsp->ls_dkg.dkg_rpm = 7200; 1553 lsp->ls_dkg.dkg_pcyl = lsp->ls_dkg.dkg_ncyl + lsp->ls_dkg.dkg_acyl; 1554 lsp->ls_dkg.dkg_nsect = dsize / (DEV_BSIZE * lsp->ls_dkg.dkg_ncyl); 1555 lsp->ls_dkg.dkg_write_reinstruct = 0; 1556 lsp->ls_dkg.dkg_read_reinstruct = 0; 1557 1558 /* vtoc - see dkio(7I) */ 1559 bzero(&lsp->ls_vtoc, sizeof (struct vtoc)); 1560 lsp->ls_vtoc.v_sanity = VTOC_SANE; 1561 lsp->ls_vtoc.v_version = V_VERSION; 1562 (void) strncpy(lsp->ls_vtoc.v_volume, LOFI_DRIVER_NAME, 1563 sizeof (lsp->ls_vtoc.v_volume)); 1564 lsp->ls_vtoc.v_sectorsz = DEV_BSIZE; 1565 lsp->ls_vtoc.v_nparts = 1; 1566 lsp->ls_vtoc.v_part[0].p_tag = V_UNASSIGNED; 1567 1568 /* 1569 * A compressed file is read-only, other files can 1570 * be read-write 1571 */ 1572 if (lsp->ls_uncomp_seg_sz > 0) { 1573 lsp->ls_vtoc.v_part[0].p_flag = V_UNMNT | V_RONLY; 1574 } else { 1575 lsp->ls_vtoc.v_part[0].p_flag = V_UNMNT; 1576 } 1577 lsp->ls_vtoc.v_part[0].p_start = (daddr_t)0; 1578 /* 1579 * The partition size cannot just be the number of sectors, because 1580 * that might not end on a cylinder boundary. And if that's the case, 1581 * newfs/mkfs will print a scary warning. So just figure the size 1582 * based on the number of cylinders and sectors/cylinder. 1583 */ 1584 lsp->ls_vtoc.v_part[0].p_size = lsp->ls_dkg.dkg_pcyl * 1585 lsp->ls_dkg.dkg_nsect * lsp->ls_dkg.dkg_nhead; 1586 1587 /* dk_cinfo - see dkio(7I) */ 1588 bzero(&lsp->ls_ci, sizeof (struct dk_cinfo)); 1589 (void) strcpy(lsp->ls_ci.dki_cname, LOFI_DRIVER_NAME); 1590 lsp->ls_ci.dki_ctype = DKC_MD; 1591 lsp->ls_ci.dki_flags = 0; 1592 lsp->ls_ci.dki_cnum = 0; 1593 lsp->ls_ci.dki_addr = 0; 1594 lsp->ls_ci.dki_space = 0; 1595 lsp->ls_ci.dki_prio = 0; 1596 lsp->ls_ci.dki_vec = 0; 1597 (void) strcpy(lsp->ls_ci.dki_dname, LOFI_DRIVER_NAME); 1598 lsp->ls_ci.dki_unit = 0; 1599 lsp->ls_ci.dki_slave = 0; 1600 lsp->ls_ci.dki_partition = 0; 1601 /* 1602 * newfs uses this to set maxcontig. Must not be < 16, or it 1603 * will be 0 when newfs multiplies it by DEV_BSIZE and divides 1604 * it by the block size. Then tunefs doesn't work because 1605 * maxcontig is 0. 1606 */ 1607 lsp->ls_ci.dki_maxtransfer = 16; 1608 } 1609 1610 /* 1611 * map in a compressed file 1612 * 1613 * Read in the header and the index that follows. 1614 * 1615 * The header is as follows - 1616 * 1617 * Signature (name of the compression algorithm) 1618 * Compression segment size (a multiple of 512) 1619 * Number of index entries 1620 * Size of the last block 1621 * The array containing the index entries 1622 * 1623 * The header information is always stored in 1624 * network byte order on disk. 1625 */ 1626 static int 1627 lofi_map_compressed_file(struct lofi_state *lsp, char *buf) 1628 { 1629 uint32_t index_sz, header_len, i; 1630 ssize_t resid; 1631 enum uio_rw rw; 1632 char *tbuf = buf; 1633 int error; 1634 1635 /* The signature has already been read */ 1636 tbuf += sizeof (lsp->ls_comp_algorithm); 1637 bcopy(tbuf, &(lsp->ls_uncomp_seg_sz), sizeof (lsp->ls_uncomp_seg_sz)); 1638 lsp->ls_uncomp_seg_sz = ntohl(lsp->ls_uncomp_seg_sz); 1639 1640 /* 1641 * The compressed segment size must be a power of 2 1642 */ 1643 if (lsp->ls_uncomp_seg_sz < DEV_BSIZE || 1644 !ISP2(lsp->ls_uncomp_seg_sz)) 1645 return (EINVAL); 1646 1647 for (i = 0; !((lsp->ls_uncomp_seg_sz >> i) & 1); i++) 1648 ; 1649 1650 lsp->ls_comp_seg_shift = i; 1651 1652 tbuf += sizeof (lsp->ls_uncomp_seg_sz); 1653 bcopy(tbuf, &(lsp->ls_comp_index_sz), sizeof (lsp->ls_comp_index_sz)); 1654 lsp->ls_comp_index_sz = ntohl(lsp->ls_comp_index_sz); 1655 1656 tbuf += sizeof (lsp->ls_comp_index_sz); 1657 bcopy(tbuf, &(lsp->ls_uncomp_last_seg_sz), 1658 sizeof (lsp->ls_uncomp_last_seg_sz)); 1659 lsp->ls_uncomp_last_seg_sz = ntohl(lsp->ls_uncomp_last_seg_sz); 1660 1661 /* 1662 * Compute the total size of the uncompressed data 1663 * for use in fake_disk_geometry and other calculations. 1664 * Disk geometry has to be faked with respect to the 1665 * actual uncompressed data size rather than the 1666 * compressed file size. 1667 */ 1668 lsp->ls_vp_size = 1669 (u_offset_t)(lsp->ls_comp_index_sz - 2) * lsp->ls_uncomp_seg_sz 1670 + lsp->ls_uncomp_last_seg_sz; 1671 1672 /* 1673 * Index size is rounded up to DEV_BSIZE for ease 1674 * of segmapping 1675 */ 1676 index_sz = sizeof (*lsp->ls_comp_seg_index) * lsp->ls_comp_index_sz; 1677 header_len = sizeof (lsp->ls_comp_algorithm) + 1678 sizeof (lsp->ls_uncomp_seg_sz) + 1679 sizeof (lsp->ls_comp_index_sz) + 1680 sizeof (lsp->ls_uncomp_last_seg_sz); 1681 lsp->ls_comp_offbase = header_len + index_sz; 1682 1683 index_sz += header_len; 1684 index_sz = roundup(index_sz, DEV_BSIZE); 1685 1686 lsp->ls_comp_index_data = kmem_alloc(index_sz, KM_SLEEP); 1687 lsp->ls_comp_index_data_sz = index_sz; 1688 1689 /* 1690 * Read in the index -- this has a side-effect 1691 * of reading in the header as well 1692 */ 1693 rw = UIO_READ; 1694 error = vn_rdwr(rw, lsp->ls_vp, lsp->ls_comp_index_data, index_sz, 1695 0, UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, &resid); 1696 1697 if (error != 0) 1698 return (error); 1699 1700 /* Skip the header, this is where the index really begins */ 1701 lsp->ls_comp_seg_index = 1702 /*LINTED*/ 1703 (uint64_t *)(lsp->ls_comp_index_data + header_len); 1704 1705 /* 1706 * Now recompute offsets in the index to account for 1707 * the header length 1708 */ 1709 for (i = 0; i < lsp->ls_comp_index_sz; i++) { 1710 lsp->ls_comp_seg_index[i] = lsp->ls_comp_offbase + 1711 BE_64(lsp->ls_comp_seg_index[i]); 1712 } 1713 1714 return (error); 1715 } 1716 1717 /* 1718 * Check to see if the passed in signature is a valid 1719 * one. If it is valid, return the index into 1720 * lofi_compress_table. 1721 * 1722 * Return -1 if it is invalid 1723 */ 1724 static int lofi_compress_select(char *signature) 1725 { 1726 int i; 1727 1728 for (i = 0; i < LOFI_COMPRESS_FUNCTIONS; i++) { 1729 if (strcmp(lofi_compress_table[i].l_name, signature) == 0) 1730 return (i); 1731 } 1732 1733 return (-1); 1734 } 1735 1736 /* 1737 * map a file to a minor number. Return the minor number. 1738 */ 1739 static int 1740 lofi_map_file(dev_t dev, struct lofi_ioctl *ulip, int pickminor, 1741 int *rvalp, struct cred *credp, int ioctl_flag) 1742 { 1743 minor_t newminor; 1744 struct lofi_state *lsp; 1745 struct lofi_ioctl *klip; 1746 int error; 1747 struct vnode *vp; 1748 int64_t Nblocks_prop_val; 1749 int64_t Size_prop_val; 1750 int compress_index; 1751 vattr_t vattr; 1752 int flag; 1753 enum vtype v_type; 1754 int zalloced = 0; 1755 dev_t newdev; 1756 char namebuf[50]; 1757 char buf[DEV_BSIZE]; 1758 char crybuf[DEV_BSIZE]; 1759 ssize_t resid; 1760 boolean_t need_vn_close = B_FALSE; 1761 boolean_t keycopied = B_FALSE; 1762 boolean_t need_size_update = B_FALSE; 1763 1764 klip = copy_in_lofi_ioctl(ulip, ioctl_flag); 1765 if (klip == NULL) 1766 return (EFAULT); 1767 1768 mutex_enter(&lofi_lock); 1769 1770 if (!valid_filename(klip->li_filename)) { 1771 error = EINVAL; 1772 goto out; 1773 } 1774 1775 if (file_to_minor(klip->li_filename) != 0) { 1776 error = EBUSY; 1777 goto out; 1778 } 1779 1780 if (pickminor) { 1781 /* Find a free one */ 1782 for (newminor = 1; newminor <= lofi_max_files; newminor++) 1783 if (ddi_get_soft_state(lofi_statep, newminor) == NULL) 1784 break; 1785 if (newminor >= lofi_max_files) { 1786 error = EAGAIN; 1787 goto out; 1788 } 1789 } else { 1790 newminor = klip->li_minor; 1791 if (ddi_get_soft_state(lofi_statep, newminor) != NULL) { 1792 error = EEXIST; 1793 goto out; 1794 } 1795 } 1796 1797 /* make sure it's valid */ 1798 error = lookupname(klip->li_filename, UIO_SYSSPACE, FOLLOW, 1799 NULLVPP, &vp); 1800 if (error) { 1801 goto out; 1802 } 1803 v_type = vp->v_type; 1804 VN_RELE(vp); 1805 if (!V_ISLOFIABLE(v_type)) { 1806 error = EINVAL; 1807 goto out; 1808 } 1809 flag = FREAD | FWRITE | FOFFMAX | FEXCL; 1810 error = vn_open(klip->li_filename, UIO_SYSSPACE, flag, 0, &vp, 0, 0); 1811 if (error) { 1812 /* try read-only */ 1813 flag &= ~FWRITE; 1814 error = vn_open(klip->li_filename, UIO_SYSSPACE, flag, 0, 1815 &vp, 0, 0); 1816 if (error) { 1817 goto out; 1818 } 1819 } 1820 need_vn_close = B_TRUE; 1821 1822 vattr.va_mask = AT_SIZE; 1823 error = VOP_GETATTR(vp, &vattr, 0, credp, NULL); 1824 if (error) { 1825 goto out; 1826 } 1827 /* the file needs to be a multiple of the block size */ 1828 if ((vattr.va_size % DEV_BSIZE) != 0) { 1829 error = EINVAL; 1830 goto out; 1831 } 1832 newdev = makedevice(getmajor(dev), newminor); 1833 Size_prop_val = vattr.va_size; 1834 if ((ddi_prop_update_int64(newdev, lofi_dip, 1835 SIZE_PROP_NAME, Size_prop_val)) != DDI_PROP_SUCCESS) { 1836 error = EINVAL; 1837 goto out; 1838 } 1839 Nblocks_prop_val = vattr.va_size / DEV_BSIZE; 1840 if ((ddi_prop_update_int64(newdev, lofi_dip, 1841 NBLOCKS_PROP_NAME, Nblocks_prop_val)) != DDI_PROP_SUCCESS) { 1842 error = EINVAL; 1843 goto propout; 1844 } 1845 error = ddi_soft_state_zalloc(lofi_statep, newminor); 1846 if (error == DDI_FAILURE) { 1847 error = ENOMEM; 1848 goto propout; 1849 } 1850 zalloced = 1; 1851 (void) snprintf(namebuf, sizeof (namebuf), "%d", newminor); 1852 error = ddi_create_minor_node(lofi_dip, namebuf, S_IFBLK, newminor, 1853 DDI_PSEUDO, NULL); 1854 if (error != DDI_SUCCESS) { 1855 error = ENXIO; 1856 goto propout; 1857 } 1858 (void) snprintf(namebuf, sizeof (namebuf), "%d,raw", newminor); 1859 error = ddi_create_minor_node(lofi_dip, namebuf, S_IFCHR, newminor, 1860 DDI_PSEUDO, NULL); 1861 if (error != DDI_SUCCESS) { 1862 /* remove block node */ 1863 (void) snprintf(namebuf, sizeof (namebuf), "%d", newminor); 1864 ddi_remove_minor_node(lofi_dip, namebuf); 1865 error = ENXIO; 1866 goto propout; 1867 } 1868 lsp = ddi_get_soft_state(lofi_statep, newminor); 1869 lsp->ls_filename_sz = strlen(klip->li_filename) + 1; 1870 lsp->ls_filename = kmem_alloc(lsp->ls_filename_sz, KM_SLEEP); 1871 (void) snprintf(namebuf, sizeof (namebuf), "%s_taskq_%d", 1872 LOFI_DRIVER_NAME, newminor); 1873 lsp->ls_taskq = taskq_create(namebuf, lofi_taskq_nthreads, 1874 minclsyspri, 1, lofi_taskq_maxalloc, 0); 1875 lsp->ls_kstat = kstat_create(LOFI_DRIVER_NAME, newminor, 1876 NULL, "disk", KSTAT_TYPE_IO, 1, 0); 1877 if (lsp->ls_kstat) { 1878 mutex_init(&lsp->ls_kstat_lock, NULL, MUTEX_DRIVER, NULL); 1879 lsp->ls_kstat->ks_lock = &lsp->ls_kstat_lock; 1880 kstat_install(lsp->ls_kstat); 1881 } 1882 cv_init(&lsp->ls_vp_cv, NULL, CV_DRIVER, NULL); 1883 mutex_init(&lsp->ls_vp_lock, NULL, MUTEX_DRIVER, NULL); 1884 1885 list_create(&lsp->ls_comp_cache, sizeof (struct lofi_comp_cache), 1886 offsetof(struct lofi_comp_cache, lc_list)); 1887 mutex_init(&lsp->ls_comp_cache_lock, NULL, MUTEX_DRIVER, NULL); 1888 1889 /* 1890 * save open mode so file can be closed properly and vnode counts 1891 * updated correctly. 1892 */ 1893 lsp->ls_openflag = flag; 1894 1895 /* 1896 * Try to handle stacked lofs vnodes. 1897 */ 1898 if (vp->v_type == VREG) { 1899 if (VOP_REALVP(vp, &lsp->ls_vp, NULL) != 0) { 1900 lsp->ls_vp = vp; 1901 } else { 1902 /* 1903 * Even though vp was obtained via vn_open(), we 1904 * can't call vn_close() on it, since lofs will 1905 * pass the VOP_CLOSE() on down to the realvp 1906 * (which we are about to use). Hence we merely 1907 * drop the reference to the lofs vnode and hold 1908 * the realvp so things behave as if we've 1909 * opened the realvp without any interaction 1910 * with lofs. 1911 */ 1912 VN_HOLD(lsp->ls_vp); 1913 VN_RELE(vp); 1914 } 1915 } else { 1916 lsp->ls_vp = vp; 1917 } 1918 lsp->ls_vp_size = vattr.va_size; 1919 (void) strcpy(lsp->ls_filename, klip->li_filename); 1920 if (rvalp) 1921 *rvalp = (int)newminor; 1922 klip->li_minor = newminor; 1923 1924 /* 1925 * Initialize crypto details for encrypted lofi 1926 */ 1927 if (klip->li_crypto_enabled) { 1928 int ret; 1929 1930 mutex_init(&lsp->ls_crypto_lock, NULL, MUTEX_DRIVER, NULL); 1931 1932 lsp->ls_mech.cm_type = crypto_mech2id(klip->li_cipher); 1933 if (lsp->ls_mech.cm_type == CRYPTO_MECH_INVALID) { 1934 cmn_err(CE_WARN, "invalid cipher %s requested for %s", 1935 klip->li_cipher, lsp->ls_filename); 1936 error = EINVAL; 1937 goto propout; 1938 } 1939 1940 /* this is just initialization here */ 1941 lsp->ls_mech.cm_param = NULL; 1942 lsp->ls_mech.cm_param_len = 0; 1943 1944 lsp->ls_iv_type = klip->li_iv_type; 1945 lsp->ls_iv_mech.cm_type = crypto_mech2id(klip->li_iv_cipher); 1946 if (lsp->ls_iv_mech.cm_type == CRYPTO_MECH_INVALID) { 1947 cmn_err(CE_WARN, "invalid iv cipher %s requested" 1948 " for %s", klip->li_iv_cipher, lsp->ls_filename); 1949 error = EINVAL; 1950 goto propout; 1951 } 1952 1953 /* iv mech must itself take a null iv */ 1954 lsp->ls_iv_mech.cm_param = NULL; 1955 lsp->ls_iv_mech.cm_param_len = 0; 1956 lsp->ls_iv_len = klip->li_iv_len; 1957 1958 /* 1959 * Create ctx using li_cipher & the raw li_key after checking 1960 * that it isn't a weak key. 1961 */ 1962 lsp->ls_key.ck_format = CRYPTO_KEY_RAW; 1963 lsp->ls_key.ck_length = klip->li_key_len; 1964 lsp->ls_key.ck_data = kmem_alloc( 1965 CRYPTO_BITS2BYTES(lsp->ls_key.ck_length), KM_SLEEP); 1966 bcopy(klip->li_key, lsp->ls_key.ck_data, 1967 CRYPTO_BITS2BYTES(lsp->ls_key.ck_length)); 1968 keycopied = B_TRUE; 1969 1970 ret = crypto_key_check(&lsp->ls_mech, &lsp->ls_key); 1971 if (ret != CRYPTO_SUCCESS) { 1972 error = EINVAL; 1973 cmn_err(CE_WARN, "weak key check failed for cipher " 1974 "%s on file %s (0x%x)", klip->li_cipher, 1975 lsp->ls_filename, ret); 1976 goto propout; 1977 } 1978 } 1979 lsp->ls_crypto_enabled = klip->li_crypto_enabled; 1980 1981 /* 1982 * Read the file signature to check if it is compressed or encrypted. 1983 * Crypto signature is in a different location; both areas should 1984 * read to keep compression and encryption mutually exclusive. 1985 */ 1986 if (lsp->ls_crypto_enabled) { 1987 error = vn_rdwr(UIO_READ, lsp->ls_vp, crybuf, DEV_BSIZE, 1988 CRYOFF, UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, &resid); 1989 if (error != 0) 1990 goto propout; 1991 } 1992 error = vn_rdwr(UIO_READ, lsp->ls_vp, buf, DEV_BSIZE, 0, UIO_SYSSPACE, 1993 0, RLIM64_INFINITY, kcred, &resid); 1994 if (error != 0) 1995 goto propout; 1996 1997 /* initialize these variables for all lofi files */ 1998 lsp->ls_uncomp_seg_sz = 0; 1999 lsp->ls_vp_comp_size = lsp->ls_vp_size; 2000 lsp->ls_comp_algorithm[0] = '\0'; 2001 2002 /* encrypted lofi reads/writes shifted by crypto metadata size */ 2003 lsp->ls_crypto_offset = 0; 2004 2005 /* this is a compressed lofi */ 2006 if ((compress_index = lofi_compress_select(buf)) != -1) { 2007 2008 /* compression and encryption are mutually exclusive */ 2009 if (klip->li_crypto_enabled) { 2010 error = ENOTSUP; 2011 goto propout; 2012 } 2013 2014 /* initialize compression info for compressed lofi */ 2015 lsp->ls_comp_algorithm_index = compress_index; 2016 (void) strlcpy(lsp->ls_comp_algorithm, 2017 lofi_compress_table[compress_index].l_name, 2018 sizeof (lsp->ls_comp_algorithm)); 2019 2020 error = lofi_map_compressed_file(lsp, buf); 2021 if (error != 0) 2022 goto propout; 2023 need_size_update = B_TRUE; 2024 2025 /* this is an encrypted lofi */ 2026 } else if (strncmp(crybuf, lofi_crypto_magic, 2027 sizeof (lofi_crypto_magic)) == 0) { 2028 2029 char *marker = crybuf; 2030 2031 /* 2032 * This is the case where the header in the lofi image is 2033 * already initialized to indicate it is encrypted. 2034 * There is another case (see below) where encryption is 2035 * requested but the lofi image has never been used yet, 2036 * so the header needs to be written with encryption magic. 2037 */ 2038 2039 /* indicate this must be an encrypted lofi due to magic */ 2040 klip->li_crypto_enabled = B_TRUE; 2041 2042 /* 2043 * The encryption header information is laid out this way: 2044 * 6 bytes: hex "CFLOFI" 2045 * 2 bytes: version = 0 ... for now 2046 * 96 bytes: reserved1 (not implemented yet) 2047 * 4 bytes: data_sector = 2 ... for now 2048 * more... not implemented yet 2049 */ 2050 2051 /* copy the magic */ 2052 bcopy(marker, lsp->ls_crypto.magic, 2053 sizeof (lsp->ls_crypto.magic)); 2054 marker += sizeof (lsp->ls_crypto.magic); 2055 2056 /* read the encryption version number */ 2057 bcopy(marker, &(lsp->ls_crypto.version), 2058 sizeof (lsp->ls_crypto.version)); 2059 lsp->ls_crypto.version = ntohs(lsp->ls_crypto.version); 2060 marker += sizeof (lsp->ls_crypto.version); 2061 2062 /* read a chunk of reserved data */ 2063 bcopy(marker, lsp->ls_crypto.reserved1, 2064 sizeof (lsp->ls_crypto.reserved1)); 2065 marker += sizeof (lsp->ls_crypto.reserved1); 2066 2067 /* read block number where encrypted data begins */ 2068 bcopy(marker, &(lsp->ls_crypto.data_sector), 2069 sizeof (lsp->ls_crypto.data_sector)); 2070 lsp->ls_crypto.data_sector = ntohl(lsp->ls_crypto.data_sector); 2071 marker += sizeof (lsp->ls_crypto.data_sector); 2072 2073 /* and ignore the rest until it is implemented */ 2074 2075 lsp->ls_crypto_offset = lsp->ls_crypto.data_sector * DEV_BSIZE; 2076 need_size_update = B_TRUE; 2077 2078 /* neither compressed nor encrypted, BUT could be new encrypted lofi */ 2079 } else if (klip->li_crypto_enabled) { 2080 2081 /* 2082 * This is the case where encryption was requested but the 2083 * appears to be entirely blank where the encryption header 2084 * would have been in the lofi image. If it is blank, 2085 * assume it is a brand new lofi image and initialize the 2086 * header area with encryption magic and current version 2087 * header data. If it is not blank, that's an error. 2088 */ 2089 int i; 2090 char *marker; 2091 struct crypto_meta chead; 2092 2093 for (i = 0; i < sizeof (struct crypto_meta); i++) 2094 if (crybuf[i] != '\0') 2095 break; 2096 if (i != sizeof (struct crypto_meta)) { 2097 error = EINVAL; 2098 goto propout; 2099 } 2100 2101 /* nothing there, initialize as encrypted lofi */ 2102 marker = crybuf; 2103 bcopy(lofi_crypto_magic, marker, sizeof (lofi_crypto_magic)); 2104 marker += sizeof (lofi_crypto_magic); 2105 chead.version = htons(LOFI_CRYPTO_VERSION); 2106 bcopy(&(chead.version), marker, sizeof (chead.version)); 2107 marker += sizeof (chead.version); 2108 marker += sizeof (chead.reserved1); 2109 chead.data_sector = htonl(LOFI_CRYPTO_DATA_SECTOR); 2110 bcopy(&(chead.data_sector), marker, sizeof (chead.data_sector)); 2111 2112 /* write the header */ 2113 error = vn_rdwr(UIO_WRITE, lsp->ls_vp, crybuf, DEV_BSIZE, 2114 CRYOFF, UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, &resid); 2115 if (error != 0) 2116 goto propout; 2117 2118 /* fix things up so it looks like we read this info */ 2119 bcopy(lofi_crypto_magic, lsp->ls_crypto.magic, 2120 sizeof (lofi_crypto_magic)); 2121 lsp->ls_crypto.version = LOFI_CRYPTO_VERSION; 2122 lsp->ls_crypto.data_sector = LOFI_CRYPTO_DATA_SECTOR; 2123 2124 lsp->ls_crypto_offset = lsp->ls_crypto.data_sector * DEV_BSIZE; 2125 need_size_update = B_TRUE; 2126 } 2127 2128 /* 2129 * Either lsp->ls_vp_size or lsp->ls_crypto_offset changed; 2130 * for encrypted lofi, advertise that it is somewhat shorter 2131 * due to embedded crypto metadata section 2132 */ 2133 if (need_size_update) { 2134 /* update DDI properties */ 2135 Size_prop_val = lsp->ls_vp_size - lsp->ls_crypto_offset; 2136 if ((ddi_prop_update_int64(newdev, lofi_dip, SIZE_PROP_NAME, 2137 Size_prop_val)) != DDI_PROP_SUCCESS) { 2138 error = EINVAL; 2139 goto propout; 2140 } 2141 Nblocks_prop_val = 2142 (lsp->ls_vp_size - lsp->ls_crypto_offset) / DEV_BSIZE; 2143 if ((ddi_prop_update_int64(newdev, lofi_dip, NBLOCKS_PROP_NAME, 2144 Nblocks_prop_val)) != DDI_PROP_SUCCESS) { 2145 error = EINVAL; 2146 goto propout; 2147 } 2148 } 2149 2150 fake_disk_geometry(lsp); 2151 mutex_exit(&lofi_lock); 2152 (void) copy_out_lofi_ioctl(klip, ulip, ioctl_flag); 2153 free_lofi_ioctl(klip); 2154 return (0); 2155 2156 propout: 2157 if (keycopied) { 2158 bzero(lsp->ls_key.ck_data, 2159 CRYPTO_BITS2BYTES(lsp->ls_key.ck_length)); 2160 kmem_free(lsp->ls_key.ck_data, 2161 CRYPTO_BITS2BYTES(lsp->ls_key.ck_length)); 2162 lsp->ls_key.ck_data = NULL; 2163 lsp->ls_key.ck_length = 0; 2164 } 2165 2166 if (zalloced) 2167 ddi_soft_state_free(lofi_statep, newminor); 2168 2169 (void) ddi_prop_remove(newdev, lofi_dip, SIZE_PROP_NAME); 2170 (void) ddi_prop_remove(newdev, lofi_dip, NBLOCKS_PROP_NAME); 2171 2172 out: 2173 if (need_vn_close) { 2174 (void) VOP_CLOSE(vp, flag, 1, 0, credp, NULL); 2175 VN_RELE(vp); 2176 } 2177 2178 mutex_exit(&lofi_lock); 2179 free_lofi_ioctl(klip); 2180 return (error); 2181 } 2182 2183 /* 2184 * unmap a file. 2185 */ 2186 static int 2187 lofi_unmap_file(dev_t dev, struct lofi_ioctl *ulip, int byfilename, 2188 struct cred *credp, int ioctl_flag) 2189 { 2190 struct lofi_state *lsp; 2191 struct lofi_ioctl *klip; 2192 minor_t minor; 2193 2194 klip = copy_in_lofi_ioctl(ulip, ioctl_flag); 2195 if (klip == NULL) 2196 return (EFAULT); 2197 2198 mutex_enter(&lofi_lock); 2199 if (byfilename) { 2200 minor = file_to_minor(klip->li_filename); 2201 } else { 2202 minor = klip->li_minor; 2203 } 2204 if (minor == 0) { 2205 mutex_exit(&lofi_lock); 2206 free_lofi_ioctl(klip); 2207 return (ENXIO); 2208 } 2209 lsp = ddi_get_soft_state(lofi_statep, minor); 2210 if (lsp == NULL || lsp->ls_vp == NULL) { 2211 mutex_exit(&lofi_lock); 2212 free_lofi_ioctl(klip); 2213 return (ENXIO); 2214 } 2215 2216 /* 2217 * If it's still held open, we'll do one of three things: 2218 * 2219 * If no flag is set, just return EBUSY. 2220 * 2221 * If the 'cleanup' flag is set, unmap and remove the device when 2222 * the last user finishes. 2223 * 2224 * If the 'force' flag is set, then we forcibly close the underlying 2225 * file. Subsequent operations will fail, and the DKIOCSTATE ioctl 2226 * will return DKIO_DEV_GONE. When the device is last closed, the 2227 * device will be cleaned up appropriately. 2228 * 2229 * This is complicated by the fact that we may have outstanding 2230 * dispatched I/Os. Rather than having a single mutex to serialize all 2231 * I/O, we keep a count of the number of outstanding I/O requests 2232 * (ls_vp_iocount), as well as a flag to indicate that no new I/Os 2233 * should be dispatched (ls_vp_closereq). 2234 * 2235 * We set the flag, wait for the number of outstanding I/Os to reach 0, 2236 * and then close the underlying vnode. 2237 */ 2238 if (is_opened(lsp)) { 2239 if (klip->li_force) { 2240 mutex_enter(&lsp->ls_vp_lock); 2241 lsp->ls_vp_closereq = B_TRUE; 2242 /* wake up any threads waiting on dkiocstate */ 2243 cv_broadcast(&lsp->ls_vp_cv); 2244 while (lsp->ls_vp_iocount > 0) 2245 cv_wait(&lsp->ls_vp_cv, &lsp->ls_vp_lock); 2246 mutex_exit(&lsp->ls_vp_lock); 2247 lofi_free_handle(dev, minor, lsp, credp); 2248 2249 klip->li_minor = minor; 2250 mutex_exit(&lofi_lock); 2251 (void) copy_out_lofi_ioctl(klip, ulip, ioctl_flag); 2252 free_lofi_ioctl(klip); 2253 return (0); 2254 } else if (klip->li_cleanup) { 2255 lsp->ls_cleanup = 1; 2256 mutex_exit(&lofi_lock); 2257 free_lofi_ioctl(klip); 2258 return (0); 2259 } 2260 2261 mutex_exit(&lofi_lock); 2262 free_lofi_ioctl(klip); 2263 return (EBUSY); 2264 } 2265 2266 lofi_free_handle(dev, minor, lsp, credp); 2267 2268 klip->li_minor = minor; 2269 mutex_exit(&lofi_lock); 2270 (void) copy_out_lofi_ioctl(klip, ulip, ioctl_flag); 2271 free_lofi_ioctl(klip); 2272 return (0); 2273 } 2274 2275 /* 2276 * get the filename given the minor number, or the minor number given 2277 * the name. 2278 */ 2279 /*ARGSUSED*/ 2280 static int 2281 lofi_get_info(dev_t dev, struct lofi_ioctl *ulip, int which, 2282 struct cred *credp, int ioctl_flag) 2283 { 2284 struct lofi_state *lsp; 2285 struct lofi_ioctl *klip; 2286 int error; 2287 minor_t minor; 2288 2289 klip = copy_in_lofi_ioctl(ulip, ioctl_flag); 2290 if (klip == NULL) 2291 return (EFAULT); 2292 2293 switch (which) { 2294 case LOFI_GET_FILENAME: 2295 minor = klip->li_minor; 2296 if (minor == 0) { 2297 free_lofi_ioctl(klip); 2298 return (EINVAL); 2299 } 2300 2301 mutex_enter(&lofi_lock); 2302 lsp = ddi_get_soft_state(lofi_statep, minor); 2303 if (lsp == NULL) { 2304 mutex_exit(&lofi_lock); 2305 free_lofi_ioctl(klip); 2306 return (ENXIO); 2307 } 2308 (void) strcpy(klip->li_filename, lsp->ls_filename); 2309 (void) strlcpy(klip->li_algorithm, lsp->ls_comp_algorithm, 2310 sizeof (klip->li_algorithm)); 2311 klip->li_crypto_enabled = lsp->ls_crypto_enabled; 2312 mutex_exit(&lofi_lock); 2313 error = copy_out_lofi_ioctl(klip, ulip, ioctl_flag); 2314 free_lofi_ioctl(klip); 2315 return (error); 2316 case LOFI_GET_MINOR: 2317 mutex_enter(&lofi_lock); 2318 klip->li_minor = file_to_minor(klip->li_filename); 2319 /* caller should not depend on klip->li_crypto_enabled here */ 2320 mutex_exit(&lofi_lock); 2321 if (klip->li_minor == 0) { 2322 free_lofi_ioctl(klip); 2323 return (ENOENT); 2324 } 2325 error = copy_out_lofi_ioctl(klip, ulip, ioctl_flag); 2326 free_lofi_ioctl(klip); 2327 return (error); 2328 case LOFI_CHECK_COMPRESSED: 2329 mutex_enter(&lofi_lock); 2330 klip->li_minor = file_to_minor(klip->li_filename); 2331 mutex_exit(&lofi_lock); 2332 if (klip->li_minor == 0) { 2333 free_lofi_ioctl(klip); 2334 return (ENOENT); 2335 } 2336 mutex_enter(&lofi_lock); 2337 lsp = ddi_get_soft_state(lofi_statep, klip->li_minor); 2338 if (lsp == NULL) { 2339 mutex_exit(&lofi_lock); 2340 free_lofi_ioctl(klip); 2341 return (ENXIO); 2342 } 2343 ASSERT(strcmp(klip->li_filename, lsp->ls_filename) == 0); 2344 2345 (void) strlcpy(klip->li_algorithm, lsp->ls_comp_algorithm, 2346 sizeof (klip->li_algorithm)); 2347 mutex_exit(&lofi_lock); 2348 error = copy_out_lofi_ioctl(klip, ulip, ioctl_flag); 2349 free_lofi_ioctl(klip); 2350 return (error); 2351 default: 2352 free_lofi_ioctl(klip); 2353 return (EINVAL); 2354 } 2355 2356 } 2357 2358 static int 2359 lofi_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, 2360 int *rvalp) 2361 { 2362 int error; 2363 enum dkio_state dkstate; 2364 struct lofi_state *lsp; 2365 minor_t minor; 2366 2367 minor = getminor(dev); 2368 /* lofi ioctls only apply to the master device */ 2369 if (minor == 0) { 2370 struct lofi_ioctl *lip = (struct lofi_ioctl *)arg; 2371 2372 /* 2373 * the query command only need read-access - i.e., normal 2374 * users are allowed to do those on the ctl device as 2375 * long as they can open it read-only. 2376 */ 2377 switch (cmd) { 2378 case LOFI_MAP_FILE: 2379 if ((flag & FWRITE) == 0) 2380 return (EPERM); 2381 return (lofi_map_file(dev, lip, 1, rvalp, credp, flag)); 2382 case LOFI_MAP_FILE_MINOR: 2383 if ((flag & FWRITE) == 0) 2384 return (EPERM); 2385 return (lofi_map_file(dev, lip, 0, rvalp, credp, flag)); 2386 case LOFI_UNMAP_FILE: 2387 if ((flag & FWRITE) == 0) 2388 return (EPERM); 2389 return (lofi_unmap_file(dev, lip, 1, credp, flag)); 2390 case LOFI_UNMAP_FILE_MINOR: 2391 if ((flag & FWRITE) == 0) 2392 return (EPERM); 2393 return (lofi_unmap_file(dev, lip, 0, credp, flag)); 2394 case LOFI_GET_FILENAME: 2395 return (lofi_get_info(dev, lip, LOFI_GET_FILENAME, 2396 credp, flag)); 2397 case LOFI_GET_MINOR: 2398 return (lofi_get_info(dev, lip, LOFI_GET_MINOR, 2399 credp, flag)); 2400 case LOFI_GET_MAXMINOR: 2401 error = ddi_copyout(&lofi_max_files, &lip->li_minor, 2402 sizeof (lofi_max_files), flag); 2403 if (error) 2404 return (EFAULT); 2405 return (0); 2406 case LOFI_CHECK_COMPRESSED: 2407 return (lofi_get_info(dev, lip, LOFI_CHECK_COMPRESSED, 2408 credp, flag)); 2409 default: 2410 break; 2411 } 2412 } 2413 2414 mutex_enter(&lofi_lock); 2415 lsp = ddi_get_soft_state(lofi_statep, minor); 2416 if (lsp == NULL || lsp->ls_vp_closereq) { 2417 mutex_exit(&lofi_lock); 2418 return (ENXIO); 2419 } 2420 mutex_exit(&lofi_lock); 2421 2422 /* 2423 * We explicitly allow DKIOCSTATE, but all other ioctls should fail with 2424 * EIO as if the device was no longer present. 2425 */ 2426 if (lsp->ls_vp == NULL && cmd != DKIOCSTATE) 2427 return (EIO); 2428 2429 /* these are for faking out utilities like newfs */ 2430 switch (cmd) { 2431 case DKIOCGVTOC: 2432 switch (ddi_model_convert_from(flag & FMODELS)) { 2433 case DDI_MODEL_ILP32: { 2434 struct vtoc32 vtoc32; 2435 2436 vtoctovtoc32(lsp->ls_vtoc, vtoc32); 2437 if (ddi_copyout(&vtoc32, (void *)arg, 2438 sizeof (struct vtoc32), flag)) 2439 return (EFAULT); 2440 break; 2441 } 2442 2443 case DDI_MODEL_NONE: 2444 if (ddi_copyout(&lsp->ls_vtoc, (void *)arg, 2445 sizeof (struct vtoc), flag)) 2446 return (EFAULT); 2447 break; 2448 } 2449 return (0); 2450 case DKIOCINFO: 2451 error = ddi_copyout(&lsp->ls_ci, (void *)arg, 2452 sizeof (struct dk_cinfo), flag); 2453 if (error) 2454 return (EFAULT); 2455 return (0); 2456 case DKIOCG_VIRTGEOM: 2457 case DKIOCG_PHYGEOM: 2458 case DKIOCGGEOM: 2459 error = ddi_copyout(&lsp->ls_dkg, (void *)arg, 2460 sizeof (struct dk_geom), flag); 2461 if (error) 2462 return (EFAULT); 2463 return (0); 2464 case DKIOCSTATE: 2465 /* 2466 * Normally, lofi devices are always in the INSERTED state. If 2467 * a device is forcefully unmapped, then the device transitions 2468 * to the DKIO_DEV_GONE state. 2469 */ 2470 if (ddi_copyin((void *)arg, &dkstate, sizeof (dkstate), 2471 flag) != 0) 2472 return (EFAULT); 2473 2474 mutex_enter(&lsp->ls_vp_lock); 2475 lsp->ls_vp_iocount++; 2476 while (((dkstate == DKIO_INSERTED && lsp->ls_vp != NULL) || 2477 (dkstate == DKIO_DEV_GONE && lsp->ls_vp == NULL)) && 2478 !lsp->ls_vp_closereq) { 2479 /* 2480 * By virtue of having the device open, we know that 2481 * 'lsp' will remain valid when we return. 2482 */ 2483 if (!cv_wait_sig(&lsp->ls_vp_cv, 2484 &lsp->ls_vp_lock)) { 2485 lsp->ls_vp_iocount--; 2486 cv_broadcast(&lsp->ls_vp_cv); 2487 mutex_exit(&lsp->ls_vp_lock); 2488 return (EINTR); 2489 } 2490 } 2491 2492 dkstate = (!lsp->ls_vp_closereq && lsp->ls_vp != NULL ? 2493 DKIO_INSERTED : DKIO_DEV_GONE); 2494 lsp->ls_vp_iocount--; 2495 cv_broadcast(&lsp->ls_vp_cv); 2496 mutex_exit(&lsp->ls_vp_lock); 2497 2498 if (ddi_copyout(&dkstate, (void *)arg, 2499 sizeof (dkstate), flag) != 0) 2500 return (EFAULT); 2501 return (0); 2502 default: 2503 return (ENOTTY); 2504 } 2505 } 2506 2507 static struct cb_ops lofi_cb_ops = { 2508 lofi_open, /* open */ 2509 lofi_close, /* close */ 2510 lofi_strategy, /* strategy */ 2511 nodev, /* print */ 2512 nodev, /* dump */ 2513 lofi_read, /* read */ 2514 lofi_write, /* write */ 2515 lofi_ioctl, /* ioctl */ 2516 nodev, /* devmap */ 2517 nodev, /* mmap */ 2518 nodev, /* segmap */ 2519 nochpoll, /* poll */ 2520 ddi_prop_op, /* prop_op */ 2521 0, /* streamtab */ 2522 D_64BIT | D_NEW | D_MP, /* Driver compatibility flag */ 2523 CB_REV, 2524 lofi_aread, 2525 lofi_awrite 2526 }; 2527 2528 static struct dev_ops lofi_ops = { 2529 DEVO_REV, /* devo_rev, */ 2530 0, /* refcnt */ 2531 lofi_info, /* info */ 2532 nulldev, /* identify */ 2533 nulldev, /* probe */ 2534 lofi_attach, /* attach */ 2535 lofi_detach, /* detach */ 2536 nodev, /* reset */ 2537 &lofi_cb_ops, /* driver operations */ 2538 NULL, /* no bus operations */ 2539 NULL, /* power */ 2540 ddi_quiesce_not_needed, /* quiesce */ 2541 }; 2542 2543 static struct modldrv modldrv = { 2544 &mod_driverops, 2545 "loopback file driver", 2546 &lofi_ops, 2547 }; 2548 2549 static struct modlinkage modlinkage = { 2550 MODREV_1, 2551 &modldrv, 2552 NULL 2553 }; 2554 2555 int 2556 _init(void) 2557 { 2558 int error; 2559 2560 error = ddi_soft_state_init(&lofi_statep, 2561 sizeof (struct lofi_state), 0); 2562 if (error) 2563 return (error); 2564 2565 mutex_init(&lofi_lock, NULL, MUTEX_DRIVER, NULL); 2566 error = mod_install(&modlinkage); 2567 if (error) { 2568 mutex_destroy(&lofi_lock); 2569 ddi_soft_state_fini(&lofi_statep); 2570 } 2571 2572 return (error); 2573 } 2574 2575 int 2576 _fini(void) 2577 { 2578 int error; 2579 2580 if (lofi_busy()) 2581 return (EBUSY); 2582 2583 error = mod_remove(&modlinkage); 2584 if (error) 2585 return (error); 2586 2587 mutex_destroy(&lofi_lock); 2588 ddi_soft_state_fini(&lofi_statep); 2589 2590 return (error); 2591 } 2592 2593 int 2594 _info(struct modinfo *modinfop) 2595 { 2596 return (mod_info(&modlinkage, modinfop)); 2597 } 2598