/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * lofi (loopback file) driver - allows you to attach a file to a device,
 * which can then be accessed through that device. The simple model is that
 * you tell lofi to open a file, and then use the block device you get as
 * you would any block device. lofi translates access to the block device
 * into I/O on the underlying file. This is mostly useful for
 * mounting images of filesystems.
 *
 * lofi is controlled through /dev/lofictl - this is the only device exported
 * during attach, and is minor number 0. lofiadm communicates with lofi through
 * ioctls on this device. When a file is attached to lofi, block and character
 * devices are exported in /dev/lofi and /dev/rlofi. Currently, these devices
 * are identified by their minor number, and the minor number is also used
 * as the name in /dev/lofi. If we ever decide to support virtual disks,
 * we'll have to divide the minor number space to identify fdisk partitions
 * and slices, and the name will then be the minor number shifted down a
 * few bits. Minor devices are tracked with state structures handled with
 * ddi_soft_state(9F) for simplicity.
 *
 * A file attached to lofi is opened when attached and not closed until
 * explicitly detached from lofi. This seems more sensible than deferring
 * the open until the /dev/lofi device is opened, for a number of reasons.
 * One is that any failure is likely to be noticed by the person (or script)
 * running lofiadm. Another is that it would be a security problem if the
 * file was replaced by another one after being added but before being opened.
 *
 * The only hard part about lofi is the ioctls. In order to support things
 * like 'newfs' on a lofi device, it needs to support certain disk ioctls.
 * So it has to fake disk geometry and partition information. More may need
 * to be faked if your favorite utility doesn't work and you think it should
 * (fdformat doesn't work because it really wants to know the type of floppy
 * controller to talk to, and that didn't seem easy to fake. Or possibly even
 * necessary, since we have mkfs_pcfs now).
 *
 * Normally, a lofi device cannot be detached if it is open (i.e. busy). To
 * support simulation of hotplug events, an optional force flag is provided.
 * If a lofi device is open when a force detach is requested, then the
 * underlying file is closed and any subsequent operations return EIO. When the
 * device is closed for the last time, it will be cleaned up at that time.
 * In addition, the DKIOCSTATE ioctl will return DKIO_DEV_GONE when the device
 * is detached but not removed.
 *
 * Known problems:
 *
 *	UFS logging. Mounting a UFS filesystem image "logging"
 *	works for basic copy testing but wedges during a build of ON through
 *	that image. Some deadlock in lufs holding the log mutex and then
 *	getting stuck on a buf. So for now, don't do that.
 *
 *	Direct I/O. Since the filesystem data is being cached in the buffer
 *	cache, _and_ again in the underlying filesystem, it's tempting to
 *	enable direct I/O on the underlying file. Don't, because that deadlocks.
 *	I think to fix the cache-twice problem we might need filesystem support.
 *
 *	lofi on itself. The simple lock strategy (lofi_lock) precludes this
 *	because you'll be in lofi_ioctl, holding the lock when you open the
 *	file, which, if it's lofi, will grab lofi_lock. We prevent this for
 *	now, though not using ddi_soft_state(9F) would make it possible to
 *	do. Though it would still be silly.
 *
 * Interesting things to do:
 *
 *	Allow multiple files for each device. A poor-man's metadisk, basically.
 *
 *	Pass-through ioctls on block devices. You can (though it's not
 *	documented), give lofi a block device as a file name. Then we shouldn't
 *	need to fake a geometry. But this is also silly unless you're replacing
 *	metadisk.
 *
 *	Encryption. tpm would like this. Apparently Windows 2000 has it, and
 *	so does Linux.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/uio.h>
#include <sys/kmem.h>
#include <sys/cred.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <sys/aio_req.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/vnode.h>
#include <sys/lofi.h>
#include <sys/fcntl.h>
#include <sys/pathname.h>
#include <sys/filio.h>
#include <sys/fdio.h>
#include <sys/open.h>
#include <sys/disp.h>
#include <vm/seg_map.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* seems safer than having to get the string right many times */
#define	NBLOCKS_PROP_NAME	"Nblocks"
#define	SIZE_PROP_NAME		"Size"

static dev_info_t *lofi_dip;
static void	*lofi_statep;
static kmutex_t lofi_lock;		/* state lock */

/*
 * Because lofi_taskq_nthreads limits the actual swamping of the device, the
 * maxalloc parameter (lofi_taskq_maxalloc) should be tuned conservatively
 * high. If we want to be assured that the underlying device is always busy,
 * we must be sure that the number of bytes enqueued when the number of
 * enqueued tasks exceeds maxalloc is sufficient to keep the device busy for
 * the duration of the sleep time in taskq_ent_alloc(). That is, lofi should
 * set maxalloc to be the maximum throughput (in bytes per second) of the
 * underlying device divided by the minimum I/O size. We assume a realistic
 * maximum throughput of one hundred megabytes per second; we set maxalloc on
 * the lofi task queue to be 104857600 divided by DEV_BSIZE.
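 * With DEV_BSIZE at 512 bytes, that works out to a maxalloc of
 * 104857600 / 512 = 204800 queued entries.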
 */
static int lofi_taskq_maxalloc = 104857600 / DEV_BSIZE;
static int lofi_taskq_nthreads = 4;	/* # of taskq threads per device */

uint32_t lofi_max_files = LOFI_MAX_FILES;

static int
lofi_busy(void)
{
	minor_t	minor;

	/*
	 * We need to make sure no mappings exist - mod_remove won't
	 * help because the device isn't open.
	 */
	mutex_enter(&lofi_lock);
	for (minor = 1; minor <= lofi_max_files; minor++) {
		if (ddi_get_soft_state(lofi_statep, minor) != NULL) {
			mutex_exit(&lofi_lock);
			return (EBUSY);
		}
	}
	mutex_exit(&lofi_lock);
	return (0);
}

static int
is_opened(struct lofi_state *lsp)
{
	ASSERT(mutex_owned(&lofi_lock));
	return (lsp->ls_chr_open || lsp->ls_blk_open || lsp->ls_lyr_open_count);
}

static int
mark_opened(struct lofi_state *lsp, int otyp)
{
	ASSERT(mutex_owned(&lofi_lock));
	switch (otyp) {
	case OTYP_CHR:
		lsp->ls_chr_open = 1;
		break;
	case OTYP_BLK:
		lsp->ls_blk_open = 1;
		break;
	case OTYP_LYR:
		lsp->ls_lyr_open_count++;
		break;
	default:
		return (-1);
	}
	return (0);
}

static void
mark_closed(struct lofi_state *lsp, int otyp)
{
	ASSERT(mutex_owned(&lofi_lock));
	switch (otyp) {
	case OTYP_CHR:
		lsp->ls_chr_open = 0;
		break;
	case OTYP_BLK:
		lsp->ls_blk_open = 0;
		break;
	case OTYP_LYR:
		lsp->ls_lyr_open_count--;
		break;
	default:
		break;
	}
}

static void
lofi_free_handle(dev_t dev, minor_t minor, struct lofi_state *lsp,
    cred_t *credp)
{
	dev_t	newdev;
	char	namebuf[50];

	if (lsp->ls_vp) {
		(void) VOP_CLOSE(lsp->ls_vp, lsp->ls_openflag, 1, 0, credp);
		VN_RELE(lsp->ls_vp);
		lsp->ls_vp = NULL;
	}

	newdev = makedevice(getmajor(dev), minor);
	(void) ddi_prop_remove(newdev, lofi_dip, SIZE_PROP_NAME);
	(void) ddi_prop_remove(newdev, lofi_dip, NBLOCKS_PROP_NAME);

	(void) snprintf(namebuf, sizeof (namebuf), "%d", minor);
	ddi_remove_minor_node(lofi_dip, namebuf);
	(void) snprintf(namebuf, sizeof (namebuf), "%d,raw", minor);
	ddi_remove_minor_node(lofi_dip, namebuf);

	kmem_free(lsp->ls_filename, lsp->ls_filename_sz);
	taskq_destroy(lsp->ls_taskq);
	if (lsp->ls_kstat) {
		kstat_delete(lsp->ls_kstat);
		mutex_destroy(&lsp->ls_kstat_lock);
	}
	ddi_soft_state_free(lofi_statep, minor);
}

/*ARGSUSED*/
static int
lofi_open(dev_t *devp, int flag, int otyp, struct cred *credp)
{
	minor_t	minor;
	struct lofi_state *lsp;

	mutex_enter(&lofi_lock);
	minor = getminor(*devp);
	if (minor == 0) {
		/* master control device */
		/* must be opened exclusively */
		if (((flag & FEXCL) != FEXCL) || (otyp != OTYP_CHR)) {
			mutex_exit(&lofi_lock);
			return (EINVAL);
		}
		lsp = ddi_get_soft_state(lofi_statep, 0);
		if (lsp == NULL) {
			mutex_exit(&lofi_lock);
			return (ENXIO);
		}
		if (is_opened(lsp)) {
			mutex_exit(&lofi_lock);
			return (EBUSY);
		}
		(void) mark_opened(lsp, OTYP_CHR);
		mutex_exit(&lofi_lock);
		return (0);
	}

	/* otherwise, the mapping should already exist */
	lsp = ddi_get_soft_state(lofi_statep, minor);
	if (lsp == NULL) {
		mutex_exit(&lofi_lock);
		return (EINVAL);
	}

	if (lsp->ls_vp == NULL) {
		mutex_exit(&lofi_lock);
		return (ENXIO);
	}

	if (mark_opened(lsp, otyp) == -1) {
		mutex_exit(&lofi_lock);
		return (EINVAL);
	}

	mutex_exit(&lofi_lock);
	return (0);
}

/*ARGSUSED*/
static int
lofi_close(dev_t dev, int flag, int otyp, struct cred *credp)
{
	minor_t	minor;
	struct lofi_state *lsp;

	mutex_enter(&lofi_lock);
	minor = getminor(dev);
	lsp = ddi_get_soft_state(lofi_statep, minor);
	if (lsp == NULL) {
		mutex_exit(&lofi_lock);
		return (EINVAL);
	}
	mark_closed(lsp, otyp);

	/*
	 * If we have forcibly closed the underlying device, and this is the
	 * last close, then tear down the rest of the device.
	 */
	if (minor != 0 && lsp->ls_vp == NULL && !is_opened(lsp))
		lofi_free_handle(dev, minor, lsp, credp);
	mutex_exit(&lofi_lock);
	return (0);
}

/*
 * This is basically what strategy used to be before we found we
 * needed task queues.
 */
static void
lofi_strategy_task(void *arg)
{
	struct buf *bp = (struct buf *)arg;
	int	error;
	struct lofi_state *lsp;
	offset_t offset, alignedoffset;
	offset_t mapoffset;
	caddr_t	bufaddr;
	caddr_t	mapaddr;
	size_t	xfersize;
	size_t	len;
	int	isread;
	int	smflags;
	enum seg_rw srw;

	lsp = ddi_get_soft_state(lofi_statep, getminor(bp->b_edev));
	if (lsp->ls_kstat) {
		mutex_enter(lsp->ls_kstat->ks_lock);
		kstat_waitq_to_runq(KSTAT_IO_PTR(lsp->ls_kstat));
		mutex_exit(lsp->ls_kstat->ks_lock);
	}
	bp_mapin(bp);
	bufaddr = bp->b_un.b_addr;
	offset = bp->b_lblkno * DEV_BSIZE;	/* offset within file */

	/*
	 * We used to always use vn_rdwr here, but we cannot do that because
	 * we might decide to read or write from the underlying
	 * file during this call, which would be a deadlock because
	 * we have the rw_lock. So instead we page, unless it's not
	 * mappable or it's a character device.
	 */
	if (lsp->ls_vp == NULL || lsp->ls_vp_closereq) {
		error = EIO;
	} else if (((lsp->ls_vp->v_flag & VNOMAP) == 0) &&
	    (lsp->ls_vp->v_type != VCHR)) {
		/*
		 * segmap always gives us an 8K (MAXBSIZE) chunk, aligned on
		 * an 8K boundary, but the buf transfer address may not be
		 * aligned on more than a 512-byte boundary (we don't
		 * enforce that, though we could). This matters since the
		 * initial part of the transfer may not start at offset 0
		 * within the segmap'd chunk. So we have to compensate for
		 * that with 'mapoffset'. Subsequent chunks always start
		 * off at the beginning, and the last is capped by b_resid.
		 */
		mapoffset = offset & MAXBOFFSET;
		alignedoffset = offset - mapoffset;	/* now map-aligned */
		bp->b_resid = bp->b_bcount;
		isread = bp->b_flags & B_READ;
		srw = isread ? S_READ : S_WRITE;
		do {
			xfersize = MIN(lsp->ls_vp_size - offset,
			    MIN(MAXBSIZE - mapoffset, bp->b_resid));
			len = roundup(mapoffset + xfersize, PAGESIZE);
			mapaddr = segmap_getmapflt(segkmap, lsp->ls_vp,
			    alignedoffset, MAXBSIZE, 1, srw);
			/*
			 * Now fault in the pages. This lets us check
			 * for errors before we reference mapaddr and
			 * try to resolve the fault in bcopy (which would
			 * panic instead). And this can easily happen,
			 * particularly if you've lofi'd a file over NFS
			 * and someone deletes the file on the server.
			 */
			error = segmap_fault(kas.a_hat, segkmap, mapaddr,
			    len, F_SOFTLOCK, srw);
			if (error) {
				(void) segmap_release(segkmap, mapaddr, 0);
				if (FC_CODE(error) == FC_OBJERR)
					error = FC_ERRNO(error);
				else
					error = EIO;
				break;
			}
			smflags = 0;
			if (isread) {
				bcopy(mapaddr + mapoffset, bufaddr, xfersize);
			} else {
				smflags |= SM_WRITE;
				bcopy(bufaddr, mapaddr + mapoffset, xfersize);
			}
			bp->b_resid -= xfersize;
			bufaddr += xfersize;
			offset += xfersize;
			(void) segmap_fault(kas.a_hat, segkmap, mapaddr,
			    len, F_SOFTUNLOCK, srw);
			error = segmap_release(segkmap, mapaddr, smflags);
			/* only the first map may start partial */
			mapoffset = 0;
			alignedoffset += MAXBSIZE;
		} while ((error == 0) && (bp->b_resid > 0) &&
		    (offset < lsp->ls_vp_size));
	} else {
		ssize_t	resid;
		enum uio_rw rw;

		if (bp->b_flags & B_READ)
			rw = UIO_READ;
		else
			rw = UIO_WRITE;
		error = vn_rdwr(rw, lsp->ls_vp, bufaddr, bp->b_bcount,
		    offset, UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, &resid);
		bp->b_resid = resid;
	}

	if (lsp->ls_kstat) {
		size_t n_done = bp->b_bcount - bp->b_resid;
		kstat_io_t *kioptr;

		mutex_enter(lsp->ls_kstat->ks_lock);
		kioptr = KSTAT_IO_PTR(lsp->ls_kstat);
		if (bp->b_flags & B_READ) {
			kioptr->nread += n_done;
			kioptr->reads++;
		} else {
			kioptr->nwritten += n_done;
			kioptr->writes++;
		}
		kstat_runq_exit(kioptr);
		mutex_exit(lsp->ls_kstat->ks_lock);
	}

	mutex_enter(&lsp->ls_vp_lock);
	if (--lsp->ls_vp_iocount == 0)
		cv_broadcast(&lsp->ls_vp_cv);
	mutex_exit(&lsp->ls_vp_lock);

	bioerror(bp, error);
	biodone(bp);
}

static int
lofi_strategy(struct buf *bp)
{
	struct lofi_state *lsp;
	offset_t	offset;

	/*
	 * We cannot just do I/O here, because the current thread
	 * _might_ end up back in here because the underlying filesystem
	 * wants a buffer, which eventually gets into bio_recycle and
	 * might call into lofi to write out a delayed-write buffer.
	 * This is bad if the filesystem above lofi is the same as below.
	 *
	 * We could come up with a complex strategy using threads to
	 * do the I/O asynchronously, or we could use task queues. Task
	 * queues were incredibly easy, so they win.
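	 *
	 * Note that the dispatch at the bottom of this routine uses KM_SLEEP,
	 * so queuing a request may briefly block this thread when the queue
	 * is saturated rather than fail outright.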
	 */
	lsp = ddi_get_soft_state(lofi_statep, getminor(bp->b_edev));
	mutex_enter(&lsp->ls_vp_lock);
	if (lsp->ls_vp == NULL || lsp->ls_vp_closereq) {
		bioerror(bp, EIO);
		biodone(bp);
		mutex_exit(&lsp->ls_vp_lock);
		return (0);
	}

	offset = bp->b_lblkno * DEV_BSIZE;	/* offset within file */
	if (offset == lsp->ls_vp_size) {
		/* EOF */
		if ((bp->b_flags & B_READ) != 0) {
			bp->b_resid = bp->b_bcount;
			bioerror(bp, 0);
		} else {
			/* writes should fail */
			bioerror(bp, ENXIO);
		}
		biodone(bp);
		mutex_exit(&lsp->ls_vp_lock);
		return (0);
	}
	if (offset > lsp->ls_vp_size) {
		bioerror(bp, ENXIO);
		biodone(bp);
		mutex_exit(&lsp->ls_vp_lock);
		return (0);
	}
	lsp->ls_vp_iocount++;
	mutex_exit(&lsp->ls_vp_lock);

	if (lsp->ls_kstat) {
		mutex_enter(lsp->ls_kstat->ks_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(lsp->ls_kstat));
		mutex_exit(lsp->ls_kstat->ks_lock);
	}
	(void) taskq_dispatch(lsp->ls_taskq, lofi_strategy_task, bp, KM_SLEEP);
	return (0);
}

/*ARGSUSED2*/
static int
lofi_read(dev_t dev, struct uio *uio, struct cred *credp)
{
	if (getminor(dev) == 0)
		return (EINVAL);
	return (physio(lofi_strategy, NULL, dev, B_READ, minphys, uio));
}

/*ARGSUSED2*/
static int
lofi_write(dev_t dev, struct uio *uio, struct cred *credp)
{
	if (getminor(dev) == 0)
		return (EINVAL);
	return (physio(lofi_strategy, NULL, dev, B_WRITE, minphys, uio));
}

/*ARGSUSED2*/
static int
lofi_aread(dev_t dev, struct aio_req *aio, struct cred *credp)
{
	if (getminor(dev) == 0)
		return (EINVAL);
	return (aphysio(lofi_strategy, anocancel, dev, B_READ, minphys, aio));
}

/*ARGSUSED2*/
static int
lofi_awrite(dev_t dev, struct aio_req *aio, struct cred *credp)
{
	if (getminor(dev) == 0)
		return (EINVAL);
	return (aphysio(lofi_strategy, anocancel, dev, B_WRITE, minphys, aio));
}

/*ARGSUSED*/
static int
lofi_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = lofi_dip;
		return (DDI_SUCCESS);
	case DDI_INFO_DEVT2INSTANCE:
		*result = 0;
		return (DDI_SUCCESS);
	}
	return (DDI_FAILURE);
}

static int
lofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int	error;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	error = ddi_soft_state_zalloc(lofi_statep, 0);
	if (error == DDI_FAILURE) {
		return (DDI_FAILURE);
	}
	error = ddi_create_minor_node(dip, LOFI_CTL_NODE, S_IFCHR, 0,
	    DDI_PSEUDO, NULL);
	if (error == DDI_FAILURE) {
		ddi_soft_state_free(lofi_statep, 0);
		return (DDI_FAILURE);
	}
	lofi_dip = dip;
	ddi_report_dev(dip);
	return (DDI_SUCCESS);
}

static int
lofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);
	if (lofi_busy())
		return (DDI_FAILURE);
	lofi_dip = NULL;
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(lofi_statep, 0);
	return (DDI_SUCCESS);
}

/*
 * These two just simplify the rest of the ioctls that need to copyin/out
 * the lofi_ioctl structure.
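 * copy_in_lofi_ioctl() returns a kmem-allocated copy that the caller must
 * release with free_lofi_ioctl(); it returns NULL if either the copyin or
 * the minor-number validation fails.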
 */
struct lofi_ioctl *
copy_in_lofi_ioctl(const struct lofi_ioctl *ulip, int flag)
{
	struct lofi_ioctl *klip;
	int	error;

	klip = kmem_alloc(sizeof (struct lofi_ioctl), KM_SLEEP);
	error = ddi_copyin(ulip, klip, sizeof (struct lofi_ioctl), flag);
	if (error) {
		kmem_free(klip, sizeof (struct lofi_ioctl));
		return (NULL);
	}

	/* make sure filename is always null-terminated */
	klip->li_filename[MAXPATHLEN] = '\0';

	/* validate minor number */
	if (klip->li_minor > lofi_max_files) {
		kmem_free(klip, sizeof (struct lofi_ioctl));
		return (NULL);
	}
	return (klip);
}

int
copy_out_lofi_ioctl(const struct lofi_ioctl *klip, struct lofi_ioctl *ulip,
    int flag)
{
	int	error;

	error = ddi_copyout(klip, ulip, sizeof (struct lofi_ioctl), flag);
	if (error)
		return (EFAULT);
	return (0);
}

void
free_lofi_ioctl(struct lofi_ioctl *klip)
{
	kmem_free(klip, sizeof (struct lofi_ioctl));
}

/*
 * Return the minor number 'filename' is mapped to, or 0 if it is not mapped.
 */
static int
file_to_minor(char *filename)
{
	minor_t	minor;
	struct lofi_state *lsp;

	ASSERT(mutex_owned(&lofi_lock));
	for (minor = 1; minor <= lofi_max_files; minor++) {
		lsp = ddi_get_soft_state(lofi_statep, minor);
		if (lsp == NULL)
			continue;
		if (strcmp(lsp->ls_filename, filename) == 0)
			return (minor);
	}
	return (0);
}

/*
 * lofiadm does some validation, but since Joe Random (or crashme) could
 * do our ioctls, we need to do some validation too.
 */
static int
valid_filename(const char *filename)
{
	static char *blkprefix = "/dev/" LOFI_BLOCK_NAME "/";
	static char *charprefix = "/dev/" LOFI_CHAR_NAME "/";

	/* must be absolute path */
	if (filename[0] != '/')
		return (0);
	/* must not be lofi */
	if (strncmp(filename, blkprefix, strlen(blkprefix)) == 0)
		return (0);
	if (strncmp(filename, charprefix, strlen(charprefix)) == 0)
		return (0);
	return (1);
}

/*
 * Fakes up a disk geometry, and one big partition, based on the size
 * of the file. This is needed because we allow newfs'ing the device,
 * and newfs will do several disk ioctls to figure out the geometry and
 * partition information. It uses that information to determine the parameters
 * to pass to mkfs. Geometry is pretty much irrelevant these days, but we
 * have to support it.
 */
static void
fake_disk_geometry(struct lofi_state *lsp)
{
	/* dk_geom - see dkio(7I) */
	/*
	 * dkg_ncyl _could_ be set to one here (one big cylinder with gobs
	 * of sectors), but that breaks programs like fdisk which want to
	 * partition a disk by cylinder. With one cylinder, you can't create
	 * an fdisk partition and put pcfs on it for testing (hard to pick
	 * a number between one and one).
	 *
	 * The cheezy floppy test is an attempt to not have too few cylinders
	 * for a small file, or so many on a big file that you waste space
	 * for backup superblocks or cylinder group structures.
	 */
	if (lsp->ls_vp_size < (2 * 1024 * 1024)) /* floppy? */
		lsp->ls_dkg.dkg_ncyl = lsp->ls_vp_size / (100 * 1024);
	else
		lsp->ls_dkg.dkg_ncyl = lsp->ls_vp_size / (300 * 1024);
	/* in case the file is < 100k */
	if (lsp->ls_dkg.dkg_ncyl == 0)
		lsp->ls_dkg.dkg_ncyl = 1;
	lsp->ls_dkg.dkg_acyl = 0;
	lsp->ls_dkg.dkg_bcyl = 0;
	lsp->ls_dkg.dkg_nhead = 1;
	lsp->ls_dkg.dkg_obs1 = 0;
	lsp->ls_dkg.dkg_intrlv = 0;
	lsp->ls_dkg.dkg_obs2 = 0;
	lsp->ls_dkg.dkg_obs3 = 0;
	lsp->ls_dkg.dkg_apc = 0;
	lsp->ls_dkg.dkg_rpm = 7200;
	lsp->ls_dkg.dkg_pcyl = lsp->ls_dkg.dkg_ncyl + lsp->ls_dkg.dkg_acyl;
	lsp->ls_dkg.dkg_nsect = lsp->ls_vp_size /
	    (DEV_BSIZE * lsp->ls_dkg.dkg_ncyl);
	lsp->ls_dkg.dkg_write_reinstruct = 0;
	lsp->ls_dkg.dkg_read_reinstruct = 0;

	/* vtoc - see dkio(7I) */
	bzero(&lsp->ls_vtoc, sizeof (struct vtoc));
	lsp->ls_vtoc.v_sanity = VTOC_SANE;
	lsp->ls_vtoc.v_version = V_VERSION;
	bcopy(LOFI_DRIVER_NAME, lsp->ls_vtoc.v_volume, 7);
	lsp->ls_vtoc.v_sectorsz = DEV_BSIZE;
	lsp->ls_vtoc.v_nparts = 1;
	lsp->ls_vtoc.v_part[0].p_tag = V_UNASSIGNED;
	lsp->ls_vtoc.v_part[0].p_flag = V_UNMNT;
	lsp->ls_vtoc.v_part[0].p_start = (daddr_t)0;
	/*
	 * The partition size cannot just be the number of sectors, because
	 * that might not end on a cylinder boundary. And if that's the case,
	 * newfs/mkfs will print a scary warning. So just figure the size
	 * based on the number of cylinders and sectors/cylinder.
	 */
	lsp->ls_vtoc.v_part[0].p_size = lsp->ls_dkg.dkg_pcyl *
	    lsp->ls_dkg.dkg_nsect * lsp->ls_dkg.dkg_nhead;

	/* dk_cinfo - see dkio(7I) */
	bzero(&lsp->ls_ci, sizeof (struct dk_cinfo));
	(void) strcpy(lsp->ls_ci.dki_cname, LOFI_DRIVER_NAME);
	lsp->ls_ci.dki_ctype = DKC_MD;
	lsp->ls_ci.dki_flags = 0;
	lsp->ls_ci.dki_cnum = 0;
	lsp->ls_ci.dki_addr = 0;
	lsp->ls_ci.dki_space = 0;
	lsp->ls_ci.dki_prio = 0;
	lsp->ls_ci.dki_vec = 0;
	(void) strcpy(lsp->ls_ci.dki_dname, LOFI_DRIVER_NAME);
	lsp->ls_ci.dki_unit = 0;
	lsp->ls_ci.dki_slave = 0;
	lsp->ls_ci.dki_partition = 0;
	/*
	 * newfs uses this to set maxcontig. Must not be < 16, or it
	 * will be 0 when newfs multiplies it by DEV_BSIZE and divides
	 * it by the block size. Then tunefs doesn't work because
	 * maxcontig is 0.
	 */
	lsp->ls_ci.dki_maxtransfer = 16;
}

/*
 * Map a file to a minor number. Return the minor number.
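 * On success the chosen minor is also copied back to the caller's lofi_ioctl
 * (li_minor) and, when rvalp is non-NULL, returned through *rvalp. On failure
 * an errno is returned and any partially created state is torn down.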
 */
static int
lofi_map_file(dev_t dev, struct lofi_ioctl *ulip, int pickminor,
    int *rvalp, struct cred *credp, int ioctl_flag)
{
	minor_t	newminor;
	struct lofi_state *lsp;
	struct lofi_ioctl *klip;
	int	error;
	struct vnode *vp;
	int64_t	Nblocks_prop_val;
	int64_t	Size_prop_val;
	vattr_t	vattr;
	int	flag;
	enum vtype v_type;
	int	zalloced = 0;
	dev_t	newdev;
	char	namebuf[50];

	klip = copy_in_lofi_ioctl(ulip, ioctl_flag);
	if (klip == NULL)
		return (EFAULT);

	mutex_enter(&lofi_lock);

	if (!valid_filename(klip->li_filename)) {
		error = EINVAL;
		goto out;
	}

	if (file_to_minor(klip->li_filename) != 0) {
		error = EBUSY;
		goto out;
	}

	if (pickminor) {
		/* Find a free one */
		for (newminor = 1; newminor <= lofi_max_files; newminor++)
			if (ddi_get_soft_state(lofi_statep, newminor) == NULL)
				break;
		if (newminor >= lofi_max_files) {
			error = EAGAIN;
			goto out;
		}
	} else {
		newminor = klip->li_minor;
		if (ddi_get_soft_state(lofi_statep, newminor) != NULL) {
			error = EEXIST;
			goto out;
		}
	}

	/* make sure it's valid */
	error = lookupname(klip->li_filename, UIO_SYSSPACE, FOLLOW,
	    NULLVPP, &vp);
	if (error) {
		goto out;
	}
	v_type = vp->v_type;
	VN_RELE(vp);
	if (!V_ISLOFIABLE(v_type)) {
		error = EINVAL;
		goto out;
	}
	flag = FREAD | FWRITE | FOFFMAX | FEXCL;
	error = vn_open(klip->li_filename, UIO_SYSSPACE, flag, 0, &vp, 0, 0);
	if (error) {
		/* try read-only */
		flag &= ~FWRITE;
		error = vn_open(klip->li_filename, UIO_SYSSPACE, flag, 0,
		    &vp, 0, 0);
		if (error) {
			goto out;
		}
	}
	vattr.va_mask = AT_SIZE;
	error = VOP_GETATTR(vp, &vattr, 0, credp);
	if (error) {
		goto closeout;
	}
	/* the file needs to be a multiple of the block size */
	if ((vattr.va_size % DEV_BSIZE) != 0) {
		error = EINVAL;
		goto closeout;
	}
	newdev = makedevice(getmajor(dev), newminor);
	Size_prop_val = vattr.va_size;
	if ((ddi_prop_update_int64(newdev, lofi_dip,
	    SIZE_PROP_NAME, Size_prop_val)) != DDI_PROP_SUCCESS) {
		error = EINVAL;
		goto closeout;
	}
	Nblocks_prop_val = vattr.va_size / DEV_BSIZE;
	if ((ddi_prop_update_int64(newdev, lofi_dip,
	    NBLOCKS_PROP_NAME, Nblocks_prop_val)) != DDI_PROP_SUCCESS) {
		error = EINVAL;
		goto propout;
	}
	error = ddi_soft_state_zalloc(lofi_statep, newminor);
	if (error == DDI_FAILURE) {
		error = ENOMEM;
		goto propout;
	}
	zalloced = 1;
	(void) snprintf(namebuf, sizeof (namebuf), "%d", newminor);
	error = ddi_create_minor_node(lofi_dip, namebuf, S_IFBLK, newminor,
	    DDI_PSEUDO, NULL);
	if (error != DDI_SUCCESS) {
		error = ENXIO;
		goto propout;
	}
	(void) snprintf(namebuf, sizeof (namebuf), "%d,raw", newminor);
	error = ddi_create_minor_node(lofi_dip, namebuf, S_IFCHR, newminor,
	    DDI_PSEUDO, NULL);
	if (error != DDI_SUCCESS) {
		/* remove block node */
		(void) snprintf(namebuf, sizeof (namebuf), "%d", newminor);
		ddi_remove_minor_node(lofi_dip, namebuf);
		error = ENXIO;
		goto propout;
	}
	lsp = ddi_get_soft_state(lofi_statep, newminor);
	lsp->ls_filename_sz = strlen(klip->li_filename) + 1;
	lsp->ls_filename = kmem_alloc(lsp->ls_filename_sz, KM_SLEEP);
	(void) snprintf(namebuf, sizeof (namebuf), "%s_taskq_%d",
	    LOFI_DRIVER_NAME, newminor);
	lsp->ls_taskq = taskq_create(namebuf, lofi_taskq_nthreads,
	    minclsyspri, 1, lofi_taskq_maxalloc, 0);
	lsp->ls_kstat = kstat_create(LOFI_DRIVER_NAME, newminor,
	    NULL, "disk", KSTAT_TYPE_IO, 1, 0);
	if (lsp->ls_kstat) {
		mutex_init(&lsp->ls_kstat_lock, NULL, MUTEX_DRIVER, NULL);
		lsp->ls_kstat->ks_lock = &lsp->ls_kstat_lock;
		kstat_install(lsp->ls_kstat);
	}
	cv_init(&lsp->ls_vp_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&lsp->ls_vp_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * save open mode so file can be closed properly and vnode counts
	 * updated correctly.
	 */
	lsp->ls_openflag = flag;

	/*
	 * Try to handle stacked lofs vnodes.
	 */
	if (vp->v_type == VREG) {
		if (VOP_REALVP(vp, &lsp->ls_vp) != 0) {
			lsp->ls_vp = vp;
		} else {
			/*
			 * Even though vp was obtained via vn_open(), we
			 * can't call vn_close() on it, since lofs will
			 * pass the VOP_CLOSE() on down to the realvp
			 * (which we are about to use). Hence we merely
			 * drop the reference to the lofs vnode and hold
			 * the realvp so things behave as if we've
			 * opened the realvp without any interaction
			 * with lofs.
			 */
			VN_HOLD(lsp->ls_vp);
			VN_RELE(vp);
		}
	} else {
		lsp->ls_vp = vp;
	}
	lsp->ls_vp_size = vattr.va_size;
	(void) strcpy(lsp->ls_filename, klip->li_filename);
	if (rvalp)
		*rvalp = (int)newminor;
	klip->li_minor = newminor;

	fake_disk_geometry(lsp);
	mutex_exit(&lofi_lock);
	(void) copy_out_lofi_ioctl(klip, ulip, ioctl_flag);
	free_lofi_ioctl(klip);
	return (0);

propout:
	(void) ddi_prop_remove(newdev, lofi_dip, SIZE_PROP_NAME);
	(void) ddi_prop_remove(newdev, lofi_dip, NBLOCKS_PROP_NAME);
closeout:
	(void) VOP_CLOSE(vp, flag, 1, 0, credp);
	VN_RELE(vp);
out:
	if (zalloced)
		ddi_soft_state_free(lofi_statep, newminor);
	mutex_exit(&lofi_lock);
	free_lofi_ioctl(klip);
	return (error);
}

/*
 * unmap a file.
 */
static int
lofi_unmap_file(dev_t dev, struct lofi_ioctl *ulip, int byfilename,
    struct cred *credp, int ioctl_flag)
{
	struct lofi_state *lsp;
	struct lofi_ioctl *klip;
	minor_t	minor;

	klip = copy_in_lofi_ioctl(ulip, ioctl_flag);
	if (klip == NULL)
		return (EFAULT);

	mutex_enter(&lofi_lock);
	if (byfilename) {
		minor = file_to_minor(klip->li_filename);
	} else {
		minor = klip->li_minor;
	}
	if (minor == 0) {
		mutex_exit(&lofi_lock);
		free_lofi_ioctl(klip);
		return (ENXIO);
	}
	lsp = ddi_get_soft_state(lofi_statep, minor);
	if (lsp == NULL || lsp->ls_vp == NULL) {
		mutex_exit(&lofi_lock);
		free_lofi_ioctl(klip);
		return (ENXIO);
	}

	if (is_opened(lsp)) {
		/*
		 * If the 'force' flag is set, then we forcibly close the
		 * underlying file. Subsequent operations will fail, and the
		 * DKIOCSTATE ioctl will return DKIO_DEV_GONE. When the device
		 * is last closed, it will be cleaned up appropriately.
		 *
		 * This is complicated by the fact that we may have outstanding
		 * dispatched I/Os. Rather than having a single mutex to
		 * serialize all I/O, we keep a count of the number of
		 * outstanding I/O requests, as well as a flag to indicate that
		 * no new I/Os should be dispatched. We set the flag, wait for
		 * the number of outstanding I/Os to reach 0, and then close the
		 * underlying vnode.
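		 *
		 * (The count is ls_vp_iocount, incremented in lofi_strategy
		 * and decremented in lofi_strategy_task, which broadcasts
		 * ls_vp_cv when it reaches zero; the flag is ls_vp_closereq.)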
		 */
		if (klip->li_force) {
			mutex_enter(&lsp->ls_vp_lock);
			lsp->ls_vp_closereq = B_TRUE;
			while (lsp->ls_vp_iocount > 0)
				cv_wait(&lsp->ls_vp_cv, &lsp->ls_vp_lock);
			(void) VOP_CLOSE(lsp->ls_vp, lsp->ls_openflag, 1, 0,
			    credp);
			VN_RELE(lsp->ls_vp);
			lsp->ls_vp = NULL;
			cv_broadcast(&lsp->ls_vp_cv);
			mutex_exit(&lsp->ls_vp_lock);
			mutex_exit(&lofi_lock);
			klip->li_minor = minor;
			(void) copy_out_lofi_ioctl(klip, ulip, ioctl_flag);
			free_lofi_ioctl(klip);
			return (0);
		}
		mutex_exit(&lofi_lock);
		free_lofi_ioctl(klip);
		return (EBUSY);
	}

	lofi_free_handle(dev, minor, lsp, credp);

	klip->li_minor = minor;
	mutex_exit(&lofi_lock);
	(void) copy_out_lofi_ioctl(klip, ulip, ioctl_flag);
	free_lofi_ioctl(klip);
	return (0);
}

/*
 * get the filename given the minor number, or the minor number given
 * the name.
 */
/*ARGSUSED*/
static int
lofi_get_info(dev_t dev, struct lofi_ioctl *ulip, int which,
    struct cred *credp, int ioctl_flag)
{
	struct lofi_state *lsp;
	struct lofi_ioctl *klip;
	int	error;
	minor_t	minor;

	klip = copy_in_lofi_ioctl(ulip, ioctl_flag);
	if (klip == NULL)
		return (EFAULT);

	switch (which) {
	case LOFI_GET_FILENAME:
		minor = klip->li_minor;
		if (minor == 0) {
			free_lofi_ioctl(klip);
			return (EINVAL);
		}

		mutex_enter(&lofi_lock);
		lsp = ddi_get_soft_state(lofi_statep, minor);
		if (lsp == NULL) {
			mutex_exit(&lofi_lock);
			free_lofi_ioctl(klip);
			return (ENXIO);
		}
		(void) strcpy(klip->li_filename, lsp->ls_filename);
		mutex_exit(&lofi_lock);
		error = copy_out_lofi_ioctl(klip, ulip, ioctl_flag);
		free_lofi_ioctl(klip);
		return (error);
	case LOFI_GET_MINOR:
		mutex_enter(&lofi_lock);
		klip->li_minor = file_to_minor(klip->li_filename);
		mutex_exit(&lofi_lock);
		if (klip->li_minor == 0) {
			free_lofi_ioctl(klip);
			return (ENOENT);
		}
		error = copy_out_lofi_ioctl(klip, ulip, ioctl_flag);
		free_lofi_ioctl(klip);
		return (error);
	default:
		free_lofi_ioctl(klip);
		return (EINVAL);
	}
}

static int
lofi_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp,
    int *rvalp)
{
	int	error;
	enum dkio_state dkstate;
	struct lofi_state *lsp;
	minor_t	minor;

#ifdef lint
	credp = credp;
#endif

	minor = getminor(dev);
	/* lofi ioctls only apply to the master device */
	if (minor == 0) {
		struct lofi_ioctl *lip = (struct lofi_ioctl *)arg;

		/*
		 * the query commands only need read-access - i.e., normal
		 * users are allowed to do those on the ctl device as
		 * long as they can open it read-only.
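		 * The commands that map or unmap files additionally require
		 * the ctl device to have been opened for writing (FWRITE),
		 * as checked below.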
		 */
		switch (cmd) {
		case LOFI_MAP_FILE:
			if ((flag & FWRITE) == 0)
				return (EPERM);
			return (lofi_map_file(dev, lip, 1, rvalp, credp, flag));
		case LOFI_MAP_FILE_MINOR:
			if ((flag & FWRITE) == 0)
				return (EPERM);
			return (lofi_map_file(dev, lip, 0, rvalp, credp, flag));
		case LOFI_UNMAP_FILE:
			if ((flag & FWRITE) == 0)
				return (EPERM);
			return (lofi_unmap_file(dev, lip, 1, credp, flag));
		case LOFI_UNMAP_FILE_MINOR:
			if ((flag & FWRITE) == 0)
				return (EPERM);
			return (lofi_unmap_file(dev, lip, 0, credp, flag));
		case LOFI_GET_FILENAME:
			return (lofi_get_info(dev, lip, LOFI_GET_FILENAME,
			    credp, flag));
		case LOFI_GET_MINOR:
			return (lofi_get_info(dev, lip, LOFI_GET_MINOR,
			    credp, flag));
		case LOFI_GET_MAXMINOR:
			error = ddi_copyout(&lofi_max_files, &lip->li_minor,
			    sizeof (lofi_max_files), flag);
			if (error)
				return (EFAULT);
			return (0);
		default:
			break;
		}
	}

	lsp = ddi_get_soft_state(lofi_statep, minor);
	if (lsp == NULL)
		return (ENXIO);

	/*
	 * We explicitly allow DKIOCSTATE, but all other ioctls should fail
	 * with EIO as if the device was no longer present.
	 */
	if (lsp->ls_vp == NULL && cmd != DKIOCSTATE)
		return (EIO);

	/* these are for faking out utilities like newfs */
	switch (cmd) {
	case DKIOCGVTOC:
		switch (ddi_model_convert_from(flag & FMODELS)) {
		case DDI_MODEL_ILP32: {
			struct vtoc32 vtoc32;

			vtoctovtoc32(lsp->ls_vtoc, vtoc32);
			if (ddi_copyout(&vtoc32, (void *)arg,
			    sizeof (struct vtoc32), flag))
				return (EFAULT);
			break;
		}

		case DDI_MODEL_NONE:
			if (ddi_copyout(&lsp->ls_vtoc, (void *)arg,
			    sizeof (struct vtoc), flag))
				return (EFAULT);
			break;
		}
		return (0);
	case DKIOCINFO:
		error = ddi_copyout(&lsp->ls_ci, (void *)arg,
		    sizeof (struct dk_cinfo), flag);
		if (error)
			return (EFAULT);
		return (0);
	case DKIOCG_VIRTGEOM:
	case DKIOCG_PHYGEOM:
	case DKIOCGGEOM:
		error = ddi_copyout(&lsp->ls_dkg, (void *)arg,
		    sizeof (struct dk_geom), flag);
		if (error)
			return (EFAULT);
		return (0);
	case DKIOCSTATE:
		/*
		 * Normally, lofi devices are always in the INSERTED state. If
		 * a device is forcefully unmapped, then the device transitions
		 * to the DKIO_DEV_GONE state.
		 */
		if (ddi_copyin((void *)arg, &dkstate, sizeof (dkstate),
		    flag) != 0)
			return (EFAULT);

		mutex_enter(&lsp->ls_vp_lock);
		while ((dkstate == DKIO_INSERTED && lsp->ls_vp != NULL) ||
		    (dkstate == DKIO_DEV_GONE && lsp->ls_vp == NULL)) {
			/*
			 * By virtue of having the device open, we know that
			 * 'lsp' will remain valid when we return.
			 */
			if (!cv_wait_sig(&lsp->ls_vp_cv,
			    &lsp->ls_vp_lock)) {
				mutex_exit(&lsp->ls_vp_lock);
				return (EINTR);
			}
		}

		dkstate = (lsp->ls_vp != NULL ?
		    DKIO_INSERTED : DKIO_DEV_GONE);
		mutex_exit(&lsp->ls_vp_lock);

		if (ddi_copyout(&dkstate, (void *)arg,
		    sizeof (dkstate), flag) != 0)
			return (EFAULT);
		return (0);
	default:
		return (ENOTTY);
	}
}

static struct cb_ops lofi_cb_ops = {
	lofi_open,		/* open */
	lofi_close,		/* close */
	lofi_strategy,		/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	lofi_read,		/* read */
	lofi_write,		/* write */
	lofi_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* prop_op */
	0,			/* streamtab */
	D_64BIT | D_NEW | D_MP,	/* Driver compatibility flag */
	CB_REV,
	lofi_aread,
	lofi_awrite
};

static struct dev_ops lofi_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	lofi_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	lofi_attach,		/* attach */
	lofi_detach,		/* detach */
	nodev,			/* reset */
	&lofi_cb_ops,		/* driver operations */
	NULL			/* no bus operations */
};

static struct modldrv modldrv = {
	&mod_driverops,
	"loopback file driver (%I%)",
	&lofi_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

int
_init(void)
{
	int error;

	error = ddi_soft_state_init(&lofi_statep,
	    sizeof (struct lofi_state), 0);
	if (error)
		return (error);

	mutex_init(&lofi_lock, NULL, MUTEX_DRIVER, NULL);
	error = mod_install(&modlinkage);
	if (error) {
		mutex_destroy(&lofi_lock);
		ddi_soft_state_fini(&lofi_statep);
	}

	return (error);
}

int
_fini(void)
{
	int	error;

	if (lofi_busy())
		return (EBUSY);

	error = mod_remove(&modlinkage);
	if (error)
		return (error);

	mutex_destroy(&lofi_lock);
	ddi_soft_state_fini(&lofi_statep);

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
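
/*
 * Illustrative userland sketch (not part of this driver, and not the actual
 * lofiadm source): one plausible way a lofiadm-like program could use the
 * LOFI_MAP_FILE ioctl on /dev/lofictl described at the top of this file.
 * The map_image() name is hypothetical.
 *
 *	#include <sys/lofi.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static int
 *	map_image(const char *path)
 *	{
 *		struct lofi_ioctl li;
 *		int ctl, minor;
 *
 *		// the control node must be opened exclusively (see lofi_open)
 *		if ((ctl = open("/dev/lofictl", O_RDWR | O_EXCL)) == -1)
 *			return (-1);
 *		(void) memset(&li, 0, sizeof (li));
 *		(void) strlcpy(li.li_filename, path, sizeof (li.li_filename));
 *		// on success the new minor number is the ioctl return value
 *		minor = ioctl(ctl, LOFI_MAP_FILE, &li);
 *		(void) close(ctl);
 *		if (minor == -1)
 *			return (-1);
 *		(void) printf("/dev/lofi/%d\n", minor);
 *		return (0);
 *	}
 */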