1 /*- 2 * Copyright (c) 2002 Poul-Henning Kamp 3 * Copyright (c) 2002 Networks Associates Technology, Inc. 4 * All rights reserved. 5 * 6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp 7 * and NAI Labs, the Security Research Division of Network Associates, Inc. 8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 9 * DARPA CHATS research program. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. The names of the authors may not be used to endorse or promote 20 * products derived from this software without specific prior written 21 * permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>

/*
 * The three global bio queues: requests travelling down towards the
 * providers, completions travelling up towards the consumers, and a
 * task queue serviced by the "up" thread (see bio_taskqueue()).
 */
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

/*
 * Number of pending ENOMEM retries; while non-zero the g_down thread
 * throttles itself with pause() before issuing each request.
 * NOTE(review): incremented in g_io_deliver() and decremented in
 * g_io_schedule_down() without atomics despite <machine/atomic.h>
 * being included — presumably tolerated as a heuristic; confirm.
 */
static u_int pace;
static uma_zone_t biozone;		/* UMA zone backing all struct bio */

#include <machine/atomic.h>

/* Acquire the mutex protecting a bio queue. */
static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

/* Release the mutex protecting a bio queue. */
static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

/* Initialize a bio queue: empty tailq plus its mutex. */
static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

/*
 * Dequeue and return the first bio on the queue, or NULL if empty.
 * Clears BIO_ONQUEUE and maintains the queue length counter.
 * Caller must hold the queue lock.
 */
static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

/*
 * Allocate a zeroed bio without sleeping (M_NOWAIT); may return NULL.
 */
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

/*
 * Allocate a zeroed bio, sleeping if necessary (M_WAITOK); never NULL.
 */
struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

/* Return a bio to the zone. */
void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}

/*
 * Clone a bio for passing down to the next layer: copies cmd, length,
 * offset, data pointer and attribute, links the clone to its parent
 * and bumps the parent's child count.  Non-sleeping; returns NULL on
 * allocation failure (in which case bio_children is NOT incremented).
 */
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		bp->bio_children++;
	}
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return(bp2);
}

/*
 * Sleeping variant of g_clone_bio(): same field copies, but M_WAITOK
 * guarantees success, so no NULL check is needed.
 */
struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return(bp2);
}

/*
 * One-time initialization of the GEOM I/O subsystem: the three bio
 * queues and the bio allocation zone.
 */
void
g_io_init()
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

/*
 * Synchronously issue a BIO_GETATTR request through the consumer.
 * On entry *len is the size of the buffer at ptr; on return it is
 * overwritten with bio_completed (the amount actually delivered).
 * Returns the bio error code.
 */
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

/*
 * Synchronously issue a BIO_FLUSH through the consumer and wait for
 * completion.  Returns the bio error code.
 * NOTE(review): bio_offset is set to the provider's mediasize here;
 * presumably a convention for flush requests — confirm against
 * consumers of bio_offset in BIO_FLUSH handlers.
 */
int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

/*
 * Validate a bio before it is handed to the provider: checks the
 * consumer's access counts, the provider's error state, and (for
 * data transfers) sector alignment and media bounds.  Returns 0 or
 * an errno with which the bio should be failed.
 */
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters dont allow the operation */
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* if provider is marked for error, don't disturb. */
	if (pp->error)
		return (pp->error);

	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize is a probably lack of media */
		if (pp->sectorsize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not integral sector long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

/*
 * Submit a bio from consumer cp to its provider: fills in the routing
 * fields, starts devstat accounting, queues the bio on the "down"
 * queue and wakes the g_down thread.  Sanity of the request itself is
 * checked later by g_io_check() in g_io_schedule_down().
 */
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_GETATTR)) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hhu)", bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_DELETE|BIO_FLUSH)) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hhu)",
		    bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	bp->bio_flags |= BIO_ONQUEUE;

	binuptime(&bp->bio_t0);

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_down);
	if (g_collectstats & 1)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & 2)
		devstat_start_transaction(cp->stat, &bp->bio_t0);

	pp->nstart++;
	cp->nstart++;
	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
	g_bio_run_down.bio_queue_length++;
	g_bioq_unlock(&g_bio_run_down);

	/* Pass it on down. */
	wakeup(&g_wait_down);
}

/*
 * Complete a bio on behalf of a provider: record the error, end the
 * devstat transaction, queue the bio on the "up" queue and wake the
 * g_up thread.  As a special case, an ENOMEM completion is not
 * delivered to the consumer but retried by re-issuing the request,
 * while asking g_down to pace itself.
 */
void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_caller1 == bp->_bio_caller1,
	    ("bio_caller1 used by the provider %s", pp->name));
	KASSERT(bp->bio_caller2 == bp->_bio_caller2,
	    ("bio_caller2 used by the provider %s", pp->name));
	KASSERT(bp->bio_cflags == bp->_bio_cflags,
	    ("bio_cflags used by the provider %s", pp->name));
#endif
	cp = bp->bio_from;
	/* A bio with no consumer is completed directly via its done method. */
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: next two doesn't belong here
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_up);
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);

	cp->nend++;
	pp->nend++;
	if (error != ENOMEM) {
		bp->bio_error = error;
		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_up.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_up);
		wakeup(&g_wait_up);
		return;
	}
	g_bioq_unlock(&g_bio_run_up);

	/* ENOMEM: reset child accounting and retry the request. */
	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	g_io_request(bp, cp);
	pace++;
	return;
}

/*
 * Main loop of the g_down thread: dequeue bios from the "down" queue,
 * validate them with g_io_check(), truncate data transfers to the end
 * of the media, and hand them to the provider geom's start method.
 * Sleeping is forbidden while inside the start method.
 */
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;

	for(;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			/* PDROP releases the queue lock while we sleep. */
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", hz/10);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		/* Throttle while ENOMEM retries are outstanding. */
		if (pace > 0) {
			CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace);
			pause("g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of providers media. */
			/*
			 * XXX: What if we truncate because of offset being
			 * bad, not length?
			 */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
				if (excess > 0)
					CTR3(KTR_GEOM, "g_down truncated bio "
					    "%p provider %s by %d", bp,
					    bp->bio_to->name, excess);
			}
			/* Deliver zero length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				CTR2(KTR_GEOM, "g_down terminated 0-length "
				    "bp %p provider %s", bp, bp->bio_to->name);
				continue;
			}
			break;
		default:
			break;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

/*
 * Queue a task to be run by the g_up thread: the bio carries the
 * function and argument, goes on the task queue, and g_up is woken.
 */
void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{
	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}


/*
 * Main loop of the g_up thread: service the task queue first, then
 * completions from the "up" queue (both protected by the "up" queue's
 * lock — see bio_taskqueue()).  Task functions and biodone() run with
 * sleeping forbidden.
 */
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;
	for(;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%ld len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		/* PDROP releases the queue lock while we sleep. */
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", hz/10);
	}
}

/*
 * Synchronous read helper: allocates a buffer of the given length,
 * issues a BIO_READ and waits for it.  On success returns the buffer
 * (ownership passes to the caller, to be released with g_free()); on
 * failure frees the buffer and returns NULL.  If error is non-NULL
 * the bio's error code is stored there in either case.  The length
 * must be between one sector and MAXPHYS.
 */
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

/*
 * Synchronous write helper: issues a BIO_WRITE of the caller's buffer
 * and waits for completion.  Returns the bio error code.  The length
 * must be between one sector and MAXPHYS.
 */
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

/*
 * Print a human-readable one-line description of a bio (command plus
 * the fields relevant to that command) to the console.
 */
void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		printf("%s[%s]", pname, cmd);
		return;
	case BIO_READ:
		cmd = "READ";
		/* FALLTHROUGH: READ/WRITE/DELETE share the offset/length format. */
	case BIO_WRITE:
		if (cmd == NULL)
			cmd = "WRITE";
		/* FALLTHROUGH */
	case BIO_DELETE:
		if (cmd == NULL)
			cmd = "DELETE";
		printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
		    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
		return;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	/* NOTREACHED */
}