/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

static u_int pace;
static uma_zone_t biozone;

/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add entries to and remove them from the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook)
    g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq);

#include <machine/atomic.h>

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}

struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		/* Inherit classification info from the parent */
		bp2->bio_classifier1 = bp->bio_classifier1;
		bp2->bio_classifier2 = bp->bio_classifier2;
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}
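
#if 0
/*
 * Example (a sketch only, not compiled in): the common pattern for a
 * transforming GEOM class using g_clone_bio() in its ->start() method.
 * The clone is pointed down the geom's own consumer and g_std_done()
 * finishes the parent when the clone completes.  The name example_start
 * and the single-consumer lookup are illustrative assumptions, not part
 * of this file.
 */
static void
example_start(struct bio *bp)
{
	struct bio *cbp;

	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		/* The M_NOWAIT allocation failed; report ENOMEM upward. */
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
}
#endif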

void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL, NULL, NULL, 0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if the access counters don't allow the operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked with an error, don't disturb it. */
	if (pp->error)
		return (pp->error);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* A zero sectorsize probably means a lack of media. */
		if (pp->sectorsize == 0)
			return (ENXIO);
		/* Reject I/O not on a sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not an integral number of sectors long. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add/remove a classifier from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in that path.
 *
 * g_io_request() passes bios that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates a field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */
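
#if 0
/*
 * Example (a sketch only, not compiled in): a minimal classifier hook.
 * This assumes struct g_classifier_hook carries the func/arg pair that
 * g_run_classifiers() below invokes; the names example_classify and
 * example_hook are illustrative.  A hook that stores a result in
 * bio_classifier1/2 must return non-zero so the bio is not marked
 * BIO_NOTCLASSIFIED.
 */
static int
example_classify(void *arg, struct bio *bp)
{

	if (bp->bio_cmd != BIO_WRITE)
		return (0);		/* Not ours; leave unclassified. */
	bp->bio_classifier1 = arg;	/* Any non-NULL cookie will do. */
	return (1);
}

static struct g_classifier_hook example_hook = {
	.func = example_classify,
	.arg = &example_hook,
};

/* During initialization: g_register_classifier(&example_hook); */
#endif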

int
g_register_classifier(struct g_classifier_hook *hook)
{

	g_bioq_lock(&g_bio_run_down);
	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
	g_bioq_unlock(&g_bio_run_down);

	return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
	struct g_classifier_hook *entry;

	g_bioq_lock(&g_bio_run_down);
	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
		if (entry == hook) {
			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
			break;
		}
	}
	g_bioq_unlock(&g_bio_run_down);
}

static void
g_run_classifiers(struct bio *bp)
{
	struct g_classifier_hook *hook;
	int classified = 0;

	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
		classified |= hook->func(hook->arg, bp);

	if (!classified)
		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}

void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	int first;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember the consumer's private fields, so we can detect if they
	 * were modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_GETATTR)) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hhu)", bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_DELETE|BIO_FLUSH)) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hhu)",
		    bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	bp->bio_flags |= BIO_ONQUEUE;

	binuptime(&bp->bio_t0);

	/*
	 * The statistics collection itself is lockless, but we cannot
	 * update one instance of the statistics from more than one
	 * thread at a time, so grab the lock first.
	 *
	 * We also use the lock to protect the list of classifiers.
	 */
	g_bioq_lock(&g_bio_run_down);

	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1)
		g_run_classifiers(bp);

	if (g_collectstats & 1)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & 2)
		devstat_start_transaction(cp->stat, &bp->bio_t0);

	pp->nstart++;
	cp->nstart++;
	first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
	g_bio_run_down.bio_queue_length++;
	g_bioq_unlock(&g_bio_run_down);

	/* Pass it on down. */
	if (first)
		wakeup(&g_wait_down);
}
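
#if 0
/*
 * Example (a sketch only, not compiled in): asynchronous submission.
 * A caller that cannot sleep in biowait() sets bio_done to its own
 * callback; g_io_deliver() queues the completed bio on the "up" queue
 * and the g_up thread invokes the callback via biodone().  The name
 * example_done is illustrative.
 */
static void
example_done(struct bio *bp)
{

	if (bp->bio_error != 0)
		printf("I/O error %d on %s\n", bp->bio_error,
		    bp->bio_to->name);
	g_destroy_bio(bp);
}

/*
 * Usage, from a context that may not sleep:
 *	bp = g_new_bio();			(check for NULL)
 *	bp->bio_cmd = BIO_READ;			(fill in offset/length/data)
 *	bp->bio_done = example_done;
 *	g_io_request(bp, cp);
 */
#endif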

void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	int first;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes (GJournal in particular) can modify a bio's
	 * private fields while the bio is in transit; the
	 * G_GEOM_VOLATILE_BIO flag marks that as expected behaviour
	 * for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: the next two don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	/*
	 * The statistics collection itself is lockless, but we cannot
	 * update one instance of the statistics from more than one
	 * thread at a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_up);
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);

	cp->nend++;
	pp->nend++;
	if (error != ENOMEM) {
		bp->bio_error = error;
		first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_up.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_up);
		if (first)
			wakeup(&g_wait_up);
		return;
	}
	g_bioq_unlock(&g_bio_run_up);

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	g_io_request(bp, cp);
	pace++;
	return;
}

void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace);
			pause("g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of the provider's media. */
			/*
			 * XXX: What if we truncate because of offset being
			 * bad, not length?
			 */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
				if (excess > 0)
					CTR3(KTR_GEOM, "g_down truncated bio "
					    "%p provider %s by %d", bp,
					    bp->bio_to->name, excess);
			}
			/* Deliver zero length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				CTR2(KTR_GEOM, "g_down terminated 0-length "
				    "bp %p provider %s", bp, bp->bio_to->name);
				continue;
			}
			break;
		default:
			break;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{

	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}
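
#if 0
/*
 * Example (a sketch only, not compiled in): deferring work to the g_up
 * thread with bio_taskqueue().  The task runs with sleeping disabled
 * (see g_io_schedule_up() below), so it must not block.  The name
 * example_task is illustrative.
 */
static void
example_task(void *arg)
{
	struct bio *bp = arg;

	/* Complete the bio from the g_up thread instead of here. */
	g_io_deliver(bp, 0);
}

/* From some class: bio_taskqueue(bp, example_task, bp); */
#endif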

void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%jd len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", 0);
	}
}

void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}
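
#if 0
/*
 * Example (a sketch only, not compiled in): reading one sector with
 * g_read_data(), as a class' taste method typically does.  The consumer
 * needs read access (acr > 0), and the caller owns, and must g_free(),
 * the returned buffer.  The name example_read_sector0 is illustrative.
 */
static int
example_read_sector0(struct g_consumer *cp, u_char **bufp)
{
	int error;

	*bufp = g_read_data(cp, 0, cp->provider->sectorsize, &error);
	if (*bufp == NULL)
		return (error);
	return (0);
}
#endif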
cmd = "DELETE"; 785 printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd, 786 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); 787 return; 788 default: 789 cmd = "UNKNOWN"; 790 printf("%s[%s()]", pname, cmd); 791 return; 792 } 793 /* NOTREACHED */ 794 } 795