/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

static u_int pace;
static uma_zone_t biozone;

/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add/remove entries to the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook)
    g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq);

#include <machine/atomic.h>

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}

struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 * BIO_ORDERED flag may be used by disk drivers to enforce
		 * ordering restrictions, so this flag needs to be cloned.
		 * Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags & BIO_ORDERED;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		/* Inherit classification info from the parent */
		bp2->bio_classifier1 = bp->bio_classifier1;
		bp2->bio_classifier2 = bp->bio_classifier2;
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return(bp2);
}
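
/*
 * Illustrative sketch (not part of the original code): a transparent GEOM
 * class typically clones an incoming bio in its start method, points the
 * clone at its own consumer, and completes the parent from the clone's
 * bio_done routine.  The geom, consumer and function names below are
 * hypothetical; this only sketches the usual g_clone_bio()/g_io_deliver()
 * pairing under the stated assumptions.
 */
#if 0
static void
g_example_done(struct bio *bp2)
{
	struct bio *bp;

	bp = bp2->bio_parent;
	bp->bio_completed = bp2->bio_completed;
	bp->bio_error = bp2->bio_error;
	g_destroy_bio(bp2);
	g_io_deliver(bp, bp->bio_error);
}

static void
g_example_start(struct bio *bp)
{
	struct g_geom *gp;
	struct bio *bp2;

	gp = bp->bio_to->geom;
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		/* ENOMEM makes g_io_deliver() requeue and pace the request. */
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_example_done;
	g_io_request(bp2, LIST_FIRST(&gp->consumer));
}
#endif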
struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return(bp2);
}

void
g_io_init()
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}
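
/*
 * Illustrative sketch (not in the original file): a consumer that already
 * holds read access can query a provider attribute synchronously with
 * g_io_getattr(), much as g_io_flush() above issues a synchronous BIO_FLUSH.
 * The attribute name, buffer and function name below are only an example;
 * which attributes exist depends on the provider.
 */
#if 0
static int
g_example_get_ident(struct g_consumer *cp, char *buf, int buflen)
{
	int len;

	len = buflen;
	/* On success, len is updated to the number of bytes returned. */
	return (g_io_getattr("GEOM::ident", cp, &len, buf));
}
#endif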
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters dont allow the operation */
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* if provider is marked for error, don't disturb. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not integral sector long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add/remove a classifier from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in this path.
 *
 * g_io_request() passes bio's that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates the field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */

int
g_register_classifier(struct g_classifier_hook *hook)
{

	g_bioq_lock(&g_bio_run_down);
	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
	g_bioq_unlock(&g_bio_run_down);

	return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
	struct g_classifier_hook *entry;

	g_bioq_lock(&g_bio_run_down);
	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
		if (entry == hook) {
			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
			break;
		}
	}
	g_bioq_unlock(&g_bio_run_down);
}

static void
g_run_classifiers(struct bio *bp)
{
	struct g_classifier_hook *hook;
	int classified = 0;

	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
		classified |= hook->func(hook->arg, bp);

	if (!classified)
		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}
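
/*
 * Illustrative sketch (not in the original file): a classifier is a
 * g_classifier_hook whose func inspects a bio and, if it recognizes it,
 * stores a cookie in bio_classifier1/bio_classifier2 and returns non-zero.
 * Once registered, the hook is consulted from g_io_request() for every
 * not-yet-classified bio.  The names below are hypothetical.
 */
#if 0
static int
g_example_classify(void *arg, struct bio *bp)
{

	if (bp->bio_cmd != BIO_READ && bp->bio_cmd != BIO_WRITE)
		return (0);		/* not ours, leave it unclassified */
	bp->bio_classifier1 = arg;	/* remember who claimed this bio */
	return (1);
}

static struct g_classifier_hook g_example_hook = {
	.func = g_example_classify,
	.arg = &g_example_hook,
};

/* Typically called from a scheduler's or module's init path. */
static void
g_example_classifier_attach(void)
{

	g_register_classifier(&g_example_hook);
}
#endif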
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	int first;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_GETATTR)) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hhu)", bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_DELETE|BIO_FLUSH)) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hhu)",
		    bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	bp->bio_flags |= BIO_ONQUEUE;

	if (g_collectstats)
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 *
	 * We also use the lock to protect the list of classifiers.
	 */
	g_bioq_lock(&g_bio_run_down);

	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1)
		g_run_classifiers(bp);

	if (g_collectstats & 1)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & 2)
		devstat_start_transaction(cp->stat, &bp->bio_t0);

	pp->nstart++;
	cp->nstart++;
	first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
	g_bio_run_down.bio_queue_length++;
	g_bioq_unlock(&g_bio_run_down);

	/* Pass it on down. */
	if (first)
		wakeup(&g_wait_down);
}
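
/*
 * Illustrative sketch (not in the original file): g_io_request() is
 * asynchronous.  A caller either supplies a bio_done callback that is run
 * from the g_up thread, or leaves bio_done NULL and sleeps in biowait(),
 * as g_io_getattr() and g_io_flush() above do.  The helper name and wmesg
 * below are hypothetical.
 */
#if 0
static int
g_example_sync_io(struct g_consumer *cp, struct bio *bp)
{

	bp->bio_done = NULL;		/* biodone() will wake us instead */
	g_io_request(bp, cp);
	return (biowait(bp, "gexampl"));
}
#endif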
void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	int first;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify bio's
	 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
	 * flag means it's an expected behaviour for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: next two doesn't belong here
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_up);
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);

	cp->nend++;
	pp->nend++;
	if (error != ENOMEM) {
		bp->bio_error = error;
		first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_up.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_up);
		if (first)
			wakeup(&g_wait_up);
		return;
	}
	g_bioq_unlock(&g_bio_run_up);

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace++;
	return;
}

void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;

	for(;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace);
			pause("g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of providers media. */
			/*
			 * XXX: What if we truncate because of offset being
			 * bad, not length?
			 */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
				if (excess > 0)
					CTR3(KTR_GEOM, "g_down truncated bio "
					    "%p provider %s by %d", bp,
					    bp->bio_to->name, excess);
			}
			/* Deliver zero length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				CTR2(KTR_GEOM, "g_down terminated 0-length "
				    "bp %p provider %s", bp, bp->bio_to->name);
				continue;
			}
			break;
		default:
			break;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{
	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}


void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;
	for(;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%jd len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", 0);
	}
}

void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}
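
/*
 * Illustrative sketch (not in the original file): a common use of
 * g_read_data() is reading a class's metadata from the last sector of a
 * provider while tasting it.  The consumer is assumed to be attached and
 * to already hold read access; the returned buffer is released with
 * g_free().  The function name is hypothetical.
 */
#if 0
static void *
g_example_read_metadata(struct g_consumer *cp, int *error)
{
	struct g_provider *pp;

	pp = cp->provider;
	/* One sector at the very end of the media. */
	return (g_read_data(cp, pp->mediasize - pp->sectorsize,
	    pp->sectorsize, error));
}
#endif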
int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}

void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		printf("%s[%s]", pname, cmd);
		return;
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}