/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

static u_int pace;
static uma_zone_t biozone;

/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add/remove entries to the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook)
    g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq);

#include <machine/atomic.h>

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}

struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		/* Inherit classification info from the parent */
		bp2->bio_classifier1 = bp->bio_classifier1;
		bp2->bio_classifier2 = bp->bio_classifier2;
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return(bp2);
}

struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return(bp2);
}

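/*
 * Illustrative sketch (not part of this file): a GEOM class that sits
 * between a provider and a consumer typically clones an incoming bio in
 * its start method, points the clone at its own consumer and lets
 * g_std_done() propagate the completion back to the parent.  The names
 * example_start and EXAMPLE_DATA_OFFSET are hypothetical.
 */
#if 0
static void
example_start(struct bio *bp)
{
	struct g_geom *gp;
	struct bio *cbp;

	gp = bp->bio_to->geom;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		/* The M_NOWAIT allocation failed; report it upwards. */
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;		/* completes the parent for us */
	cbp->bio_offset += EXAMPLE_DATA_OFFSET;	/* hypothetical translation */
	g_io_request(cbp, LIST_FIRST(&gp->consumer));
}
#endif
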
void
g_io_init()
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation */
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* if provider is marked for error, don't disturb. */
	if (pp->error)
		return (pp->error);

	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not integral sector long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add/remove a classifier from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in this path.
 *
 * g_io_request() passes bio's that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates the field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */

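/*
 * Illustrative sketch (not part of this file): a minimal classifier hook.
 * It tags read requests by storing its argument in bio_classifier1 and
 * returns non-zero to signal that it classified the bio.  The names
 * example_classify, example_hook and example_tag are hypothetical.
 */
#if 0
static int example_tag;

static int
example_classify(void *arg, struct bio *bp)
{

	if (bp->bio_cmd != BIO_READ)
		return (0);		/* leave it for other classifiers */
	bp->bio_classifier1 = arg;	/* remember who classified it */
	return (1);
}

static struct g_classifier_hook example_hook = {
	.func = example_classify,
	.arg = &example_tag,
};

	/* In the class's init path: */
	g_register_classifier(&example_hook);
	/* ... and in its fini path: */
	g_unregister_classifier(&example_hook);
#endif
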
int
g_register_classifier(struct g_classifier_hook *hook)
{

	g_bioq_lock(&g_bio_run_down);
	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
	g_bioq_unlock(&g_bio_run_down);

	return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
	struct g_classifier_hook *entry;

	g_bioq_lock(&g_bio_run_down);
	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
		if (entry == hook) {
			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
			break;
		}
	}
	g_bioq_unlock(&g_bio_run_down);
}

static void
g_run_classifiers(struct bio *bp)
{
	struct g_classifier_hook *hook;
	int classified = 0;

	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
		classified |= hook->func(hook->arg, bp);

	if (!classified)
		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}

void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	int first;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_GETATTR)) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hhu)", bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_DELETE|BIO_FLUSH)) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hhu)",
		    bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	bp->bio_flags |= BIO_ONQUEUE;

	if (g_collectstats)
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 *
	 * We also use the lock to protect the list of classifiers.
	 */
	g_bioq_lock(&g_bio_run_down);

	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1)
		g_run_classifiers(bp);

	if (g_collectstats & 1)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & 2)
		devstat_start_transaction(cp->stat, &bp->bio_t0);

	pp->nstart++;
	cp->nstart++;
	first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
	g_bio_run_down.bio_queue_length++;
	g_bioq_unlock(&g_bio_run_down);

	/* Pass it on down. */
	if (first)
		wakeup(&g_wait_down);
}

void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	int first;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify bio's
	 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
	 * flag means it's an expected behaviour for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
	    "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: the next two don't belong here
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_up);
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);

	cp->nend++;
	pp->nend++;
	if (error != ENOMEM) {
		bp->bio_error = error;
		first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_up.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_up);
		if (first)
			wakeup(&g_wait_up);
		return;
	}
	g_bioq_unlock(&g_bio_run_up);

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	g_io_request(bp, cp);
	pace++;
	return;
}

void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;

	for(;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace);
			pause("g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of provider's media. */
			/*
			 * XXX: What if we truncate because of offset being
			 * bad, not length?
			 */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
				if (excess > 0)
					CTR3(KTR_GEOM, "g_down truncated bio "
					    "%p provider %s by %d", bp,
					    bp->bio_to->name, excess);
			}
			/* Deliver zero length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				CTR2(KTR_GEOM, "g_down terminated 0-length "
				    "bp %p provider %s", bp, bp->bio_to->name);
				continue;
			}
			break;
		default:
			break;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{
	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}

void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;
	for(;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%jd len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", 0);
	}
}

void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}

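/*
 * Illustrative sketch (not part of this file): reading a class's metadata
 * sector with g_read_data(), as a taste or attach routine might do.  The
 * variable names and the error handling are hypothetical; note that the
 * buffer returned on success must be released with g_free().
 */
#if 0
	u_char *md;
	int error;

	md = g_read_data(cp, pp->mediasize - pp->sectorsize,
	    pp->sectorsize, &error);
	if (md == NULL) {
		/* The read failed; 'error' holds the reason. */
		return;
	}
	/* ... inspect the metadata ... */
	g_free(md);
#endif
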
void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		printf("%s[%s]", pname, cmd);
		return;
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}
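
/*
 * Illustrative sketch (not part of this file): g_print_bio() is handy when
 * reporting a failed request, e.g. from a done routine; the surrounding
 * printf()s are hypothetical.
 */
#if 0
	if (bp->bio_error != 0) {
		printf("GEOM: request failed: ");
		g_print_bio(bp);
		printf(" error = %d\n", bp->bio_error);
	}
#endif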