1 /*- 2 * Copyright (c) 2002 Poul-Henning Kamp 3 * Copyright (c) 2002 Networks Associates Technology, Inc. 4 * Copyright (c) 2013 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * This software was developed for the FreeBSD Project by Poul-Henning Kamp 8 * and NAI Labs, the Security Research Division of Network Associates, Inc. 9 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 10 * DARPA CHATS research program. 11 * 12 * Portions of this software were developed by Konstantin Belousov 13 * under sponsorship from the FreeBSD Foundation. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. The names of the authors may not be used to endorse or promote 24 * products derived from this software without specific prior written 25 * permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * SUCH DAMAGE. 38 */ 39 40 #include <sys/cdefs.h> 41 __FBSDID("$FreeBSD$"); 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/kernel.h> 46 #include <sys/malloc.h> 47 #include <sys/bio.h> 48 #include <sys/ktr.h> 49 #include <sys/proc.h> 50 #include <sys/stack.h> 51 #include <sys/sysctl.h> 52 #include <sys/vmem.h> 53 54 #include <sys/errno.h> 55 #include <geom/geom.h> 56 #include <geom/geom_int.h> 57 #include <sys/devicestat.h> 58 59 #include <vm/uma.h> 60 #include <vm/vm.h> 61 #include <vm/vm_param.h> 62 #include <vm/vm_kern.h> 63 #include <vm/vm_page.h> 64 #include <vm/vm_object.h> 65 #include <vm/vm_extern.h> 66 #include <vm/vm_map.h> 67 68 static int g_io_transient_map_bio(struct bio *bp); 69 70 static struct g_bioq g_bio_run_down; 71 static struct g_bioq g_bio_run_up; 72 static struct g_bioq g_bio_run_task; 73 74 /* 75 * Pace is a hint that we've had some trouble recently allocating 76 * bios, so we should back off trying to send I/O down the stack 77 * a bit to let the problem resolve. When pacing, we also turn 78 * off direct dispatch to also reduce memory pressure from I/Os 79 * there, at the expxense of some added latency while the memory 80 * pressures exist. See g_io_schedule_down() for more details 81 * and limitations. 82 */ 83 static volatile u_int pace; 84 85 static uma_zone_t biozone; 86 87 /* 88 * The head of the list of classifiers used in g_io_request. 89 * Use g_register_classifier() and g_unregister_classifier() 90 * to add/remove entries to the list. 91 * Classifiers are invoked in registration order. 92 */ 93 static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook) 94 g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq); 95 96 #include <machine/atomic.h> 97 98 static void 99 g_bioq_lock(struct g_bioq *bq) 100 { 101 102 mtx_lock(&bq->bio_queue_lock); 103 } 104 105 static void 106 g_bioq_unlock(struct g_bioq *bq) 107 { 108 109 mtx_unlock(&bq->bio_queue_lock); 110 } 111 112 #if 0 113 static void 114 g_bioq_destroy(struct g_bioq *bq) 115 { 116 117 mtx_destroy(&bq->bio_queue_lock); 118 } 119 #endif 120 121 static void 122 g_bioq_init(struct g_bioq *bq) 123 { 124 125 TAILQ_INIT(&bq->bio_queue); 126 mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF); 127 } 128 129 static struct bio * 130 g_bioq_first(struct g_bioq *bq) 131 { 132 struct bio *bp; 133 134 bp = TAILQ_FIRST(&bq->bio_queue); 135 if (bp != NULL) { 136 KASSERT((bp->bio_flags & BIO_ONQUEUE), 137 ("Bio not on queue bp=%p target %p", bp, bq)); 138 bp->bio_flags &= ~BIO_ONQUEUE; 139 TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue); 140 bq->bio_queue_length--; 141 } 142 return (bp); 143 } 144 145 struct bio * 146 g_new_bio(void) 147 { 148 struct bio *bp; 149 150 bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO); 151 #ifdef KTR 152 if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) { 153 struct stack st; 154 155 CTR1(KTR_GEOM, "g_new_bio(): %p", bp); 156 stack_save(&st); 157 CTRSTACK(KTR_GEOM, &st, 3, 0); 158 } 159 #endif 160 return (bp); 161 } 162 163 struct bio * 164 g_alloc_bio(void) 165 { 166 struct bio *bp; 167 168 bp = uma_zalloc(biozone, M_WAITOK | M_ZERO); 169 #ifdef KTR 170 if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) { 171 struct stack st; 172 173 CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp); 174 stack_save(&st); 175 CTRSTACK(KTR_GEOM, &st, 3, 0); 176 } 177 #endif 178 return (bp); 179 } 180 181 void 182 g_destroy_bio(struct bio *bp) 183 { 184 #ifdef KTR 185 if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) { 186 struct stack st; 187 188 CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp); 189 stack_save(&st); 190 CTRSTACK(KTR_GEOM, &st, 3, 0); 191 } 192 #endif 193 uma_zfree(biozone, bp); 194 } 195 196 struct bio * 197 g_clone_bio(struct bio *bp) 198 { 199 struct bio *bp2; 200 201 bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO); 202 if (bp2 != NULL) { 203 bp2->bio_parent = bp; 204 bp2->bio_cmd = bp->bio_cmd; 205 /* 206 * BIO_ORDERED flag may be used by disk drivers to enforce 207 * ordering restrictions, so this flag needs to be cloned. 208 * BIO_UNMAPPED and BIO_VLIST should be inherited, to properly 209 * indicate which way the buffer is passed. 210 * Other bio flags are not suitable for cloning. 211 */ 212 bp2->bio_flags = bp->bio_flags & 213 (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST); 214 bp2->bio_length = bp->bio_length; 215 bp2->bio_offset = bp->bio_offset; 216 bp2->bio_data = bp->bio_data; 217 bp2->bio_ma = bp->bio_ma; 218 bp2->bio_ma_n = bp->bio_ma_n; 219 bp2->bio_ma_offset = bp->bio_ma_offset; 220 bp2->bio_attribute = bp->bio_attribute; 221 /* Inherit classification info from the parent */ 222 bp2->bio_classifier1 = bp->bio_classifier1; 223 bp2->bio_classifier2 = bp->bio_classifier2; 224 bp->bio_children++; 225 } 226 #ifdef KTR 227 if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) { 228 struct stack st; 229 230 CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2); 231 stack_save(&st); 232 CTRSTACK(KTR_GEOM, &st, 3, 0); 233 } 234 #endif 235 return(bp2); 236 } 237 238 struct bio * 239 g_duplicate_bio(struct bio *bp) 240 { 241 struct bio *bp2; 242 243 bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO); 244 bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST); 245 bp2->bio_parent = bp; 246 bp2->bio_cmd = bp->bio_cmd; 247 bp2->bio_length = bp->bio_length; 248 bp2->bio_offset = bp->bio_offset; 249 bp2->bio_data = bp->bio_data; 250 bp2->bio_ma = bp->bio_ma; 251 bp2->bio_ma_n = bp->bio_ma_n; 252 bp2->bio_ma_offset = bp->bio_ma_offset; 253 bp2->bio_attribute = bp->bio_attribute; 254 bp->bio_children++; 255 #ifdef KTR 256 if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) { 257 struct stack st; 258 259 CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2); 260 stack_save(&st); 261 CTRSTACK(KTR_GEOM, &st, 3, 0); 262 } 263 #endif 264 return(bp2); 265 } 266 267 void 268 g_reset_bio(struct bio *bp) 269 { 270 271 bzero(bp, sizeof(*bp)); 272 } 273 274 void 275 g_io_init() 276 { 277 278 g_bioq_init(&g_bio_run_down); 279 g_bioq_init(&g_bio_run_up); 280 g_bioq_init(&g_bio_run_task); 281 biozone = uma_zcreate("g_bio", sizeof (struct bio), 282 NULL, NULL, 283 NULL, NULL, 284 0, 0); 285 } 286 287 int 288 g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr) 289 { 290 struct bio *bp; 291 int error; 292 293 g_trace(G_T_BIO, "bio_getattr(%s)", attr); 294 bp = g_alloc_bio(); 295 bp->bio_cmd = BIO_GETATTR; 296 bp->bio_done = NULL; 297 bp->bio_attribute = attr; 298 bp->bio_length = *len; 299 bp->bio_data = ptr; 300 g_io_request(bp, cp); 301 error = biowait(bp, "ggetattr"); 302 *len = bp->bio_completed; 303 g_destroy_bio(bp); 304 return (error); 305 } 306 307 int 308 g_io_flush(struct g_consumer *cp) 309 { 310 struct bio *bp; 311 int error; 312 313 g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name); 314 bp = g_alloc_bio(); 315 bp->bio_cmd = BIO_FLUSH; 316 bp->bio_flags |= BIO_ORDERED; 317 bp->bio_done = NULL; 318 bp->bio_attribute = NULL; 319 bp->bio_offset = cp->provider->mediasize; 320 bp->bio_length = 0; 321 bp->bio_data = NULL; 322 g_io_request(bp, cp); 323 error = biowait(bp, "gflush"); 324 g_destroy_bio(bp); 325 return (error); 326 } 327 328 static int 329 g_io_check(struct bio *bp) 330 { 331 struct g_consumer *cp; 332 struct g_provider *pp; 333 off_t excess; 334 int error; 335 336 cp = bp->bio_from; 337 pp = bp->bio_to; 338 339 /* Fail if access counters dont allow the operation */ 340 switch(bp->bio_cmd) { 341 case BIO_READ: 342 case BIO_GETATTR: 343 if (cp->acr == 0) 344 return (EPERM); 345 break; 346 case BIO_WRITE: 347 case BIO_DELETE: 348 case BIO_FLUSH: 349 if (cp->acw == 0) 350 return (EPERM); 351 break; 352 default: 353 return (EPERM); 354 } 355 /* if provider is marked for error, don't disturb. */ 356 if (pp->error) 357 return (pp->error); 358 if (cp->flags & G_CF_ORPHAN) 359 return (ENXIO); 360 361 switch(bp->bio_cmd) { 362 case BIO_READ: 363 case BIO_WRITE: 364 case BIO_DELETE: 365 /* Zero sectorsize or mediasize is probably a lack of media. */ 366 if (pp->sectorsize == 0 || pp->mediasize == 0) 367 return (ENXIO); 368 /* Reject I/O not on sector boundary */ 369 if (bp->bio_offset % pp->sectorsize) 370 return (EINVAL); 371 /* Reject I/O not integral sector long */ 372 if (bp->bio_length % pp->sectorsize) 373 return (EINVAL); 374 /* Reject requests before or past the end of media. */ 375 if (bp->bio_offset < 0) 376 return (EIO); 377 if (bp->bio_offset > pp->mediasize) 378 return (EIO); 379 380 /* Truncate requests to the end of providers media. */ 381 excess = bp->bio_offset + bp->bio_length; 382 if (excess > bp->bio_to->mediasize) { 383 KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 || 384 round_page(bp->bio_ma_offset + 385 bp->bio_length) / PAGE_SIZE == bp->bio_ma_n, 386 ("excess bio %p too short", bp)); 387 excess -= bp->bio_to->mediasize; 388 bp->bio_length -= excess; 389 if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 390 bp->bio_ma_n = round_page(bp->bio_ma_offset + 391 bp->bio_length) / PAGE_SIZE; 392 } 393 if (excess > 0) 394 CTR3(KTR_GEOM, "g_down truncated bio " 395 "%p provider %s by %d", bp, 396 bp->bio_to->name, excess); 397 } 398 399 /* Deliver zero length transfers right here. */ 400 if (bp->bio_length == 0) { 401 CTR2(KTR_GEOM, "g_down terminated 0-length " 402 "bp %p provider %s", bp, bp->bio_to->name); 403 return (0); 404 } 405 406 if ((bp->bio_flags & BIO_UNMAPPED) != 0 && 407 (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 && 408 (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) { 409 if ((error = g_io_transient_map_bio(bp)) >= 0) 410 return (error); 411 } 412 break; 413 default: 414 break; 415 } 416 return (EJUSTRETURN); 417 } 418 419 /* 420 * bio classification support. 421 * 422 * g_register_classifier() and g_unregister_classifier() 423 * are used to add/remove a classifier from the list. 424 * The list is protected using the g_bio_run_down lock, 425 * because the classifiers are called in this path. 426 * 427 * g_io_request() passes bio's that are not already classified 428 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers(). 429 * Classifiers can store their result in the two fields 430 * bio_classifier1 and bio_classifier2. 431 * A classifier that updates one of the fields should 432 * return a non-zero value. 433 * If no classifier updates the field, g_run_classifiers() sets 434 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls. 435 */ 436 437 int 438 g_register_classifier(struct g_classifier_hook *hook) 439 { 440 441 g_bioq_lock(&g_bio_run_down); 442 TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link); 443 g_bioq_unlock(&g_bio_run_down); 444 445 return (0); 446 } 447 448 void 449 g_unregister_classifier(struct g_classifier_hook *hook) 450 { 451 struct g_classifier_hook *entry; 452 453 g_bioq_lock(&g_bio_run_down); 454 TAILQ_FOREACH(entry, &g_classifier_tailq, link) { 455 if (entry == hook) { 456 TAILQ_REMOVE(&g_classifier_tailq, hook, link); 457 break; 458 } 459 } 460 g_bioq_unlock(&g_bio_run_down); 461 } 462 463 static void 464 g_run_classifiers(struct bio *bp) 465 { 466 struct g_classifier_hook *hook; 467 int classified = 0; 468 469 TAILQ_FOREACH(hook, &g_classifier_tailq, link) 470 classified |= hook->func(hook->arg, bp); 471 472 if (!classified) 473 bp->bio_classifier1 = BIO_NOTCLASSIFIED; 474 } 475 476 void 477 g_io_request(struct bio *bp, struct g_consumer *cp) 478 { 479 struct g_provider *pp; 480 struct mtx *mtxp; 481 int direct, error, first; 482 uint8_t cmd; 483 484 KASSERT(cp != NULL, ("NULL cp in g_io_request")); 485 KASSERT(bp != NULL, ("NULL bp in g_io_request")); 486 pp = cp->provider; 487 KASSERT(pp != NULL, ("consumer not attached in g_io_request")); 488 #ifdef DIAGNOSTIC 489 KASSERT(bp->bio_driver1 == NULL, 490 ("bio_driver1 used by the consumer (geom %s)", cp->geom->name)); 491 KASSERT(bp->bio_driver2 == NULL, 492 ("bio_driver2 used by the consumer (geom %s)", cp->geom->name)); 493 KASSERT(bp->bio_pflags == 0, 494 ("bio_pflags used by the consumer (geom %s)", cp->geom->name)); 495 /* 496 * Remember consumer's private fields, so we can detect if they were 497 * modified by the provider. 498 */ 499 bp->_bio_caller1 = bp->bio_caller1; 500 bp->_bio_caller2 = bp->bio_caller2; 501 bp->_bio_cflags = bp->bio_cflags; 502 #endif 503 504 cmd = bp->bio_cmd; 505 if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) { 506 KASSERT(bp->bio_data != NULL, 507 ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd)); 508 } 509 if (cmd == BIO_DELETE || cmd == BIO_FLUSH) { 510 KASSERT(bp->bio_data == NULL, 511 ("non-NULL bp->data in g_io_request(cmd=%hu)", 512 bp->bio_cmd)); 513 } 514 if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) { 515 KASSERT(bp->bio_offset % cp->provider->sectorsize == 0, 516 ("wrong offset %jd for sectorsize %u", 517 bp->bio_offset, cp->provider->sectorsize)); 518 KASSERT(bp->bio_length % cp->provider->sectorsize == 0, 519 ("wrong length %jd for sectorsize %u", 520 bp->bio_length, cp->provider->sectorsize)); 521 } 522 523 g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d", 524 bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd); 525 526 bp->bio_from = cp; 527 bp->bio_to = pp; 528 bp->bio_error = 0; 529 bp->bio_completed = 0; 530 531 KASSERT(!(bp->bio_flags & BIO_ONQUEUE), 532 ("Bio already on queue bp=%p", bp)); 533 if ((g_collectstats & G_STATS_CONSUMERS) != 0 || 534 ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL)) 535 binuptime(&bp->bio_t0); 536 else 537 getbinuptime(&bp->bio_t0); 538 539 #ifdef GET_STACK_USAGE 540 direct = (cp->flags & G_CF_DIRECT_SEND) != 0 && 541 (pp->flags & G_PF_DIRECT_RECEIVE) != 0 && 542 !g_is_geom_thread(curthread) && 543 ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 || 544 (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) && 545 pace == 0; 546 if (direct) { 547 /* Block direct execution if less then half of stack left. */ 548 size_t st, su; 549 GET_STACK_USAGE(st, su); 550 if (su * 2 > st) 551 direct = 0; 552 } 553 #else 554 direct = 0; 555 #endif 556 557 if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1) { 558 g_bioq_lock(&g_bio_run_down); 559 g_run_classifiers(bp); 560 g_bioq_unlock(&g_bio_run_down); 561 } 562 563 /* 564 * The statistics collection is lockless, as such, but we 565 * can not update one instance of the statistics from more 566 * than one thread at a time, so grab the lock first. 567 */ 568 mtxp = mtx_pool_find(mtxpool_sleep, pp); 569 mtx_lock(mtxp); 570 if (g_collectstats & G_STATS_PROVIDERS) 571 devstat_start_transaction(pp->stat, &bp->bio_t0); 572 if (g_collectstats & G_STATS_CONSUMERS) 573 devstat_start_transaction(cp->stat, &bp->bio_t0); 574 pp->nstart++; 575 cp->nstart++; 576 mtx_unlock(mtxp); 577 578 if (direct) { 579 error = g_io_check(bp); 580 if (error >= 0) { 581 CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p " 582 "provider %s returned %d", bp, bp->bio_to->name, 583 error); 584 g_io_deliver(bp, error); 585 return; 586 } 587 bp->bio_to->geom->start(bp); 588 } else { 589 g_bioq_lock(&g_bio_run_down); 590 first = TAILQ_EMPTY(&g_bio_run_down.bio_queue); 591 TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue); 592 bp->bio_flags |= BIO_ONQUEUE; 593 g_bio_run_down.bio_queue_length++; 594 g_bioq_unlock(&g_bio_run_down); 595 /* Pass it on down. */ 596 if (first) 597 wakeup(&g_wait_down); 598 } 599 } 600 601 void 602 g_io_deliver(struct bio *bp, int error) 603 { 604 struct bintime now; 605 struct g_consumer *cp; 606 struct g_provider *pp; 607 struct mtx *mtxp; 608 int direct, first; 609 610 KASSERT(bp != NULL, ("NULL bp in g_io_deliver")); 611 pp = bp->bio_to; 612 KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver")); 613 cp = bp->bio_from; 614 if (cp == NULL) { 615 bp->bio_error = error; 616 bp->bio_done(bp); 617 return; 618 } 619 KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver")); 620 KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver")); 621 #ifdef DIAGNOSTIC 622 /* 623 * Some classes - GJournal in particular - can modify bio's 624 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO 625 * flag means it's an expected behaviour for that particular geom. 626 */ 627 if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) { 628 KASSERT(bp->bio_caller1 == bp->_bio_caller1, 629 ("bio_caller1 used by the provider %s", pp->name)); 630 KASSERT(bp->bio_caller2 == bp->_bio_caller2, 631 ("bio_caller2 used by the provider %s", pp->name)); 632 KASSERT(bp->bio_cflags == bp->_bio_cflags, 633 ("bio_cflags used by the provider %s", pp->name)); 634 } 635 #endif 636 KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0")); 637 KASSERT(bp->bio_completed <= bp->bio_length, 638 ("bio_completed can't be greater than bio_length")); 639 640 g_trace(G_T_BIO, 641 "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd", 642 bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error, 643 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); 644 645 KASSERT(!(bp->bio_flags & BIO_ONQUEUE), 646 ("Bio already on queue bp=%p", bp)); 647 648 /* 649 * XXX: next two doesn't belong here 650 */ 651 bp->bio_bcount = bp->bio_length; 652 bp->bio_resid = bp->bio_bcount - bp->bio_completed; 653 654 #ifdef GET_STACK_USAGE 655 direct = (pp->flags & G_PF_DIRECT_SEND) && 656 (cp->flags & G_CF_DIRECT_RECEIVE) && 657 !g_is_geom_thread(curthread); 658 if (direct) { 659 /* Block direct execution if less then half of stack left. */ 660 size_t st, su; 661 GET_STACK_USAGE(st, su); 662 if (su * 2 > st) 663 direct = 0; 664 } 665 #else 666 direct = 0; 667 #endif 668 669 /* 670 * The statistics collection is lockless, as such, but we 671 * can not update one instance of the statistics from more 672 * than one thread at a time, so grab the lock first. 673 */ 674 if ((g_collectstats & G_STATS_CONSUMERS) != 0 || 675 ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL)) 676 binuptime(&now); 677 mtxp = mtx_pool_find(mtxpool_sleep, cp); 678 mtx_lock(mtxp); 679 if (g_collectstats & G_STATS_PROVIDERS) 680 devstat_end_transaction_bio_bt(pp->stat, bp, &now); 681 if (g_collectstats & G_STATS_CONSUMERS) 682 devstat_end_transaction_bio_bt(cp->stat, bp, &now); 683 cp->nend++; 684 pp->nend++; 685 mtx_unlock(mtxp); 686 687 if (error != ENOMEM) { 688 bp->bio_error = error; 689 if (direct) { 690 biodone(bp); 691 } else { 692 g_bioq_lock(&g_bio_run_up); 693 first = TAILQ_EMPTY(&g_bio_run_up.bio_queue); 694 TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue); 695 bp->bio_flags |= BIO_ONQUEUE; 696 g_bio_run_up.bio_queue_length++; 697 g_bioq_unlock(&g_bio_run_up); 698 if (first) 699 wakeup(&g_wait_up); 700 } 701 return; 702 } 703 704 if (bootverbose) 705 printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name); 706 bp->bio_children = 0; 707 bp->bio_inbed = 0; 708 bp->bio_driver1 = NULL; 709 bp->bio_driver2 = NULL; 710 bp->bio_pflags = 0; 711 g_io_request(bp, cp); 712 pace = 1; 713 return; 714 } 715 716 SYSCTL_DECL(_kern_geom); 717 718 static long transient_maps; 719 SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD, 720 &transient_maps, 0, 721 "Total count of the transient mapping requests"); 722 u_int transient_map_retries = 10; 723 SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW, 724 &transient_map_retries, 0, 725 "Max count of retries used before giving up on creating transient map"); 726 int transient_map_hard_failures; 727 SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD, 728 &transient_map_hard_failures, 0, 729 "Failures to establish the transient mapping due to retry attempts " 730 "exhausted"); 731 int transient_map_soft_failures; 732 SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD, 733 &transient_map_soft_failures, 0, 734 "Count of retried failures to establish the transient mapping"); 735 int inflight_transient_maps; 736 SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD, 737 &inflight_transient_maps, 0, 738 "Current count of the active transient maps"); 739 740 static int 741 g_io_transient_map_bio(struct bio *bp) 742 { 743 vm_offset_t addr; 744 long size; 745 u_int retried; 746 747 KASSERT(unmapped_buf_allowed, ("unmapped disabled")); 748 749 size = round_page(bp->bio_ma_offset + bp->bio_length); 750 KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp)); 751 addr = 0; 752 retried = 0; 753 atomic_add_long(&transient_maps, 1); 754 retry: 755 if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) { 756 if (transient_map_retries != 0 && 757 retried >= transient_map_retries) { 758 CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s", 759 bp, bp->bio_to->name); 760 atomic_add_int(&transient_map_hard_failures, 1); 761 return (EDEADLK/* XXXKIB */); 762 } else { 763 /* 764 * Naive attempt to quisce the I/O to get more 765 * in-flight requests completed and defragment 766 * the transient_arena. 767 */ 768 CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d", 769 bp, bp->bio_to->name, retried); 770 pause("g_d_tra", hz / 10); 771 retried++; 772 atomic_add_int(&transient_map_soft_failures, 1); 773 goto retry; 774 } 775 } 776 atomic_add_int(&inflight_transient_maps, 1); 777 pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size)); 778 bp->bio_data = (caddr_t)addr + bp->bio_ma_offset; 779 bp->bio_flags |= BIO_TRANSIENT_MAPPING; 780 bp->bio_flags &= ~BIO_UNMAPPED; 781 return (EJUSTRETURN); 782 } 783 784 void 785 g_io_schedule_down(struct thread *tp __unused) 786 { 787 struct bio *bp; 788 int error; 789 790 for(;;) { 791 g_bioq_lock(&g_bio_run_down); 792 bp = g_bioq_first(&g_bio_run_down); 793 if (bp == NULL) { 794 CTR0(KTR_GEOM, "g_down going to sleep"); 795 msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock, 796 PRIBIO | PDROP, "-", 0); 797 continue; 798 } 799 CTR0(KTR_GEOM, "g_down has work to do"); 800 g_bioq_unlock(&g_bio_run_down); 801 if (pace != 0) { 802 /* 803 * There has been at least one memory allocation 804 * failure since the last I/O completed. Pause 1ms to 805 * give the system a chance to free up memory. We only 806 * do this once because a large number of allocations 807 * can fail in the direct dispatch case and there's no 808 * relationship between the number of these failures and 809 * the length of the outage. If there's still an outage, 810 * we'll pause again and again until it's 811 * resolved. Older versions paused longer and once per 812 * allocation failure. This was OK for a single threaded 813 * g_down, but with direct dispatch would lead to max of 814 * 10 IOPs for minutes at a time when transient memory 815 * issues prevented allocation for a batch of requests 816 * from the upper layers. 817 * 818 * XXX This pacing is really lame. It needs to be solved 819 * by other methods. This is OK only because the worst 820 * case scenario is so rare. In the worst case scenario 821 * all memory is tied up waiting for I/O to complete 822 * which can never happen since we can't allocate bios 823 * for that I/O. 824 */ 825 CTR0(KTR_GEOM, "g_down pacing self"); 826 pause("g_down", min(hz/1000, 1)); 827 pace = 0; 828 } 829 CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp, 830 bp->bio_to->name); 831 error = g_io_check(bp); 832 if (error >= 0) { 833 CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider " 834 "%s returned %d", bp, bp->bio_to->name, error); 835 g_io_deliver(bp, error); 836 continue; 837 } 838 THREAD_NO_SLEEPING(); 839 CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld " 840 "len %ld", bp, bp->bio_to->name, bp->bio_offset, 841 bp->bio_length); 842 bp->bio_to->geom->start(bp); 843 THREAD_SLEEPING_OK(); 844 } 845 } 846 847 void 848 bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg) 849 { 850 bp->bio_task = func; 851 bp->bio_task_arg = arg; 852 /* 853 * The taskqueue is actually just a second queue off the "up" 854 * queue, so we use the same lock. 855 */ 856 g_bioq_lock(&g_bio_run_up); 857 KASSERT(!(bp->bio_flags & BIO_ONQUEUE), 858 ("Bio already on queue bp=%p target taskq", bp)); 859 bp->bio_flags |= BIO_ONQUEUE; 860 TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue); 861 g_bio_run_task.bio_queue_length++; 862 wakeup(&g_wait_up); 863 g_bioq_unlock(&g_bio_run_up); 864 } 865 866 867 void 868 g_io_schedule_up(struct thread *tp __unused) 869 { 870 struct bio *bp; 871 for(;;) { 872 g_bioq_lock(&g_bio_run_up); 873 bp = g_bioq_first(&g_bio_run_task); 874 if (bp != NULL) { 875 g_bioq_unlock(&g_bio_run_up); 876 THREAD_NO_SLEEPING(); 877 CTR1(KTR_GEOM, "g_up processing task bp %p", bp); 878 bp->bio_task(bp->bio_task_arg); 879 THREAD_SLEEPING_OK(); 880 continue; 881 } 882 bp = g_bioq_first(&g_bio_run_up); 883 if (bp != NULL) { 884 g_bioq_unlock(&g_bio_run_up); 885 THREAD_NO_SLEEPING(); 886 CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off " 887 "%jd len %ld", bp, bp->bio_to->name, 888 bp->bio_offset, bp->bio_length); 889 biodone(bp); 890 THREAD_SLEEPING_OK(); 891 continue; 892 } 893 CTR0(KTR_GEOM, "g_up going to sleep"); 894 msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock, 895 PRIBIO | PDROP, "-", 0); 896 } 897 } 898 899 void * 900 g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error) 901 { 902 struct bio *bp; 903 void *ptr; 904 int errorc; 905 906 KASSERT(length > 0 && length >= cp->provider->sectorsize && 907 length <= MAXPHYS, ("g_read_data(): invalid length %jd", 908 (intmax_t)length)); 909 910 bp = g_alloc_bio(); 911 bp->bio_cmd = BIO_READ; 912 bp->bio_done = NULL; 913 bp->bio_offset = offset; 914 bp->bio_length = length; 915 ptr = g_malloc(length, M_WAITOK); 916 bp->bio_data = ptr; 917 g_io_request(bp, cp); 918 errorc = biowait(bp, "gread"); 919 if (error != NULL) 920 *error = errorc; 921 g_destroy_bio(bp); 922 if (errorc) { 923 g_free(ptr); 924 ptr = NULL; 925 } 926 return (ptr); 927 } 928 929 int 930 g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length) 931 { 932 struct bio *bp; 933 int error; 934 935 KASSERT(length > 0 && length >= cp->provider->sectorsize && 936 length <= MAXPHYS, ("g_write_data(): invalid length %jd", 937 (intmax_t)length)); 938 939 bp = g_alloc_bio(); 940 bp->bio_cmd = BIO_WRITE; 941 bp->bio_done = NULL; 942 bp->bio_offset = offset; 943 bp->bio_length = length; 944 bp->bio_data = ptr; 945 g_io_request(bp, cp); 946 error = biowait(bp, "gwrite"); 947 g_destroy_bio(bp); 948 return (error); 949 } 950 951 int 952 g_delete_data(struct g_consumer *cp, off_t offset, off_t length) 953 { 954 struct bio *bp; 955 int error; 956 957 KASSERT(length > 0 && length >= cp->provider->sectorsize, 958 ("g_delete_data(): invalid length %jd", (intmax_t)length)); 959 960 bp = g_alloc_bio(); 961 bp->bio_cmd = BIO_DELETE; 962 bp->bio_done = NULL; 963 bp->bio_offset = offset; 964 bp->bio_length = length; 965 bp->bio_data = NULL; 966 g_io_request(bp, cp); 967 error = biowait(bp, "gdelete"); 968 g_destroy_bio(bp); 969 return (error); 970 } 971 972 void 973 g_print_bio(struct bio *bp) 974 { 975 const char *pname, *cmd = NULL; 976 977 if (bp->bio_to != NULL) 978 pname = bp->bio_to->name; 979 else 980 pname = "[unknown]"; 981 982 switch (bp->bio_cmd) { 983 case BIO_GETATTR: 984 cmd = "GETATTR"; 985 printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute); 986 return; 987 case BIO_FLUSH: 988 cmd = "FLUSH"; 989 printf("%s[%s]", pname, cmd); 990 return; 991 case BIO_READ: 992 cmd = "READ"; 993 break; 994 case BIO_WRITE: 995 cmd = "WRITE"; 996 break; 997 case BIO_DELETE: 998 cmd = "DELETE"; 999 break; 1000 default: 1001 cmd = "UNKNOWN"; 1002 printf("%s[%s()]", pname, cmd); 1003 return; 1004 } 1005 printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd, 1006 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); 1007 } 1008