1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2002 Poul-Henning Kamp 5 * Copyright (c) 2002 Networks Associates Technology, Inc. 6 * Copyright (c) 2013 The FreeBSD Foundation 7 * All rights reserved. 8 * 9 * This software was developed for the FreeBSD Project by Poul-Henning Kamp 10 * and NAI Labs, the Security Research Division of Network Associates, Inc. 11 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 12 * DARPA CHATS research program. 13 * 14 * Portions of this software were developed by Konstantin Belousov 15 * under sponsorship from the FreeBSD Foundation. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. The names of the authors may not be used to endorse or promote 26 * products derived from this software without specific prior written 27 * permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 39 * SUCH DAMAGE. 40 */ 41 42 #include <sys/cdefs.h> 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/kernel.h> 46 #include <sys/malloc.h> 47 #include <sys/bio.h> 48 #include <sys/ktr.h> 49 #include <sys/proc.h> 50 #include <sys/sbuf.h> 51 #include <sys/stack.h> 52 #include <sys/sysctl.h> 53 #include <sys/vmem.h> 54 #include <machine/stack.h> 55 #include <machine/stdarg.h> 56 57 #include <sys/errno.h> 58 #include <geom/geom.h> 59 #include <geom/geom_int.h> 60 #include <sys/devicestat.h> 61 62 #include <vm/uma.h> 63 #include <vm/vm.h> 64 #include <vm/vm_param.h> 65 #include <vm/vm_kern.h> 66 #include <vm/vm_page.h> 67 #include <vm/vm_object.h> 68 #include <vm/vm_extern.h> 69 #include <vm/vm_map.h> 70 71 #define KTR_GEOM_ENABLED \ 72 ((KTR_COMPILE & KTR_GEOM) != 0 && (ktr_mask & KTR_GEOM) != 0) 73 74 static int g_io_transient_map_bio(struct bio *bp); 75 76 static struct g_bioq g_bio_run_down; 77 static struct g_bioq g_bio_run_up; 78 79 /* 80 * Pace is a hint that we've had some trouble recently allocating 81 * bios, so we should back off trying to send I/O down the stack 82 * a bit to let the problem resolve. When pacing, we also turn 83 * off direct dispatch to also reduce memory pressure from I/Os 84 * there, at the expxense of some added latency while the memory 85 * pressures exist. See g_io_schedule_down() for more details 86 * and limitations. 87 */ 88 static volatile u_int __read_mostly pace; 89 90 static uma_zone_t __read_mostly biozone; 91 92 #include <machine/atomic.h> 93 94 static void 95 g_bioq_lock(struct g_bioq *bq) 96 { 97 98 mtx_lock(&bq->bio_queue_lock); 99 } 100 101 static void 102 g_bioq_unlock(struct g_bioq *bq) 103 { 104 105 mtx_unlock(&bq->bio_queue_lock); 106 } 107 108 #if 0 109 static void 110 g_bioq_destroy(struct g_bioq *bq) 111 { 112 113 mtx_destroy(&bq->bio_queue_lock); 114 } 115 #endif 116 117 static void 118 g_bioq_init(struct g_bioq *bq) 119 { 120 121 TAILQ_INIT(&bq->bio_queue); 122 mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF); 123 } 124 125 static struct bio * 126 g_bioq_first(struct g_bioq *bq) 127 { 128 struct bio *bp; 129 130 bp = TAILQ_FIRST(&bq->bio_queue); 131 if (bp != NULL) { 132 KASSERT((bp->bio_flags & BIO_ONQUEUE), 133 ("Bio not on queue bp=%p target %p", bp, bq)); 134 bp->bio_flags &= ~BIO_ONQUEUE; 135 TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue); 136 bq->bio_queue_length--; 137 } 138 return (bp); 139 } 140 141 struct bio * 142 g_new_bio(void) 143 { 144 struct bio *bp; 145 146 bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO); 147 #ifdef KTR 148 if (KTR_GEOM_ENABLED) { 149 struct stack st; 150 151 CTR1(KTR_GEOM, "g_new_bio(): %p", bp); 152 stack_save(&st); 153 CTRSTACK(KTR_GEOM, &st, 3); 154 } 155 #endif 156 return (bp); 157 } 158 159 struct bio * 160 g_alloc_bio(void) 161 { 162 struct bio *bp; 163 164 bp = uma_zalloc(biozone, M_WAITOK | M_ZERO); 165 #ifdef KTR 166 if (KTR_GEOM_ENABLED) { 167 struct stack st; 168 169 CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp); 170 stack_save(&st); 171 CTRSTACK(KTR_GEOM, &st, 3); 172 } 173 #endif 174 return (bp); 175 } 176 177 void 178 g_destroy_bio(struct bio *bp) 179 { 180 #ifdef KTR 181 if (KTR_GEOM_ENABLED) { 182 struct stack st; 183 184 CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp); 185 stack_save(&st); 186 CTRSTACK(KTR_GEOM, &st, 3); 187 } 188 #endif 189 uma_zfree(biozone, bp); 190 } 191 192 struct bio * 193 g_clone_bio(struct bio *bp) 194 { 195 struct bio *bp2; 196 197 bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO); 198 if (bp2 != NULL) { 199 bp2->bio_parent = bp; 200 bp2->bio_cmd = bp->bio_cmd; 201 /* 202 * BIO_ORDERED flag may be used by disk drivers to enforce 203 * ordering restrictions, so this flag needs to be cloned. 204 * BIO_UNMAPPED, BIO_VLIST, and BIO_SWAP should be inherited, 205 * to properly indicate which way the buffer is passed. 206 * Other bio flags are not suitable for cloning. 207 */ 208 bp2->bio_flags = bp->bio_flags & 209 (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST | BIO_SWAP); 210 bp2->bio_length = bp->bio_length; 211 bp2->bio_offset = bp->bio_offset; 212 bp2->bio_data = bp->bio_data; 213 bp2->bio_ma = bp->bio_ma; 214 bp2->bio_ma_n = bp->bio_ma_n; 215 bp2->bio_ma_offset = bp->bio_ma_offset; 216 bp2->bio_attribute = bp->bio_attribute; 217 if (bp->bio_cmd == BIO_ZONE) 218 bcopy(&bp->bio_zone, &bp2->bio_zone, 219 sizeof(bp->bio_zone)); 220 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 221 bp2->bio_track_bp = bp->bio_track_bp; 222 #endif 223 bp->bio_children++; 224 } 225 #ifdef KTR 226 if (KTR_GEOM_ENABLED) { 227 struct stack st; 228 229 CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2); 230 stack_save(&st); 231 CTRSTACK(KTR_GEOM, &st, 3); 232 } 233 #endif 234 return(bp2); 235 } 236 237 struct bio * 238 g_duplicate_bio(struct bio *bp) 239 { 240 struct bio *bp2; 241 242 bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO); 243 bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST | BIO_SWAP); 244 bp2->bio_parent = bp; 245 bp2->bio_cmd = bp->bio_cmd; 246 bp2->bio_length = bp->bio_length; 247 bp2->bio_offset = bp->bio_offset; 248 bp2->bio_data = bp->bio_data; 249 bp2->bio_ma = bp->bio_ma; 250 bp2->bio_ma_n = bp->bio_ma_n; 251 bp2->bio_ma_offset = bp->bio_ma_offset; 252 bp2->bio_attribute = bp->bio_attribute; 253 bp->bio_children++; 254 #ifdef KTR 255 if (KTR_GEOM_ENABLED) { 256 struct stack st; 257 258 CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2); 259 stack_save(&st); 260 CTRSTACK(KTR_GEOM, &st, 3); 261 } 262 #endif 263 return(bp2); 264 } 265 266 void 267 g_reset_bio(struct bio *bp) 268 { 269 270 bzero(bp, sizeof(*bp)); 271 } 272 273 void 274 g_io_init(void) 275 { 276 277 g_bioq_init(&g_bio_run_down); 278 g_bioq_init(&g_bio_run_up); 279 biozone = uma_zcreate("g_bio", sizeof (struct bio), 280 NULL, NULL, 281 NULL, NULL, 282 0, 0); 283 } 284 285 int 286 g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr) 287 { 288 struct bio *bp; 289 int error; 290 291 g_trace(G_T_BIO, "bio_getattr(%s)", attr); 292 bp = g_alloc_bio(); 293 bp->bio_cmd = BIO_GETATTR; 294 bp->bio_done = NULL; 295 bp->bio_attribute = attr; 296 bp->bio_length = *len; 297 bp->bio_data = ptr; 298 g_io_request(bp, cp); 299 error = biowait(bp, "ggetattr"); 300 *len = bp->bio_completed; 301 g_destroy_bio(bp); 302 return (error); 303 } 304 305 int 306 g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp) 307 { 308 struct bio *bp; 309 int error; 310 311 g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd); 312 bp = g_alloc_bio(); 313 bp->bio_cmd = BIO_ZONE; 314 bp->bio_done = NULL; 315 /* 316 * XXX KDM need to handle report zone data. 317 */ 318 bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args)); 319 if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) 320 bp->bio_length = 321 zone_args->zone_params.report.entries_allocated * 322 sizeof(struct disk_zone_rep_entry); 323 else 324 bp->bio_length = 0; 325 326 g_io_request(bp, cp); 327 error = biowait(bp, "gzone"); 328 bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args)); 329 g_destroy_bio(bp); 330 return (error); 331 } 332 333 /* 334 * Send a BIO_SPEEDUP down the stack. This is used to tell the lower layers that 335 * the upper layers have detected a resource shortage. The lower layers are 336 * advised to stop delaying I/O that they might be holding for performance 337 * reasons and to schedule it (non-trims) or complete it successfully (trims) as 338 * quickly as it can. bio_length is the amount of the shortage. This call 339 * should be non-blocking. bio_resid is used to communicate back if the lower 340 * layers couldn't find bio_length worth of I/O to schedule or discard. A length 341 * of 0 means to do as much as you can (schedule the h/w queues full, discard 342 * all trims). flags are a hint from the upper layers to the lower layers what 343 * operation should be done. 344 */ 345 int 346 g_io_speedup(off_t shortage, u_int flags, size_t *resid, struct g_consumer *cp) 347 { 348 struct bio *bp; 349 int error; 350 351 KASSERT((flags & (BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE)) != 0, 352 ("Invalid flags passed to g_io_speedup: %#x", flags)); 353 g_trace(G_T_BIO, "bio_speedup(%s, %jd, %#x)", cp->provider->name, 354 (intmax_t)shortage, flags); 355 bp = g_new_bio(); 356 if (bp == NULL) 357 return (ENOMEM); 358 bp->bio_cmd = BIO_SPEEDUP; 359 bp->bio_length = shortage; 360 bp->bio_done = NULL; 361 bp->bio_flags |= flags; 362 g_io_request(bp, cp); 363 error = biowait(bp, "gflush"); 364 *resid = bp->bio_resid; 365 g_destroy_bio(bp); 366 return (error); 367 } 368 369 int 370 g_io_flush(struct g_consumer *cp) 371 { 372 struct bio *bp; 373 int error; 374 375 g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name); 376 bp = g_alloc_bio(); 377 bp->bio_cmd = BIO_FLUSH; 378 bp->bio_flags |= BIO_ORDERED; 379 bp->bio_done = NULL; 380 bp->bio_attribute = NULL; 381 bp->bio_offset = cp->provider->mediasize; 382 bp->bio_length = 0; 383 bp->bio_data = NULL; 384 g_io_request(bp, cp); 385 error = biowait(bp, "gflush"); 386 g_destroy_bio(bp); 387 return (error); 388 } 389 390 static int 391 g_io_check(struct bio *bp) 392 { 393 struct g_consumer *cp; 394 struct g_provider *pp; 395 off_t excess; 396 int error; 397 398 biotrack(bp, __func__); 399 400 cp = bp->bio_from; 401 pp = bp->bio_to; 402 403 /* Fail if access counters dont allow the operation */ 404 switch(bp->bio_cmd) { 405 case BIO_READ: 406 case BIO_GETATTR: 407 if (cp->acr == 0) 408 return (EPERM); 409 break; 410 case BIO_WRITE: 411 case BIO_DELETE: 412 case BIO_SPEEDUP: 413 case BIO_FLUSH: 414 if (cp->acw == 0) 415 return (EPERM); 416 break; 417 case BIO_ZONE: 418 if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) || 419 (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) { 420 if (cp->acr == 0) 421 return (EPERM); 422 } else if (cp->acw == 0) 423 return (EPERM); 424 break; 425 default: 426 return (EPERM); 427 } 428 /* if provider is marked for error, don't disturb. */ 429 if (pp->error) 430 return (pp->error); 431 if (cp->flags & G_CF_ORPHAN) 432 return (ENXIO); 433 434 switch(bp->bio_cmd) { 435 case BIO_READ: 436 case BIO_WRITE: 437 case BIO_DELETE: 438 /* Zero sectorsize or mediasize is probably a lack of media. */ 439 if (pp->sectorsize == 0 || pp->mediasize == 0) 440 return (ENXIO); 441 /* Reject I/O not on sector boundary */ 442 if (bp->bio_offset % pp->sectorsize) 443 return (EINVAL); 444 /* Reject I/O not integral sector long */ 445 if (bp->bio_length % pp->sectorsize) 446 return (EINVAL); 447 /* Reject requests before or past the end of media. */ 448 if (bp->bio_offset < 0) 449 return (EIO); 450 if (bp->bio_offset > pp->mediasize) 451 return (EIO); 452 453 /* Truncate requests to the end of providers media. */ 454 excess = bp->bio_offset + bp->bio_length; 455 if (excess > bp->bio_to->mediasize) { 456 KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 || 457 round_page(bp->bio_ma_offset + 458 bp->bio_length) / PAGE_SIZE == bp->bio_ma_n, 459 ("excess bio %p too short", bp)); 460 excess -= bp->bio_to->mediasize; 461 bp->bio_length -= excess; 462 if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 463 bp->bio_ma_n = round_page(bp->bio_ma_offset + 464 bp->bio_length) / PAGE_SIZE; 465 } 466 if (excess > 0) 467 CTR3(KTR_GEOM, "g_down truncated bio " 468 "%p provider %s by %d", bp, 469 bp->bio_to->name, excess); 470 } 471 472 /* Deliver zero length transfers right here. */ 473 if (bp->bio_length == 0) { 474 CTR2(KTR_GEOM, "g_down terminated 0-length " 475 "bp %p provider %s", bp, bp->bio_to->name); 476 return (0); 477 } 478 479 if ((bp->bio_flags & BIO_UNMAPPED) != 0 && 480 (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 && 481 (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) { 482 if ((error = g_io_transient_map_bio(bp)) >= 0) 483 return (error); 484 } 485 break; 486 default: 487 break; 488 } 489 return (EJUSTRETURN); 490 } 491 492 void 493 g_io_request(struct bio *bp, struct g_consumer *cp) 494 { 495 struct g_provider *pp; 496 int direct, error, first; 497 uint8_t cmd; 498 499 biotrack(bp, __func__); 500 501 KASSERT(cp != NULL, ("NULL cp in g_io_request")); 502 KASSERT(bp != NULL, ("NULL bp in g_io_request")); 503 pp = cp->provider; 504 KASSERT(pp != NULL, ("consumer not attached in g_io_request")); 505 #ifdef DIAGNOSTIC 506 KASSERT(bp->bio_driver1 == NULL, 507 ("bio_driver1 used by the consumer (geom %s)", cp->geom->name)); 508 KASSERT(bp->bio_driver2 == NULL, 509 ("bio_driver2 used by the consumer (geom %s)", cp->geom->name)); 510 KASSERT(bp->bio_pflags == 0, 511 ("bio_pflags used by the consumer (geom %s)", cp->geom->name)); 512 /* 513 * Remember consumer's private fields, so we can detect if they were 514 * modified by the provider. 515 */ 516 bp->_bio_caller1 = bp->bio_caller1; 517 bp->_bio_caller2 = bp->bio_caller2; 518 bp->_bio_cflags = bp->bio_cflags; 519 #endif 520 521 cmd = bp->bio_cmd; 522 if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) { 523 KASSERT(bp->bio_data != NULL, 524 ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd)); 525 } 526 if (cmd == BIO_DELETE || cmd == BIO_FLUSH || cmd == BIO_SPEEDUP) { 527 KASSERT(bp->bio_data == NULL, 528 ("non-NULL bp->data in g_io_request(cmd=%hu)", 529 bp->bio_cmd)); 530 } 531 if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) { 532 KASSERT(bp->bio_offset % cp->provider->sectorsize == 0, 533 ("wrong offset %jd for sectorsize %u", 534 bp->bio_offset, cp->provider->sectorsize)); 535 KASSERT(bp->bio_length % cp->provider->sectorsize == 0, 536 ("wrong length %jd for sectorsize %u", 537 bp->bio_length, cp->provider->sectorsize)); 538 } 539 540 g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d", 541 bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd); 542 543 bp->bio_from = cp; 544 bp->bio_to = pp; 545 bp->bio_error = 0; 546 bp->bio_completed = 0; 547 548 KASSERT(!(bp->bio_flags & BIO_ONQUEUE), 549 ("Bio already on queue bp=%p", bp)); 550 551 if ((g_collectstats & G_STATS_CONSUMERS) != 0 || 552 ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL)) 553 binuptime(&bp->bio_t0); 554 else 555 getbinuptime(&bp->bio_t0); 556 if (g_collectstats & G_STATS_CONSUMERS) 557 devstat_start_transaction_bio_t0(cp->stat, bp); 558 if (g_collectstats & G_STATS_PROVIDERS) 559 devstat_start_transaction_bio_t0(pp->stat, bp); 560 #ifdef INVARIANTS 561 atomic_add_int(&cp->nstart, 1); 562 #endif 563 564 direct = (cp->flags & G_CF_DIRECT_SEND) != 0 && 565 (pp->flags & G_PF_DIRECT_RECEIVE) != 0 && 566 curthread != g_down_td && 567 ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 || 568 (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) && 569 pace == 0; 570 if (direct) { 571 /* Block direct execution if less then half of stack left. */ 572 size_t st, su; 573 GET_STACK_USAGE(st, su); 574 if (su * 2 > st) 575 direct = 0; 576 } 577 578 if (direct) { 579 error = g_io_check(bp); 580 if (error >= 0) { 581 CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p " 582 "provider %s returned %d", bp, bp->bio_to->name, 583 error); 584 g_io_deliver(bp, error); 585 return; 586 } 587 bp->bio_to->geom->start(bp); 588 } else { 589 g_bioq_lock(&g_bio_run_down); 590 first = TAILQ_EMPTY(&g_bio_run_down.bio_queue); 591 TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue); 592 bp->bio_flags |= BIO_ONQUEUE; 593 g_bio_run_down.bio_queue_length++; 594 g_bioq_unlock(&g_bio_run_down); 595 /* Pass it on down. */ 596 if (first) 597 wakeup(&g_wait_down); 598 } 599 } 600 601 void 602 g_io_deliver(struct bio *bp, int error) 603 { 604 struct bintime now; 605 struct g_consumer *cp; 606 struct g_provider *pp; 607 struct mtx *mtxp; 608 int direct, first; 609 610 biotrack(bp, __func__); 611 612 KASSERT(bp != NULL, ("NULL bp in g_io_deliver")); 613 pp = bp->bio_to; 614 KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver")); 615 cp = bp->bio_from; 616 if (cp == NULL) { 617 bp->bio_error = error; 618 bp->bio_done(bp); 619 return; 620 } 621 KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver")); 622 KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver")); 623 #ifdef DIAGNOSTIC 624 /* 625 * Some classes - GJournal in particular - can modify bio's 626 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO 627 * flag means it's an expected behaviour for that particular geom. 628 */ 629 if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) { 630 KASSERT(bp->bio_caller1 == bp->_bio_caller1, 631 ("bio_caller1 used by the provider %s", pp->name)); 632 KASSERT(bp->bio_caller2 == bp->_bio_caller2, 633 ("bio_caller2 used by the provider %s", pp->name)); 634 KASSERT(bp->bio_cflags == bp->_bio_cflags, 635 ("bio_cflags used by the provider %s", pp->name)); 636 } 637 #endif 638 KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0")); 639 KASSERT(bp->bio_completed <= bp->bio_length, 640 ("bio_completed can't be greater than bio_length")); 641 642 g_trace(G_T_BIO, 643 "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd", 644 bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error, 645 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); 646 647 KASSERT(!(bp->bio_flags & BIO_ONQUEUE), 648 ("Bio already on queue bp=%p", bp)); 649 650 /* 651 * XXX: next two doesn't belong here 652 */ 653 bp->bio_bcount = bp->bio_length; 654 bp->bio_resid = bp->bio_bcount - bp->bio_completed; 655 656 direct = (pp->flags & G_PF_DIRECT_SEND) && 657 (cp->flags & G_CF_DIRECT_RECEIVE) && 658 curthread != g_up_td; 659 if (direct) { 660 /* Block direct execution if less then half of stack left. */ 661 size_t st, su; 662 GET_STACK_USAGE(st, su); 663 if (su * 2 > st) 664 direct = 0; 665 } 666 667 /* 668 * The statistics collection is lockless, as such, but we 669 * can not update one instance of the statistics from more 670 * than one thread at a time, so grab the lock first. 671 */ 672 if ((g_collectstats & G_STATS_CONSUMERS) != 0 || 673 ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL)) 674 binuptime(&now); 675 mtxp = mtx_pool_find(mtxpool_sleep, pp); 676 mtx_lock(mtxp); 677 if (g_collectstats & G_STATS_PROVIDERS) 678 devstat_end_transaction_bio_bt(pp->stat, bp, &now); 679 if (g_collectstats & G_STATS_CONSUMERS) 680 devstat_end_transaction_bio_bt(cp->stat, bp, &now); 681 #ifdef INVARIANTS 682 cp->nend++; 683 #endif 684 mtx_unlock(mtxp); 685 686 if (error != ENOMEM) { 687 bp->bio_error = error; 688 if (direct) { 689 biodone(bp); 690 } else { 691 g_bioq_lock(&g_bio_run_up); 692 first = TAILQ_EMPTY(&g_bio_run_up.bio_queue); 693 TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue); 694 bp->bio_flags |= BIO_ONQUEUE; 695 g_bio_run_up.bio_queue_length++; 696 g_bioq_unlock(&g_bio_run_up); 697 if (first) 698 wakeup(&g_wait_up); 699 } 700 return; 701 } 702 703 if (bootverbose) 704 printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name); 705 bp->bio_children = 0; 706 bp->bio_inbed = 0; 707 bp->bio_driver1 = NULL; 708 bp->bio_driver2 = NULL; 709 bp->bio_pflags = 0; 710 g_io_request(bp, cp); 711 pace = 1; 712 return; 713 } 714 715 SYSCTL_DECL(_kern_geom); 716 717 static long transient_maps; 718 SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD, 719 &transient_maps, 0, 720 "Total count of the transient mapping requests"); 721 u_int transient_map_retries = 10; 722 SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW, 723 &transient_map_retries, 0, 724 "Max count of retries used before giving up on creating transient map"); 725 int transient_map_hard_failures; 726 SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD, 727 &transient_map_hard_failures, 0, 728 "Failures to establish the transient mapping due to retry attempts " 729 "exhausted"); 730 int transient_map_soft_failures; 731 SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD, 732 &transient_map_soft_failures, 0, 733 "Count of retried failures to establish the transient mapping"); 734 int inflight_transient_maps; 735 SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD, 736 &inflight_transient_maps, 0, 737 "Current count of the active transient maps"); 738 739 static int 740 g_io_transient_map_bio(struct bio *bp) 741 { 742 vm_offset_t addr; 743 long size; 744 u_int retried; 745 746 KASSERT(unmapped_buf_allowed, ("unmapped disabled")); 747 748 size = round_page(bp->bio_ma_offset + bp->bio_length); 749 KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp)); 750 addr = 0; 751 retried = 0; 752 atomic_add_long(&transient_maps, 1); 753 retry: 754 if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) { 755 if (transient_map_retries != 0 && 756 retried >= transient_map_retries) { 757 CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s", 758 bp, bp->bio_to->name); 759 atomic_add_int(&transient_map_hard_failures, 1); 760 return (EDEADLK/* XXXKIB */); 761 } else { 762 /* 763 * Naive attempt to quisce the I/O to get more 764 * in-flight requests completed and defragment 765 * the transient_arena. 766 */ 767 CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d", 768 bp, bp->bio_to->name, retried); 769 pause("g_d_tra", hz / 10); 770 retried++; 771 atomic_add_int(&transient_map_soft_failures, 1); 772 goto retry; 773 } 774 } 775 atomic_add_int(&inflight_transient_maps, 1); 776 pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size)); 777 bp->bio_data = (caddr_t)addr + bp->bio_ma_offset; 778 bp->bio_flags |= BIO_TRANSIENT_MAPPING; 779 bp->bio_flags &= ~BIO_UNMAPPED; 780 return (EJUSTRETURN); 781 } 782 783 void 784 g_io_schedule_down(struct thread *tp __unused) 785 { 786 struct bio *bp; 787 int error; 788 789 for(;;) { 790 g_bioq_lock(&g_bio_run_down); 791 bp = g_bioq_first(&g_bio_run_down); 792 if (bp == NULL) { 793 CTR0(KTR_GEOM, "g_down going to sleep"); 794 msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock, 795 PRIBIO | PDROP, "-", 0); 796 continue; 797 } 798 CTR0(KTR_GEOM, "g_down has work to do"); 799 g_bioq_unlock(&g_bio_run_down); 800 biotrack(bp, __func__); 801 if (pace != 0) { 802 /* 803 * There has been at least one memory allocation 804 * failure since the last I/O completed. Pause 1ms to 805 * give the system a chance to free up memory. We only 806 * do this once because a large number of allocations 807 * can fail in the direct dispatch case and there's no 808 * relationship between the number of these failures and 809 * the length of the outage. If there's still an outage, 810 * we'll pause again and again until it's 811 * resolved. Older versions paused longer and once per 812 * allocation failure. This was OK for a single threaded 813 * g_down, but with direct dispatch would lead to max of 814 * 10 IOPs for minutes at a time when transient memory 815 * issues prevented allocation for a batch of requests 816 * from the upper layers. 817 * 818 * XXX This pacing is really lame. It needs to be solved 819 * by other methods. This is OK only because the worst 820 * case scenario is so rare. In the worst case scenario 821 * all memory is tied up waiting for I/O to complete 822 * which can never happen since we can't allocate bios 823 * for that I/O. 824 */ 825 CTR0(KTR_GEOM, "g_down pacing self"); 826 pause("g_down", min(hz/1000, 1)); 827 pace = 0; 828 } 829 CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp, 830 bp->bio_to->name); 831 error = g_io_check(bp); 832 if (error >= 0) { 833 CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider " 834 "%s returned %d", bp, bp->bio_to->name, error); 835 g_io_deliver(bp, error); 836 continue; 837 } 838 THREAD_NO_SLEEPING(); 839 CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld " 840 "len %ld", bp, bp->bio_to->name, bp->bio_offset, 841 bp->bio_length); 842 bp->bio_to->geom->start(bp); 843 THREAD_SLEEPING_OK(); 844 } 845 } 846 847 void 848 g_io_schedule_up(struct thread *tp __unused) 849 { 850 struct bio *bp; 851 852 for(;;) { 853 g_bioq_lock(&g_bio_run_up); 854 bp = g_bioq_first(&g_bio_run_up); 855 if (bp == NULL) { 856 CTR0(KTR_GEOM, "g_up going to sleep"); 857 msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock, 858 PRIBIO | PDROP, "-", 0); 859 continue; 860 } 861 g_bioq_unlock(&g_bio_run_up); 862 THREAD_NO_SLEEPING(); 863 CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off " 864 "%jd len %ld", bp, bp->bio_to->name, 865 bp->bio_offset, bp->bio_length); 866 biodone(bp); 867 THREAD_SLEEPING_OK(); 868 } 869 } 870 871 void * 872 g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error) 873 { 874 struct bio *bp; 875 void *ptr; 876 int errorc; 877 878 KASSERT(length > 0 && length >= cp->provider->sectorsize && 879 length <= maxphys, ("g_read_data(): invalid length %jd", 880 (intmax_t)length)); 881 882 bp = g_alloc_bio(); 883 bp->bio_cmd = BIO_READ; 884 bp->bio_done = NULL; 885 bp->bio_offset = offset; 886 bp->bio_length = length; 887 ptr = g_malloc(length, M_WAITOK); 888 bp->bio_data = ptr; 889 g_io_request(bp, cp); 890 errorc = biowait(bp, "gread"); 891 if (errorc == 0 && bp->bio_completed != length) 892 errorc = EIO; 893 if (error != NULL) 894 *error = errorc; 895 g_destroy_bio(bp); 896 if (errorc) { 897 g_free(ptr); 898 ptr = NULL; 899 } 900 return (ptr); 901 } 902 903 /* 904 * A read function for use by ffs_sbget when used by GEOM-layer routines. 905 */ 906 int 907 g_use_g_read_data(void *devfd, off_t loc, void **bufp, int size) 908 { 909 struct g_consumer *cp; 910 911 KASSERT(*bufp == NULL, 912 ("g_use_g_read_data: non-NULL *bufp %p\n", *bufp)); 913 914 cp = (struct g_consumer *)devfd; 915 /* 916 * Take care not to issue an invalid I/O request. The offset of 917 * the superblock candidate must be multiples of the provider's 918 * sector size, otherwise an FFS can't exist on the provider 919 * anyway. 920 */ 921 if (loc % cp->provider->sectorsize != 0) 922 return (ENOENT); 923 *bufp = g_read_data(cp, loc, size, NULL); 924 if (*bufp == NULL) 925 return (ENOENT); 926 return (0); 927 } 928 929 int 930 g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length) 931 { 932 struct bio *bp; 933 int error; 934 935 KASSERT(length > 0 && length >= cp->provider->sectorsize && 936 length <= maxphys, ("g_write_data(): invalid length %jd", 937 (intmax_t)length)); 938 939 bp = g_alloc_bio(); 940 bp->bio_cmd = BIO_WRITE; 941 bp->bio_done = NULL; 942 bp->bio_offset = offset; 943 bp->bio_length = length; 944 bp->bio_data = ptr; 945 g_io_request(bp, cp); 946 error = biowait(bp, "gwrite"); 947 if (error == 0 && bp->bio_completed != length) 948 error = EIO; 949 g_destroy_bio(bp); 950 return (error); 951 } 952 953 /* 954 * A write function for use by ffs_sbput when used by GEOM-layer routines. 955 */ 956 int 957 g_use_g_write_data(void *devfd, off_t loc, void *buf, int size) 958 { 959 960 return (g_write_data((struct g_consumer *)devfd, loc, buf, size)); 961 } 962 963 int 964 g_delete_data(struct g_consumer *cp, off_t offset, off_t length) 965 { 966 struct bio *bp; 967 int error; 968 969 KASSERT(length > 0 && length >= cp->provider->sectorsize, 970 ("g_delete_data(): invalid length %jd", (intmax_t)length)); 971 972 bp = g_alloc_bio(); 973 bp->bio_cmd = BIO_DELETE; 974 bp->bio_done = NULL; 975 bp->bio_offset = offset; 976 bp->bio_length = length; 977 bp->bio_data = NULL; 978 g_io_request(bp, cp); 979 error = biowait(bp, "gdelete"); 980 if (error == 0 && bp->bio_completed != length) 981 error = EIO; 982 g_destroy_bio(bp); 983 return (error); 984 } 985 986 void 987 g_print_bio(const char *prefix, const struct bio *bp, const char *fmtsuffix, 988 ...) 989 { 990 #ifndef PRINTF_BUFR_SIZE 991 #define PRINTF_BUFR_SIZE 64 992 #endif 993 char bufr[PRINTF_BUFR_SIZE]; 994 struct sbuf sb, *sbp __unused; 995 va_list ap; 996 997 sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN); 998 KASSERT(sbp != NULL, ("sbuf_new misused?")); 999 1000 sbuf_set_drain(&sb, sbuf_printf_drain, NULL); 1001 1002 sbuf_cat(&sb, prefix); 1003 g_format_bio(&sb, bp); 1004 1005 va_start(ap, fmtsuffix); 1006 sbuf_vprintf(&sb, fmtsuffix, ap); 1007 va_end(ap); 1008 1009 sbuf_nl_terminate(&sb); 1010 1011 sbuf_finish(&sb); 1012 sbuf_delete(&sb); 1013 } 1014 1015 void 1016 g_format_bio(struct sbuf *sb, const struct bio *bp) 1017 { 1018 const char *pname, *cmd = NULL; 1019 1020 if (bp->bio_to != NULL) 1021 pname = bp->bio_to->name; 1022 else if (bp->bio_parent != NULL && bp->bio_parent->bio_to != NULL) 1023 pname = bp->bio_parent->bio_to->name; 1024 else 1025 pname = "[unknown]"; 1026 1027 switch (bp->bio_cmd) { 1028 case BIO_GETATTR: 1029 cmd = "GETATTR"; 1030 sbuf_printf(sb, "%s[%s(attr=%s)]", pname, cmd, 1031 bp->bio_attribute); 1032 return; 1033 case BIO_FLUSH: 1034 cmd = "FLUSH"; 1035 sbuf_printf(sb, "%s[%s]", pname, cmd); 1036 return; 1037 case BIO_ZONE: { 1038 char *subcmd = NULL; 1039 cmd = "ZONE"; 1040 switch (bp->bio_zone.zone_cmd) { 1041 case DISK_ZONE_OPEN: 1042 subcmd = "OPEN"; 1043 break; 1044 case DISK_ZONE_CLOSE: 1045 subcmd = "CLOSE"; 1046 break; 1047 case DISK_ZONE_FINISH: 1048 subcmd = "FINISH"; 1049 break; 1050 case DISK_ZONE_RWP: 1051 subcmd = "RWP"; 1052 break; 1053 case DISK_ZONE_REPORT_ZONES: 1054 subcmd = "REPORT ZONES"; 1055 break; 1056 case DISK_ZONE_GET_PARAMS: 1057 subcmd = "GET PARAMS"; 1058 break; 1059 default: 1060 subcmd = "UNKNOWN"; 1061 break; 1062 } 1063 sbuf_printf(sb, "%s[%s,%s]", pname, cmd, subcmd); 1064 return; 1065 } 1066 case BIO_READ: 1067 cmd = "READ"; 1068 break; 1069 case BIO_WRITE: 1070 cmd = "WRITE"; 1071 break; 1072 case BIO_DELETE: 1073 cmd = "DELETE"; 1074 break; 1075 default: 1076 cmd = "UNKNOWN"; 1077 sbuf_printf(sb, "%s[%s()]", pname, cmd); 1078 return; 1079 } 1080 sbuf_printf(sb, "%s[%s(offset=%jd, length=%jd)]", pname, cmd, 1081 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); 1082 } 1083