1 /*- 2 * CAM IO Scheduler Interface 3 * 4 * SPDX-License-Identifier: BSD-2-Clause 5 * 6 * Copyright (c) 2015 Netflix, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include "opt_cam.h" 31 #include "opt_ddb.h" 32 33 #include <sys/cdefs.h> 34 #include <sys/param.h> 35 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/bio.h> 39 #include <sys/lock.h> 40 #include <sys/malloc.h> 41 #include <sys/mutex.h> 42 #include <sys/sbuf.h> 43 #include <sys/sysctl.h> 44 45 #include <cam/cam.h> 46 #include <cam/cam_ccb.h> 47 #include <cam/cam_periph.h> 48 #include <cam/cam_xpt_periph.h> 49 #include <cam/cam_xpt_internal.h> 50 #include <cam/cam_iosched.h> 51 52 #include <ddb/ddb.h> 53 54 static MALLOC_DEFINE(M_CAMSCHED, "CAM I/O Scheduler", 55 "CAM I/O Scheduler buffers"); 56 57 static SYSCTL_NODE(_kern_cam, OID_AUTO, iosched, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 58 "CAM I/O Scheduler parameters"); 59 60 /* 61 * Default I/O scheduler for FreeBSD. This implementation is just a thin-vineer 62 * over the bioq_* interface, with notions of separate calls for normal I/O and 63 * for trims. 64 * 65 * When CAM_IOSCHED_DYNAMIC is defined, the scheduler is enhanced to dynamically 66 * steer the rate of one type of traffic to help other types of traffic (eg 67 * limit writes when read latency deteriorates on SSDs). 68 */ 69 70 #ifdef CAM_IOSCHED_DYNAMIC 71 72 static bool do_dynamic_iosched = true; 73 SYSCTL_BOOL(_kern_cam_iosched, OID_AUTO, dynamic, CTLFLAG_RDTUN, 74 &do_dynamic_iosched, 1, 75 "Enable Dynamic I/O scheduler optimizations."); 76 77 /* 78 * For an EMA, with an alpha of alpha, we know 79 * alpha = 2 / (N + 1) 80 * or 81 * N = 1 + (2 / alpha) 82 * where N is the number of samples that 86% of the current 83 * EMA is derived from. 84 * 85 * So we invent[*] alpha_bits: 86 * alpha_bits = -log_2(alpha) 87 * alpha = 2^-alpha_bits 88 * So 89 * N = 1 + 2^(alpha_bits + 1) 90 * 91 * The default 9 gives a 1025 lookback for 86% of the data. 92 * For a brief intro: https://en.wikipedia.org/wiki/Moving_average 93 * 94 * [*] Steal from the load average code and many other places. 95 * Note: See computation of EMA and EMVAR for acceptable ranges of alpha. 96 */ 97 static int alpha_bits = 9; 98 SYSCTL_INT(_kern_cam_iosched, OID_AUTO, alpha_bits, CTLFLAG_RWTUN, 99 &alpha_bits, 1, 100 "Bits in EMA's alpha."); 101 102 /* 103 * Different parameters for the buckets of latency we keep track of. These are all 104 * published read-only since at present they are compile time constants. 105 * 106 * Bucket base is the upper bounds of the first latency bucket. It's currently 20us. 107 * With 20 buckets (see below), that leads to a geometric progression with a max size 108 * of 5.2s which is safeily larger than 1s to help diagnose extreme outliers better. 109 */ 110 #ifndef BUCKET_BASE 111 #define BUCKET_BASE ((SBT_1S / 50000) + 1) /* 20us */ 112 #endif 113 static sbintime_t bucket_base = BUCKET_BASE; 114 SYSCTL_SBINTIME_USEC(_kern_cam_iosched, OID_AUTO, bucket_base_us, CTLFLAG_RD, 115 &bucket_base, 116 "Size of the smallest latency bucket"); 117 118 /* 119 * Bucket ratio is the geometric progression for the bucket. For a bucket b_n 120 * the size of bucket b_n+1 is b_n * bucket_ratio / 100. 121 */ 122 static int bucket_ratio = 200; /* Rather hard coded at the moment */ 123 SYSCTL_INT(_kern_cam_iosched, OID_AUTO, bucket_ratio, CTLFLAG_RD, 124 &bucket_ratio, 200, 125 "Latency Bucket Ratio for geometric progression."); 126 127 /* 128 * Number of total buckets. Starting at BUCKET_BASE, each one is a power of 2. 129 */ 130 #ifndef LAT_BUCKETS 131 #define LAT_BUCKETS 20 /* < 20us < 40us ... < 2^(n-1)*20us >= 2^(n-1)*20us */ 132 #endif 133 static int lat_buckets = LAT_BUCKETS; 134 SYSCTL_INT(_kern_cam_iosched, OID_AUTO, buckets, CTLFLAG_RD, 135 &lat_buckets, LAT_BUCKETS, 136 "Total number of latency buckets published"); 137 138 /* 139 * Read bias: how many reads do we favor before scheduling a write 140 * when we have a choice. 141 */ 142 static int default_read_bias = 0; 143 SYSCTL_INT(_kern_cam_iosched, OID_AUTO, read_bias, CTLFLAG_RWTUN, 144 &default_read_bias, 0, 145 "Default read bias for new devices."); 146 147 struct iop_stats; 148 struct cam_iosched_softc; 149 150 int iosched_debug = 0; 151 152 typedef enum { 153 none = 0, /* No limits */ 154 queue_depth, /* Limit how many ops we queue to SIM */ 155 iops, /* Limit # of IOPS to the drive */ 156 bandwidth, /* Limit bandwidth to the drive */ 157 limiter_max 158 } io_limiter; 159 160 static const char *cam_iosched_limiter_names[] = 161 { "none", "queue_depth", "iops", "bandwidth" }; 162 163 /* 164 * Called to initialize the bits of the iop_stats structure relevant to the 165 * limiter. Called just after the limiter is set. 166 */ 167 typedef int l_init_t(struct iop_stats *); 168 169 /* 170 * Called every tick. 171 */ 172 typedef int l_tick_t(struct iop_stats *); 173 174 /* 175 * Called to see if the limiter thinks this IOP can be allowed to 176 * proceed. If so, the limiter assumes that the IOP proceeded 177 * and makes any accounting of it that's needed. 178 */ 179 typedef int l_iop_t(struct iop_stats *, struct bio *); 180 181 /* 182 * Called when an I/O completes so the limiter can update its 183 * accounting. Pending I/Os may complete in any order (even when 184 * sent to the hardware at the same time), so the limiter may not 185 * make any assumptions other than this I/O has completed. If it 186 * returns 1, then xpt_schedule() needs to be called again. 187 */ 188 typedef int l_iodone_t(struct iop_stats *, struct bio *); 189 190 static l_iop_t cam_iosched_qd_iop; 191 static l_iop_t cam_iosched_qd_caniop; 192 static l_iodone_t cam_iosched_qd_iodone; 193 194 static l_init_t cam_iosched_iops_init; 195 static l_tick_t cam_iosched_iops_tick; 196 static l_iop_t cam_iosched_iops_caniop; 197 static l_iop_t cam_iosched_iops_iop; 198 199 static l_init_t cam_iosched_bw_init; 200 static l_tick_t cam_iosched_bw_tick; 201 static l_iop_t cam_iosched_bw_caniop; 202 static l_iop_t cam_iosched_bw_iop; 203 204 struct limswitch { 205 l_init_t *l_init; 206 l_tick_t *l_tick; 207 l_iop_t *l_iop; 208 l_iop_t *l_caniop; 209 l_iodone_t *l_iodone; 210 } limsw[] = 211 { 212 { /* none */ 213 .l_init = NULL, 214 .l_tick = NULL, 215 .l_iop = NULL, 216 .l_iodone= NULL, 217 }, 218 { /* queue_depth */ 219 .l_init = NULL, 220 .l_tick = NULL, 221 .l_caniop = cam_iosched_qd_caniop, 222 .l_iop = cam_iosched_qd_iop, 223 .l_iodone= cam_iosched_qd_iodone, 224 }, 225 { /* iops */ 226 .l_init = cam_iosched_iops_init, 227 .l_tick = cam_iosched_iops_tick, 228 .l_caniop = cam_iosched_iops_caniop, 229 .l_iop = cam_iosched_iops_iop, 230 .l_iodone= NULL, 231 }, 232 { /* bandwidth */ 233 .l_init = cam_iosched_bw_init, 234 .l_tick = cam_iosched_bw_tick, 235 .l_caniop = cam_iosched_bw_caniop, 236 .l_iop = cam_iosched_bw_iop, 237 .l_iodone= NULL, 238 }, 239 }; 240 241 struct iop_stats { 242 /* 243 * sysctl state for this subnode. 244 */ 245 struct sysctl_ctx_list sysctl_ctx; 246 struct sysctl_oid *sysctl_tree; 247 248 /* 249 * Information about the current rate limiters, if any 250 */ 251 io_limiter limiter; /* How are I/Os being limited */ 252 int min; /* Low range of limit */ 253 int max; /* High range of limit */ 254 int current; /* Current rate limiter */ 255 int l_value1; /* per-limiter scratch value 1. */ 256 int l_value2; /* per-limiter scratch value 2. */ 257 258 /* 259 * Debug information about counts of I/Os that have gone through the 260 * scheduler. 261 */ 262 int pending; /* I/Os pending in the hardware */ 263 int queued; /* number currently in the queue */ 264 int total; /* Total for all time -- wraps */ 265 int in; /* number queued all time -- wraps */ 266 int out; /* number completed all time -- wraps */ 267 int errs; /* Number of I/Os completed with error -- wraps */ 268 269 /* 270 * Statistics on different bits of the process. 271 */ 272 /* Exp Moving Average, see alpha_bits for more details */ 273 sbintime_t ema; 274 sbintime_t emvar; 275 sbintime_t sd; /* Last computed sd */ 276 277 uint32_t state_flags; 278 #define IOP_RATE_LIMITED 1u 279 280 uint64_t latencies[LAT_BUCKETS]; 281 282 struct cam_iosched_softc *softc; 283 }; 284 285 typedef enum { 286 set_max = 0, /* current = max */ 287 read_latency, /* Steer read latency by throttling writes */ 288 cl_max /* Keep last */ 289 } control_type; 290 291 static const char *cam_iosched_control_type_names[] = 292 { "set_max", "read_latency" }; 293 294 struct control_loop { 295 /* 296 * sysctl state for this subnode. 297 */ 298 struct sysctl_ctx_list sysctl_ctx; 299 struct sysctl_oid *sysctl_tree; 300 301 sbintime_t next_steer; /* Time of next steer */ 302 sbintime_t steer_interval; /* How often do we steer? */ 303 sbintime_t lolat; 304 sbintime_t hilat; 305 int alpha; 306 control_type type; /* What type of control? */ 307 int last_count; /* Last I/O count */ 308 309 struct cam_iosched_softc *softc; 310 }; 311 312 #endif 313 314 struct cam_iosched_softc { 315 struct bio_queue_head bio_queue; 316 struct bio_queue_head trim_queue; 317 /* scheduler flags < 16, user flags >= 16 */ 318 uint32_t flags; 319 int sort_io_queue; 320 int trim_goal; /* # of trims to queue before sending */ 321 int trim_ticks; /* Max ticks to hold trims */ 322 int last_trim_tick; /* Last 'tick' time ld a trim */ 323 int queued_trims; /* Number of trims in the queue */ 324 #ifdef CAM_IOSCHED_DYNAMIC 325 int read_bias; /* Read bias setting */ 326 int current_read_bias; /* Current read bias state */ 327 int total_ticks; 328 int load; /* EMA of 'load average' of disk / 2^16 */ 329 330 struct bio_queue_head write_queue; 331 struct iop_stats read_stats, write_stats, trim_stats; 332 struct sysctl_ctx_list sysctl_ctx; 333 struct sysctl_oid *sysctl_tree; 334 335 int quanta; /* Number of quanta per second */ 336 struct callout ticker; /* Callout for our quota system */ 337 struct cam_periph *periph; /* cam periph associated with this device */ 338 uint32_t this_frac; /* Fraction of a second (1024ths) for this tick */ 339 sbintime_t last_time; /* Last time we ticked */ 340 struct control_loop cl; 341 sbintime_t max_lat; /* when != 0, if iop latency > max_lat, call max_lat_fcn */ 342 cam_iosched_latfcn_t latfcn; 343 void *latarg; 344 #endif 345 }; 346 347 #ifdef CAM_IOSCHED_DYNAMIC 348 /* 349 * helper functions to call the limsw functions. 350 */ 351 static int 352 cam_iosched_limiter_init(struct iop_stats *ios) 353 { 354 int lim = ios->limiter; 355 356 /* maybe this should be a kassert */ 357 if (lim < none || lim >= limiter_max) 358 return EINVAL; 359 360 if (limsw[lim].l_init) 361 return limsw[lim].l_init(ios); 362 363 return 0; 364 } 365 366 static int 367 cam_iosched_limiter_tick(struct iop_stats *ios) 368 { 369 int lim = ios->limiter; 370 371 /* maybe this should be a kassert */ 372 if (lim < none || lim >= limiter_max) 373 return EINVAL; 374 375 if (limsw[lim].l_tick) 376 return limsw[lim].l_tick(ios); 377 378 return 0; 379 } 380 381 static int 382 cam_iosched_limiter_iop(struct iop_stats *ios, struct bio *bp) 383 { 384 int lim = ios->limiter; 385 386 /* maybe this should be a kassert */ 387 if (lim < none || lim >= limiter_max) 388 return EINVAL; 389 390 if (limsw[lim].l_iop) 391 return limsw[lim].l_iop(ios, bp); 392 393 return 0; 394 } 395 396 static int 397 cam_iosched_limiter_caniop(struct iop_stats *ios, struct bio *bp) 398 { 399 int lim = ios->limiter; 400 401 /* maybe this should be a kassert */ 402 if (lim < none || lim >= limiter_max) 403 return EINVAL; 404 405 if (limsw[lim].l_caniop) 406 return limsw[lim].l_caniop(ios, bp); 407 408 return 0; 409 } 410 411 static int 412 cam_iosched_limiter_iodone(struct iop_stats *ios, struct bio *bp) 413 { 414 int lim = ios->limiter; 415 416 /* maybe this should be a kassert */ 417 if (lim < none || lim >= limiter_max) 418 return 0; 419 420 if (limsw[lim].l_iodone) 421 return limsw[lim].l_iodone(ios, bp); 422 423 return 0; 424 } 425 426 /* 427 * Functions to implement the different kinds of limiters 428 */ 429 430 static int 431 cam_iosched_qd_iop(struct iop_stats *ios, struct bio *bp) 432 { 433 434 if (ios->current <= 0 || ios->pending < ios->current) 435 return 0; 436 437 return EAGAIN; 438 } 439 440 static int 441 cam_iosched_qd_caniop(struct iop_stats *ios, struct bio *bp) 442 { 443 444 if (ios->current <= 0 || ios->pending < ios->current) 445 return 0; 446 447 return EAGAIN; 448 } 449 450 static int 451 cam_iosched_qd_iodone(struct iop_stats *ios, struct bio *bp) 452 { 453 454 if (ios->current <= 0 || ios->pending != ios->current) 455 return 0; 456 457 return 1; 458 } 459 460 static int 461 cam_iosched_iops_init(struct iop_stats *ios) 462 { 463 464 ios->l_value1 = ios->current / ios->softc->quanta; 465 if (ios->l_value1 <= 0) 466 ios->l_value1 = 1; 467 ios->l_value2 = 0; 468 469 return 0; 470 } 471 472 static int 473 cam_iosched_iops_tick(struct iop_stats *ios) 474 { 475 int new_ios; 476 477 /* 478 * Allow at least one IO per tick until all 479 * the IOs for this interval have been spent. 480 */ 481 new_ios = (int)((ios->current * (uint64_t)ios->softc->this_frac) >> 16); 482 if (new_ios < 1 && ios->l_value2 < ios->current) { 483 new_ios = 1; 484 ios->l_value2++; 485 } 486 487 /* 488 * If this a new accounting interval, discard any "unspent" ios 489 * granted in the previous interval. Otherwise add the new ios to 490 * the previously granted ones that haven't been spent yet. 491 */ 492 if ((ios->softc->total_ticks % ios->softc->quanta) == 0) { 493 ios->l_value1 = new_ios; 494 ios->l_value2 = 1; 495 } else { 496 ios->l_value1 += new_ios; 497 } 498 499 return 0; 500 } 501 502 static int 503 cam_iosched_iops_caniop(struct iop_stats *ios, struct bio *bp) 504 { 505 506 /* 507 * So if we have any more IOPs left, allow it, 508 * otherwise wait. If current iops is 0, treat that 509 * as unlimited as a failsafe. 510 */ 511 if (ios->current > 0 && ios->l_value1 <= 0) 512 return EAGAIN; 513 return 0; 514 } 515 516 static int 517 cam_iosched_iops_iop(struct iop_stats *ios, struct bio *bp) 518 { 519 int rv; 520 521 rv = cam_iosched_limiter_caniop(ios, bp); 522 if (rv == 0) 523 ios->l_value1--; 524 525 return rv; 526 } 527 528 static int 529 cam_iosched_bw_init(struct iop_stats *ios) 530 { 531 532 /* ios->current is in kB/s, so scale to bytes */ 533 ios->l_value1 = ios->current * 1000 / ios->softc->quanta; 534 535 return 0; 536 } 537 538 static int 539 cam_iosched_bw_tick(struct iop_stats *ios) 540 { 541 int bw; 542 543 /* 544 * If we're in the hole for available quota from 545 * the last time, then add the quantum for this. 546 * If we have any left over from last quantum, 547 * then too bad, that's lost. Also, ios->current 548 * is in kB/s, so scale. 549 * 550 * We also allow up to 4 quanta of credits to 551 * accumulate to deal with burstiness. 4 is extremely 552 * arbitrary. 553 */ 554 bw = (int)((ios->current * 1000ull * (uint64_t)ios->softc->this_frac) >> 16); 555 if (ios->l_value1 < bw * 4) 556 ios->l_value1 += bw; 557 558 return 0; 559 } 560 561 static int 562 cam_iosched_bw_caniop(struct iop_stats *ios, struct bio *bp) 563 { 564 /* 565 * So if we have any more bw quota left, allow it, 566 * otherwise wait. Note, we'll go negative and that's 567 * OK. We'll just get a little less next quota. 568 * 569 * Note on going negative: that allows us to process 570 * requests in order better, since we won't allow 571 * shorter reads to get around the long one that we 572 * don't have the quota to do just yet. It also prevents 573 * starvation by being a little more permissive about 574 * what we let through this quantum (to prevent the 575 * starvation), at the cost of getting a little less 576 * next quantum. 577 * 578 * Also note that if the current limit is <= 0, 579 * we treat it as unlimited as a failsafe. 580 */ 581 if (ios->current > 0 && ios->l_value1 <= 0) 582 return EAGAIN; 583 584 return 0; 585 } 586 587 static int 588 cam_iosched_bw_iop(struct iop_stats *ios, struct bio *bp) 589 { 590 int rv; 591 592 rv = cam_iosched_limiter_caniop(ios, bp); 593 if (rv == 0) 594 ios->l_value1 -= bp->bio_length; 595 596 return rv; 597 } 598 599 static void cam_iosched_cl_maybe_steer(struct control_loop *clp); 600 601 static void 602 cam_iosched_ticker(void *arg) 603 { 604 struct cam_iosched_softc *isc = arg; 605 sbintime_t now, delta; 606 int pending; 607 608 callout_reset(&isc->ticker, hz / isc->quanta, cam_iosched_ticker, isc); 609 610 now = sbinuptime(); 611 delta = now - isc->last_time; 612 isc->this_frac = (uint32_t)delta >> 16; /* Note: discards seconds -- should be 0 harmless if not */ 613 isc->last_time = now; 614 615 cam_iosched_cl_maybe_steer(&isc->cl); 616 617 cam_iosched_limiter_tick(&isc->read_stats); 618 cam_iosched_limiter_tick(&isc->write_stats); 619 cam_iosched_limiter_tick(&isc->trim_stats); 620 621 cam_iosched_schedule(isc, isc->periph); 622 623 /* 624 * isc->load is an EMA of the pending I/Os at each tick. The number of 625 * pending I/Os is the sum of the I/Os queued to the hardware, and those 626 * in the software queue that could be queued to the hardware if there 627 * were slots. 628 * 629 * ios_stats.pending is a count of requests in the SIM right now for 630 * each of these types of I/O. So the total pending count is the sum of 631 * these I/Os and the sum of the queued I/Os still in the software queue 632 * for those operations that aren't being rate limited at the moment. 633 * 634 * The reason for the rate limiting bit is because those I/Os 635 * aren't part of the software queued load (since we could 636 * give them to hardware, but choose not to). 637 * 638 * Note: due to a bug in counting pending TRIM in the device, we 639 * don't include them in this count. We count each BIO_DELETE in 640 * the pending count, but the periph drivers collapse them down 641 * into one TRIM command. That one trim command gets the completion 642 * so the counts get off. 643 */ 644 pending = isc->read_stats.pending + isc->write_stats.pending /* + isc->trim_stats.pending */; 645 pending += !!(isc->read_stats.state_flags & IOP_RATE_LIMITED) * isc->read_stats.queued + 646 !!(isc->write_stats.state_flags & IOP_RATE_LIMITED) * isc->write_stats.queued /* + 647 !!(isc->trim_stats.state_flags & IOP_RATE_LIMITED) * isc->trim_stats.queued */ ; 648 pending <<= 16; 649 pending /= isc->periph->path->device->ccbq.total_openings; 650 651 isc->load = (pending + (isc->load << 13) - isc->load) >> 13; /* see above: 13 -> 16139 / 200/s = ~81s ~1 minute */ 652 653 isc->total_ticks++; 654 } 655 656 static void 657 cam_iosched_cl_init(struct control_loop *clp, struct cam_iosched_softc *isc) 658 { 659 660 clp->next_steer = sbinuptime(); 661 clp->softc = isc; 662 clp->steer_interval = SBT_1S * 5; /* Let's start out steering every 5s */ 663 clp->lolat = 5 * SBT_1MS; 664 clp->hilat = 15 * SBT_1MS; 665 clp->alpha = 20; /* Alpha == gain. 20 = .2 */ 666 clp->type = set_max; 667 } 668 669 static void 670 cam_iosched_cl_maybe_steer(struct control_loop *clp) 671 { 672 struct cam_iosched_softc *isc; 673 sbintime_t now, lat; 674 int old; 675 676 isc = clp->softc; 677 now = isc->last_time; 678 if (now < clp->next_steer) 679 return; 680 681 clp->next_steer = now + clp->steer_interval; 682 switch (clp->type) { 683 case set_max: 684 if (isc->write_stats.current != isc->write_stats.max) 685 printf("Steering write from %d kBps to %d kBps\n", 686 isc->write_stats.current, isc->write_stats.max); 687 isc->read_stats.current = isc->read_stats.max; 688 isc->write_stats.current = isc->write_stats.max; 689 isc->trim_stats.current = isc->trim_stats.max; 690 break; 691 case read_latency: 692 old = isc->write_stats.current; 693 lat = isc->read_stats.ema; 694 /* 695 * Simple PLL-like engine. Since we're steering to a range for 696 * the SP (set point) that makes things a little more 697 * complicated. In addition, we're not directly controlling our 698 * PV (process variable), the read latency, but instead are 699 * manipulating the write bandwidth limit for our MV 700 * (manipulation variable), analysis of this code gets a bit 701 * messy. Also, the MV is a very noisy control surface for read 702 * latency since it is affected by many hidden processes inside 703 * the device which change how responsive read latency will be 704 * in reaction to changes in write bandwidth. Unlike the classic 705 * boiler control PLL. this may result in over-steering while 706 * the SSD takes its time to react to the new, lower load. This 707 * is why we use a relatively low alpha of between .1 and .25 to 708 * compensate for this effect. At .1, it takes ~22 steering 709 * intervals to back off by a factor of 10. At .2 it only takes 710 * ~10. At .25 it only takes ~8. However some preliminary data 711 * from the SSD drives suggests a reasponse time in 10's of 712 * seconds before latency drops regardless of the new write 713 * rate. Careful observation will be required to tune this 714 * effectively. 715 * 716 * Also, when there's no read traffic, we jack up the write 717 * limit too regardless of the last read latency. 10 is 718 * somewhat arbitrary. 719 */ 720 if (lat < clp->lolat || isc->read_stats.total - clp->last_count < 10) 721 isc->write_stats.current = isc->write_stats.current * 722 (100 + clp->alpha) / 100; /* Scale up */ 723 else if (lat > clp->hilat) 724 isc->write_stats.current = isc->write_stats.current * 725 (100 - clp->alpha) / 100; /* Scale down */ 726 clp->last_count = isc->read_stats.total; 727 728 /* 729 * Even if we don't steer, per se, enforce the min/max limits as 730 * those may have changed. 731 */ 732 if (isc->write_stats.current < isc->write_stats.min) 733 isc->write_stats.current = isc->write_stats.min; 734 if (isc->write_stats.current > isc->write_stats.max) 735 isc->write_stats.current = isc->write_stats.max; 736 if (old != isc->write_stats.current && iosched_debug) 737 printf("Steering write from %d kBps to %d kBps due to latency of %jdus\n", 738 old, isc->write_stats.current, 739 (uintmax_t)((uint64_t)1000000 * (uint32_t)lat) >> 32); 740 break; 741 case cl_max: 742 break; 743 } 744 } 745 #endif 746 747 /* 748 * Trim or similar currently pending completion. Should only be set for 749 * those drivers wishing only one Trim active at a time. 750 */ 751 #define CAM_IOSCHED_FLAG_TRIM_ACTIVE (1ul << 0) 752 /* Callout active, and needs to be torn down */ 753 #define CAM_IOSCHED_FLAG_CALLOUT_ACTIVE (1ul << 1) 754 755 /* Periph drivers set these flags to indicate work */ 756 #define CAM_IOSCHED_FLAG_WORK_FLAGS ((0xffffu) << 16) 757 758 #ifdef CAM_IOSCHED_DYNAMIC 759 static void 760 cam_iosched_io_metric_update(struct cam_iosched_softc *isc, 761 sbintime_t sim_latency, int cmd, size_t size); 762 #endif 763 764 static inline bool 765 cam_iosched_has_flagged_work(struct cam_iosched_softc *isc) 766 { 767 return !!(isc->flags & CAM_IOSCHED_FLAG_WORK_FLAGS); 768 } 769 770 static inline bool 771 cam_iosched_has_io(struct cam_iosched_softc *isc) 772 { 773 #ifdef CAM_IOSCHED_DYNAMIC 774 if (do_dynamic_iosched) { 775 struct bio *rbp = bioq_first(&isc->bio_queue); 776 struct bio *wbp = bioq_first(&isc->write_queue); 777 bool can_write = wbp != NULL && 778 cam_iosched_limiter_caniop(&isc->write_stats, wbp) == 0; 779 bool can_read = rbp != NULL && 780 cam_iosched_limiter_caniop(&isc->read_stats, rbp) == 0; 781 if (iosched_debug > 2) { 782 printf("can write %d: pending_writes %d max_writes %d\n", can_write, isc->write_stats.pending, isc->write_stats.max); 783 printf("can read %d: read_stats.pending %d max_reads %d\n", can_read, isc->read_stats.pending, isc->read_stats.max); 784 printf("Queued reads %d writes %d\n", isc->read_stats.queued, isc->write_stats.queued); 785 } 786 return can_read || can_write; 787 } 788 #endif 789 return bioq_first(&isc->bio_queue) != NULL; 790 } 791 792 static inline bool 793 cam_iosched_has_more_trim(struct cam_iosched_softc *isc) 794 { 795 struct bio *bp; 796 797 bp = bioq_first(&isc->trim_queue); 798 #ifdef CAM_IOSCHED_DYNAMIC 799 if (do_dynamic_iosched) { 800 /* 801 * If we're limiting trims, then defer action on trims 802 * for a bit. 803 */ 804 if (bp == NULL || cam_iosched_limiter_caniop(&isc->trim_stats, bp) != 0) 805 return false; 806 } 807 #endif 808 809 /* 810 * If we've set a trim_goal, then if we exceed that allow trims 811 * to be passed back to the driver. If we've also set a tick timeout 812 * allow trims back to the driver. Otherwise, don't allow trims yet. 813 */ 814 if (isc->trim_goal > 0) { 815 if (isc->queued_trims >= isc->trim_goal) 816 return true; 817 if (isc->queued_trims > 0 && 818 isc->trim_ticks > 0 && 819 ticks - isc->last_trim_tick > isc->trim_ticks) 820 return true; 821 return false; 822 } 823 824 /* NB: Should perhaps have a max trim active independent of I/O limiters */ 825 return !(isc->flags & CAM_IOSCHED_FLAG_TRIM_ACTIVE) && bp != NULL; 826 } 827 828 #define cam_iosched_sort_queue(isc) ((isc)->sort_io_queue >= 0 ? \ 829 (isc)->sort_io_queue : cam_sort_io_queues) 830 831 static inline bool 832 cam_iosched_has_work(struct cam_iosched_softc *isc) 833 { 834 #ifdef CAM_IOSCHED_DYNAMIC 835 if (iosched_debug > 2) 836 printf("has work: %d %d %d\n", cam_iosched_has_io(isc), 837 cam_iosched_has_more_trim(isc), 838 cam_iosched_has_flagged_work(isc)); 839 #endif 840 841 return cam_iosched_has_io(isc) || 842 cam_iosched_has_more_trim(isc) || 843 cam_iosched_has_flagged_work(isc); 844 } 845 846 #ifdef CAM_IOSCHED_DYNAMIC 847 static void 848 cam_iosched_iop_stats_init(struct cam_iosched_softc *isc, struct iop_stats *ios) 849 { 850 851 ios->limiter = none; 852 ios->in = 0; 853 ios->max = ios->current = 300000; 854 ios->min = 1; 855 ios->out = 0; 856 ios->errs = 0; 857 ios->pending = 0; 858 ios->queued = 0; 859 ios->total = 0; 860 ios->ema = 0; 861 ios->emvar = 0; 862 ios->softc = isc; 863 cam_iosched_limiter_init(ios); 864 } 865 866 static int 867 cam_iosched_limiter_sysctl(SYSCTL_HANDLER_ARGS) 868 { 869 char buf[16]; 870 struct iop_stats *ios; 871 struct cam_iosched_softc *isc; 872 int value, i, error; 873 const char *p; 874 875 ios = arg1; 876 isc = ios->softc; 877 value = ios->limiter; 878 if (value < none || value >= limiter_max) 879 p = "UNKNOWN"; 880 else 881 p = cam_iosched_limiter_names[value]; 882 883 strlcpy(buf, p, sizeof(buf)); 884 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 885 if (error != 0 || req->newptr == NULL) 886 return error; 887 888 cam_periph_lock(isc->periph); 889 890 for (i = none; i < limiter_max; i++) { 891 if (strcmp(buf, cam_iosched_limiter_names[i]) != 0) 892 continue; 893 ios->limiter = i; 894 error = cam_iosched_limiter_init(ios); 895 if (error != 0) { 896 ios->limiter = value; 897 cam_periph_unlock(isc->periph); 898 return error; 899 } 900 /* Note: disk load averate requires ticker to be always running */ 901 callout_reset(&isc->ticker, hz / isc->quanta, cam_iosched_ticker, isc); 902 isc->flags |= CAM_IOSCHED_FLAG_CALLOUT_ACTIVE; 903 904 cam_periph_unlock(isc->periph); 905 return 0; 906 } 907 908 cam_periph_unlock(isc->periph); 909 return EINVAL; 910 } 911 912 static int 913 cam_iosched_control_type_sysctl(SYSCTL_HANDLER_ARGS) 914 { 915 char buf[16]; 916 struct control_loop *clp; 917 struct cam_iosched_softc *isc; 918 int value, i, error; 919 const char *p; 920 921 clp = arg1; 922 isc = clp->softc; 923 value = clp->type; 924 if (value < none || value >= cl_max) 925 p = "UNKNOWN"; 926 else 927 p = cam_iosched_control_type_names[value]; 928 929 strlcpy(buf, p, sizeof(buf)); 930 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 931 if (error != 0 || req->newptr == NULL) 932 return error; 933 934 for (i = set_max; i < cl_max; i++) { 935 if (strcmp(buf, cam_iosched_control_type_names[i]) != 0) 936 continue; 937 cam_periph_lock(isc->periph); 938 clp->type = i; 939 cam_periph_unlock(isc->periph); 940 return 0; 941 } 942 943 return EINVAL; 944 } 945 946 static int 947 cam_iosched_sbintime_sysctl(SYSCTL_HANDLER_ARGS) 948 { 949 char buf[16]; 950 sbintime_t value; 951 int error; 952 uint64_t us; 953 954 value = *(sbintime_t *)arg1; 955 us = (uint64_t)value / SBT_1US; 956 snprintf(buf, sizeof(buf), "%ju", (intmax_t)us); 957 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 958 if (error != 0 || req->newptr == NULL) 959 return error; 960 us = strtoul(buf, NULL, 10); 961 if (us == 0) 962 return EINVAL; 963 *(sbintime_t *)arg1 = us * SBT_1US; 964 return 0; 965 } 966 967 static int 968 cam_iosched_sysctl_latencies(SYSCTL_HANDLER_ARGS) 969 { 970 int i, error; 971 struct sbuf sb; 972 uint64_t *latencies; 973 974 latencies = arg1; 975 sbuf_new_for_sysctl(&sb, NULL, LAT_BUCKETS * 16, req); 976 977 for (i = 0; i < LAT_BUCKETS - 1; i++) 978 sbuf_printf(&sb, "%jd,", (intmax_t)latencies[i]); 979 sbuf_printf(&sb, "%jd", (intmax_t)latencies[LAT_BUCKETS - 1]); 980 error = sbuf_finish(&sb); 981 sbuf_delete(&sb); 982 983 return (error); 984 } 985 986 static int 987 cam_iosched_quanta_sysctl(SYSCTL_HANDLER_ARGS) 988 { 989 int *quanta; 990 int error, value; 991 992 quanta = (unsigned *)arg1; 993 value = *quanta; 994 995 error = sysctl_handle_int(oidp, (int *)&value, 0, req); 996 if ((error != 0) || (req->newptr == NULL)) 997 return (error); 998 999 if (value < 1 || value > hz) 1000 return (EINVAL); 1001 1002 *quanta = value; 1003 1004 return (0); 1005 } 1006 1007 static void 1008 cam_iosched_iop_stats_sysctl_init(struct cam_iosched_softc *isc, struct iop_stats *ios, char *name) 1009 { 1010 struct sysctl_oid_list *n; 1011 struct sysctl_ctx_list *ctx; 1012 1013 ios->sysctl_tree = SYSCTL_ADD_NODE(&isc->sysctl_ctx, 1014 SYSCTL_CHILDREN(isc->sysctl_tree), OID_AUTO, name, 1015 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, name); 1016 n = SYSCTL_CHILDREN(ios->sysctl_tree); 1017 ctx = &ios->sysctl_ctx; 1018 1019 SYSCTL_ADD_UQUAD(ctx, n, 1020 OID_AUTO, "ema", CTLFLAG_RD, 1021 &ios->ema, 1022 "Fast Exponentially Weighted Moving Average"); 1023 SYSCTL_ADD_UQUAD(ctx, n, 1024 OID_AUTO, "emvar", CTLFLAG_RD, 1025 &ios->emvar, 1026 "Fast Exponentially Weighted Moving Variance"); 1027 1028 SYSCTL_ADD_INT(ctx, n, 1029 OID_AUTO, "pending", CTLFLAG_RD, 1030 &ios->pending, 0, 1031 "Instantaneous # of pending transactions"); 1032 SYSCTL_ADD_INT(ctx, n, 1033 OID_AUTO, "count", CTLFLAG_RD, 1034 &ios->total, 0, 1035 "# of transactions submitted to hardware"); 1036 SYSCTL_ADD_INT(ctx, n, 1037 OID_AUTO, "queued", CTLFLAG_RD, 1038 &ios->queued, 0, 1039 "# of transactions in the queue"); 1040 SYSCTL_ADD_INT(ctx, n, 1041 OID_AUTO, "in", CTLFLAG_RD, 1042 &ios->in, 0, 1043 "# of transactions queued to driver"); 1044 SYSCTL_ADD_INT(ctx, n, 1045 OID_AUTO, "out", CTLFLAG_RD, 1046 &ios->out, 0, 1047 "# of transactions completed (including with error)"); 1048 SYSCTL_ADD_INT(ctx, n, 1049 OID_AUTO, "errs", CTLFLAG_RD, 1050 &ios->errs, 0, 1051 "# of transactions completed with an error"); 1052 1053 SYSCTL_ADD_PROC(ctx, n, 1054 OID_AUTO, "limiter", 1055 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 1056 ios, 0, cam_iosched_limiter_sysctl, "A", 1057 "Current limiting type."); 1058 SYSCTL_ADD_INT(ctx, n, 1059 OID_AUTO, "min", CTLFLAG_RW, 1060 &ios->min, 0, 1061 "min resource"); 1062 SYSCTL_ADD_INT(ctx, n, 1063 OID_AUTO, "max", CTLFLAG_RW, 1064 &ios->max, 0, 1065 "max resource"); 1066 SYSCTL_ADD_INT(ctx, n, 1067 OID_AUTO, "current", CTLFLAG_RW, 1068 &ios->current, 0, 1069 "current resource"); 1070 1071 SYSCTL_ADD_PROC(ctx, n, 1072 OID_AUTO, "latencies", 1073 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 1074 &ios->latencies, 0, 1075 cam_iosched_sysctl_latencies, "A", 1076 "Array of power of 2 latency from 1ms to 1.024s"); 1077 } 1078 1079 static void 1080 cam_iosched_iop_stats_fini(struct iop_stats *ios) 1081 { 1082 if (ios->sysctl_tree) 1083 if (sysctl_ctx_free(&ios->sysctl_ctx) != 0) 1084 printf("can't remove iosched sysctl stats context\n"); 1085 } 1086 1087 static void 1088 cam_iosched_cl_sysctl_init(struct cam_iosched_softc *isc) 1089 { 1090 struct sysctl_oid_list *n; 1091 struct sysctl_ctx_list *ctx; 1092 struct control_loop *clp; 1093 1094 clp = &isc->cl; 1095 clp->sysctl_tree = SYSCTL_ADD_NODE(&isc->sysctl_ctx, 1096 SYSCTL_CHILDREN(isc->sysctl_tree), OID_AUTO, "control", 1097 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Control loop info"); 1098 n = SYSCTL_CHILDREN(clp->sysctl_tree); 1099 ctx = &clp->sysctl_ctx; 1100 1101 SYSCTL_ADD_PROC(ctx, n, 1102 OID_AUTO, "type", 1103 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 1104 clp, 0, cam_iosched_control_type_sysctl, "A", 1105 "Control loop algorithm"); 1106 SYSCTL_ADD_PROC(ctx, n, 1107 OID_AUTO, "steer_interval", 1108 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 1109 &clp->steer_interval, 0, cam_iosched_sbintime_sysctl, "A", 1110 "How often to steer (in us)"); 1111 SYSCTL_ADD_PROC(ctx, n, 1112 OID_AUTO, "lolat", 1113 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 1114 &clp->lolat, 0, cam_iosched_sbintime_sysctl, "A", 1115 "Low water mark for Latency (in us)"); 1116 SYSCTL_ADD_PROC(ctx, n, 1117 OID_AUTO, "hilat", 1118 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 1119 &clp->hilat, 0, cam_iosched_sbintime_sysctl, "A", 1120 "Hi water mark for Latency (in us)"); 1121 SYSCTL_ADD_INT(ctx, n, 1122 OID_AUTO, "alpha", CTLFLAG_RW, 1123 &clp->alpha, 0, 1124 "Alpha for PLL (x100) aka gain"); 1125 } 1126 1127 static void 1128 cam_iosched_cl_sysctl_fini(struct control_loop *clp) 1129 { 1130 if (clp->sysctl_tree) 1131 if (sysctl_ctx_free(&clp->sysctl_ctx) != 0) 1132 printf("can't remove iosched sysctl control loop context\n"); 1133 } 1134 #endif 1135 1136 /* 1137 * Allocate the iosched structure. This also insulates callers from knowing 1138 * sizeof struct cam_iosched_softc. 1139 */ 1140 int 1141 cam_iosched_init(struct cam_iosched_softc **iscp, struct cam_periph *periph) 1142 { 1143 1144 *iscp = malloc(sizeof(**iscp), M_CAMSCHED, M_NOWAIT | M_ZERO); 1145 if (*iscp == NULL) 1146 return ENOMEM; 1147 #ifdef CAM_IOSCHED_DYNAMIC 1148 if (iosched_debug) 1149 printf("CAM IOSCHEDULER Allocating entry at %p\n", *iscp); 1150 #endif 1151 (*iscp)->sort_io_queue = -1; 1152 bioq_init(&(*iscp)->bio_queue); 1153 bioq_init(&(*iscp)->trim_queue); 1154 #ifdef CAM_IOSCHED_DYNAMIC 1155 if (do_dynamic_iosched) { 1156 bioq_init(&(*iscp)->write_queue); 1157 (*iscp)->read_bias = default_read_bias; 1158 (*iscp)->current_read_bias = 0; 1159 (*iscp)->quanta = min(hz, 200); 1160 cam_iosched_iop_stats_init(*iscp, &(*iscp)->read_stats); 1161 cam_iosched_iop_stats_init(*iscp, &(*iscp)->write_stats); 1162 cam_iosched_iop_stats_init(*iscp, &(*iscp)->trim_stats); 1163 (*iscp)->trim_stats.max = 1; /* Trims are special: one at a time for now */ 1164 (*iscp)->last_time = sbinuptime(); 1165 callout_init_mtx(&(*iscp)->ticker, cam_periph_mtx(periph), 0); 1166 (*iscp)->periph = periph; 1167 cam_iosched_cl_init(&(*iscp)->cl, *iscp); 1168 callout_reset(&(*iscp)->ticker, hz / (*iscp)->quanta, cam_iosched_ticker, *iscp); 1169 (*iscp)->flags |= CAM_IOSCHED_FLAG_CALLOUT_ACTIVE; 1170 } 1171 #endif 1172 1173 return 0; 1174 } 1175 1176 /* 1177 * Reclaim all used resources. This assumes that other folks have 1178 * drained the requests in the hardware. Maybe an unwise assumption. 1179 */ 1180 void 1181 cam_iosched_fini(struct cam_iosched_softc *isc) 1182 { 1183 if (isc) { 1184 cam_iosched_flush(isc, NULL, ENXIO); 1185 #ifdef CAM_IOSCHED_DYNAMIC 1186 cam_iosched_iop_stats_fini(&isc->read_stats); 1187 cam_iosched_iop_stats_fini(&isc->write_stats); 1188 cam_iosched_iop_stats_fini(&isc->trim_stats); 1189 cam_iosched_cl_sysctl_fini(&isc->cl); 1190 if (isc->sysctl_tree) 1191 if (sysctl_ctx_free(&isc->sysctl_ctx) != 0) 1192 printf("can't remove iosched sysctl stats context\n"); 1193 if (isc->flags & CAM_IOSCHED_FLAG_CALLOUT_ACTIVE) { 1194 callout_drain(&isc->ticker); 1195 isc->flags &= ~ CAM_IOSCHED_FLAG_CALLOUT_ACTIVE; 1196 } 1197 #endif 1198 free(isc, M_CAMSCHED); 1199 } 1200 } 1201 1202 /* 1203 * After we're sure we're attaching a device, go ahead and add 1204 * hooks for any sysctl we may wish to honor. 1205 */ 1206 void cam_iosched_sysctl_init(struct cam_iosched_softc *isc, 1207 struct sysctl_ctx_list *ctx, struct sysctl_oid *node) 1208 { 1209 struct sysctl_oid_list *n; 1210 1211 n = SYSCTL_CHILDREN(node); 1212 SYSCTL_ADD_INT(ctx, n, 1213 OID_AUTO, "sort_io_queue", CTLFLAG_RW | CTLFLAG_MPSAFE, 1214 &isc->sort_io_queue, 0, 1215 "Sort IO queue to try and optimise disk access patterns"); 1216 SYSCTL_ADD_INT(ctx, n, 1217 OID_AUTO, "trim_goal", CTLFLAG_RW, 1218 &isc->trim_goal, 0, 1219 "Number of trims to try to accumulate before sending to hardware"); 1220 SYSCTL_ADD_INT(ctx, n, 1221 OID_AUTO, "trim_ticks", CTLFLAG_RW, 1222 &isc->trim_goal, 0, 1223 "IO Schedul qaunta to hold back trims for when accumulating"); 1224 1225 #ifdef CAM_IOSCHED_DYNAMIC 1226 if (!do_dynamic_iosched) 1227 return; 1228 1229 isc->sysctl_tree = SYSCTL_ADD_NODE(&isc->sysctl_ctx, 1230 SYSCTL_CHILDREN(node), OID_AUTO, "iosched", 1231 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "I/O scheduler statistics"); 1232 n = SYSCTL_CHILDREN(isc->sysctl_tree); 1233 ctx = &isc->sysctl_ctx; 1234 1235 cam_iosched_iop_stats_sysctl_init(isc, &isc->read_stats, "read"); 1236 cam_iosched_iop_stats_sysctl_init(isc, &isc->write_stats, "write"); 1237 cam_iosched_iop_stats_sysctl_init(isc, &isc->trim_stats, "trim"); 1238 cam_iosched_cl_sysctl_init(isc); 1239 1240 SYSCTL_ADD_INT(ctx, n, 1241 OID_AUTO, "read_bias", CTLFLAG_RW, 1242 &isc->read_bias, default_read_bias, 1243 "How biased towards read should we be independent of limits"); 1244 1245 SYSCTL_ADD_PROC(ctx, n, 1246 OID_AUTO, "quanta", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1247 &isc->quanta, 0, cam_iosched_quanta_sysctl, "I", 1248 "How many quanta per second do we slice the I/O up into"); 1249 1250 SYSCTL_ADD_INT(ctx, n, 1251 OID_AUTO, "total_ticks", CTLFLAG_RD, 1252 &isc->total_ticks, 0, 1253 "Total number of ticks we've done"); 1254 1255 SYSCTL_ADD_INT(ctx, n, 1256 OID_AUTO, "load", CTLFLAG_RD, 1257 &isc->load, 0, 1258 "scaled load average / 100"); 1259 1260 SYSCTL_ADD_U64(ctx, n, 1261 OID_AUTO, "latency_trigger", CTLFLAG_RW, 1262 &isc->max_lat, 0, 1263 "Latency treshold to trigger callbacks"); 1264 #endif 1265 } 1266 1267 void 1268 cam_iosched_set_latfcn(struct cam_iosched_softc *isc, 1269 cam_iosched_latfcn_t fnp, void *argp) 1270 { 1271 #ifdef CAM_IOSCHED_DYNAMIC 1272 isc->latfcn = fnp; 1273 isc->latarg = argp; 1274 #endif 1275 } 1276 1277 /* 1278 * Client drivers can set two parameters. "goal" is the number of BIO_DELETEs 1279 * that will be queued up before iosched will "release" the trims to the client 1280 * driver to wo with what they will (usually combine as many as possible). If we 1281 * don't get this many, after trim_ticks we'll submit the I/O anyway with 1282 * whatever we have. We do need an I/O of some kind of to clock the deferred 1283 * trims out to disk. Since we will eventually get a write for the super block 1284 * or something before we shutdown, the trims will complete. To be safe, when a 1285 * BIO_FLUSH is presented to the iosched work queue, we set the ticks time far 1286 * enough in the past so we'll present the BIO_DELETEs to the client driver. 1287 * There might be a race if no BIO_DELETESs were queued, a BIO_FLUSH comes in 1288 * and then a BIO_DELETE is sent down. No know client does this, and there's 1289 * already a race between an ordered BIO_FLUSH and any BIO_DELETEs in flight, 1290 * but no client depends on the ordering being honored. 1291 * 1292 * XXX I'm not sure what the interaction between UFS direct BIOs and the BUF 1293 * flushing on shutdown. I think there's bufs that would be dependent on the BIO 1294 * finishing to write out at least metadata, so we'll be fine. To be safe, keep 1295 * the number of ticks low (less than maybe 10s) to avoid shutdown races. 1296 */ 1297 1298 void 1299 cam_iosched_set_trim_goal(struct cam_iosched_softc *isc, int goal) 1300 { 1301 1302 isc->trim_goal = goal; 1303 } 1304 1305 void 1306 cam_iosched_set_trim_ticks(struct cam_iosched_softc *isc, int trim_ticks) 1307 { 1308 1309 isc->trim_ticks = trim_ticks; 1310 } 1311 1312 /* 1313 * Flush outstanding I/O. Consumers of this library don't know all the 1314 * queues we may keep, so this allows all I/O to be flushed in one 1315 * convenient call. 1316 */ 1317 void 1318 cam_iosched_flush(struct cam_iosched_softc *isc, struct devstat *stp, int err) 1319 { 1320 bioq_flush(&isc->bio_queue, stp, err); 1321 bioq_flush(&isc->trim_queue, stp, err); 1322 #ifdef CAM_IOSCHED_DYNAMIC 1323 if (do_dynamic_iosched) 1324 bioq_flush(&isc->write_queue, stp, err); 1325 #endif 1326 } 1327 1328 #ifdef CAM_IOSCHED_DYNAMIC 1329 static struct bio * 1330 cam_iosched_get_write(struct cam_iosched_softc *isc) 1331 { 1332 struct bio *bp; 1333 1334 /* 1335 * We control the write rate by controlling how many requests we send 1336 * down to the drive at any one time. Fewer requests limits the 1337 * effects of both starvation when the requests take a while and write 1338 * amplification when each request is causing more than one write to 1339 * the NAND media. Limiting the queue depth like this will also limit 1340 * the write throughput and give and reads that want to compete to 1341 * compete unfairly. 1342 */ 1343 bp = bioq_first(&isc->write_queue); 1344 if (bp == NULL) { 1345 if (iosched_debug > 3) 1346 printf("No writes present in write_queue\n"); 1347 return NULL; 1348 } 1349 1350 /* 1351 * If pending read, prefer that based on current read bias 1352 * setting. 1353 */ 1354 if (bioq_first(&isc->bio_queue) && isc->current_read_bias) { 1355 if (iosched_debug) 1356 printf( 1357 "Reads present and current_read_bias is %d queued " 1358 "writes %d queued reads %d\n", 1359 isc->current_read_bias, isc->write_stats.queued, 1360 isc->read_stats.queued); 1361 isc->current_read_bias--; 1362 /* We're not limiting writes, per se, just doing reads first */ 1363 return NULL; 1364 } 1365 1366 /* 1367 * See if our current limiter allows this I/O. 1368 */ 1369 if (cam_iosched_limiter_iop(&isc->write_stats, bp) != 0) { 1370 if (iosched_debug) 1371 printf("Can't write because limiter says no.\n"); 1372 isc->write_stats.state_flags |= IOP_RATE_LIMITED; 1373 return NULL; 1374 } 1375 1376 /* 1377 * Let's do this: We've passed all the gates and we're a go 1378 * to schedule the I/O in the SIM. 1379 */ 1380 isc->current_read_bias = isc->read_bias; 1381 bioq_remove(&isc->write_queue, bp); 1382 if (bp->bio_cmd == BIO_WRITE) { 1383 isc->write_stats.queued--; 1384 isc->write_stats.total++; 1385 isc->write_stats.pending++; 1386 } 1387 if (iosched_debug > 9) 1388 printf("HWQ : %p %#x\n", bp, bp->bio_cmd); 1389 isc->write_stats.state_flags &= ~IOP_RATE_LIMITED; 1390 return bp; 1391 } 1392 #endif 1393 1394 /* 1395 * Put back a trim that you weren't able to actually schedule this time. 1396 */ 1397 void 1398 cam_iosched_put_back_trim(struct cam_iosched_softc *isc, struct bio *bp) 1399 { 1400 bioq_insert_head(&isc->trim_queue, bp); 1401 if (isc->queued_trims == 0) 1402 isc->last_trim_tick = ticks; 1403 isc->queued_trims++; 1404 #ifdef CAM_IOSCHED_DYNAMIC 1405 isc->trim_stats.queued++; 1406 isc->trim_stats.total--; /* since we put it back, don't double count */ 1407 isc->trim_stats.pending--; 1408 #endif 1409 } 1410 1411 /* 1412 * gets the next trim from the trim queue. 1413 * 1414 * Assumes we're called with the periph lock held. It removes this 1415 * trim from the queue and the device must explicitly reinsert it 1416 * should the need arise. 1417 */ 1418 struct bio * 1419 cam_iosched_next_trim(struct cam_iosched_softc *isc) 1420 { 1421 struct bio *bp; 1422 1423 bp = bioq_first(&isc->trim_queue); 1424 if (bp == NULL) 1425 return NULL; 1426 bioq_remove(&isc->trim_queue, bp); 1427 isc->queued_trims--; 1428 isc->last_trim_tick = ticks; /* Reset the tick timer when we take trims */ 1429 #ifdef CAM_IOSCHED_DYNAMIC 1430 isc->trim_stats.queued--; 1431 isc->trim_stats.total++; 1432 isc->trim_stats.pending++; 1433 #endif 1434 return bp; 1435 } 1436 1437 /* 1438 * gets an available trim from the trim queue, if there's no trim 1439 * already pending. It removes this trim from the queue and the device 1440 * must explicitly reinsert it should the need arise. 1441 * 1442 * Assumes we're called with the periph lock held. 1443 */ 1444 struct bio * 1445 cam_iosched_get_trim(struct cam_iosched_softc *isc) 1446 { 1447 #ifdef CAM_IOSCHED_DYNAMIC 1448 struct bio *bp; 1449 #endif 1450 1451 if (!cam_iosched_has_more_trim(isc)) 1452 return NULL; 1453 #ifdef CAM_IOSCHED_DYNAMIC 1454 bp = bioq_first(&isc->trim_queue); 1455 if (bp == NULL) 1456 return NULL; 1457 1458 /* 1459 * If pending read, prefer that based on current read bias setting. The 1460 * read bias is shared for both writes and TRIMs, but on TRIMs the bias 1461 * is for a combined TRIM not a single TRIM request that's come in. 1462 */ 1463 if (do_dynamic_iosched) { 1464 if (bioq_first(&isc->bio_queue) && isc->current_read_bias) { 1465 if (iosched_debug) 1466 printf("Reads present and current_read_bias is %d" 1467 " queued trims %d queued reads %d\n", 1468 isc->current_read_bias, isc->trim_stats.queued, 1469 isc->read_stats.queued); 1470 isc->current_read_bias--; 1471 /* We're not limiting TRIMS, per se, just doing reads first */ 1472 return NULL; 1473 } 1474 /* 1475 * We're going to do a trim, so reset the bias. 1476 */ 1477 isc->current_read_bias = isc->read_bias; 1478 } 1479 1480 /* 1481 * See if our current limiter allows this I/O. Because we only call this 1482 * here, and not in next_trim, the 'bandwidth' limits for trims won't 1483 * work, while the iops or max queued limits will work. It's tricky 1484 * because we want the limits to be from the perspective of the 1485 * "commands sent to the device." To make iops work, we need to check 1486 * only here (since we want all the ops we combine to count as one). To 1487 * make bw limits work, we'd need to check in next_trim, but that would 1488 * have the effect of limiting the iops as seen from the upper layers. 1489 */ 1490 if (cam_iosched_limiter_iop(&isc->trim_stats, bp) != 0) { 1491 if (iosched_debug) 1492 printf("Can't trim because limiter says no.\n"); 1493 isc->trim_stats.state_flags |= IOP_RATE_LIMITED; 1494 return NULL; 1495 } 1496 isc->current_read_bias = isc->read_bias; 1497 isc->trim_stats.state_flags &= ~IOP_RATE_LIMITED; 1498 /* cam_iosched_next_trim below keeps proper book */ 1499 #endif 1500 return cam_iosched_next_trim(isc); 1501 } 1502 1503 1504 #ifdef CAM_IOSCHED_DYNAMIC 1505 static struct bio * 1506 bio_next(struct bio *bp) 1507 { 1508 bp = TAILQ_NEXT(bp, bio_queue); 1509 /* 1510 * After the first commands, the ordered bit terminates 1511 * our search because BIO_ORDERED acts like a barrier. 1512 */ 1513 if (bp == NULL || bp->bio_flags & BIO_ORDERED) 1514 return NULL; 1515 return bp; 1516 } 1517 1518 static bool 1519 cam_iosched_rate_limited(struct iop_stats *ios) 1520 { 1521 return ios->state_flags & IOP_RATE_LIMITED; 1522 } 1523 #endif 1524 1525 /* 1526 * Determine what the next bit of work to do is for the periph. The 1527 * default implementation looks to see if we have trims to do, but no 1528 * trims outstanding. If so, we do that. Otherwise we see if we have 1529 * other work. If we do, then we do that. Otherwise why were we called? 1530 */ 1531 struct bio * 1532 cam_iosched_next_bio(struct cam_iosched_softc *isc) 1533 { 1534 struct bio *bp; 1535 1536 /* 1537 * See if we have a trim that can be scheduled. We can only send one 1538 * at a time down, so this takes that into account. 1539 * 1540 * XXX newer TRIM commands are queueable. Revisit this when we 1541 * implement them. 1542 */ 1543 if ((bp = cam_iosched_get_trim(isc)) != NULL) 1544 return bp; 1545 1546 #ifdef CAM_IOSCHED_DYNAMIC 1547 /* 1548 * See if we have any pending writes, room in the queue for them, 1549 * and no pending reads (unless we've scheduled too many). 1550 * if so, those are next. 1551 */ 1552 if (do_dynamic_iosched) { 1553 if ((bp = cam_iosched_get_write(isc)) != NULL) 1554 return bp; 1555 } 1556 #endif 1557 /* 1558 * next, see if there's other, normal I/O waiting. If so return that. 1559 */ 1560 #ifdef CAM_IOSCHED_DYNAMIC 1561 if (do_dynamic_iosched) { 1562 for (bp = bioq_first(&isc->bio_queue); bp != NULL; 1563 bp = bio_next(bp)) { 1564 /* 1565 * For the dynamic scheduler with a read bias, bio_queue 1566 * is only for reads. However, without one, all 1567 * operations are queued. Enforce limits here for any 1568 * operation we find here. 1569 */ 1570 if (bp->bio_cmd == BIO_READ) { 1571 if (cam_iosched_rate_limited(&isc->read_stats) || 1572 cam_iosched_limiter_iop(&isc->read_stats, bp) != 0) { 1573 isc->read_stats.state_flags |= IOP_RATE_LIMITED; 1574 continue; 1575 } 1576 isc->read_stats.state_flags &= ~IOP_RATE_LIMITED; 1577 } 1578 /* 1579 * There can only be write requests on the queue when 1580 * the read bias is 0, but we need to process them 1581 * here. We do not assert for read bias == 0, however, 1582 * since it is dynamic and we can have WRITE operations 1583 * in the queue after we transition from 0 to non-zero. 1584 */ 1585 if (bp->bio_cmd == BIO_WRITE) { 1586 if (cam_iosched_rate_limited(&isc->write_stats) || 1587 cam_iosched_limiter_iop(&isc->write_stats, bp) != 0) { 1588 isc->write_stats.state_flags |= IOP_RATE_LIMITED; 1589 continue; 1590 } 1591 isc->write_stats.state_flags &= ~IOP_RATE_LIMITED; 1592 } 1593 /* 1594 * here we know we have a bp that's != NULL, that's not rate limited 1595 * and can be the next I/O. 1596 */ 1597 break; 1598 } 1599 } else 1600 #endif 1601 bp = bioq_first(&isc->bio_queue); 1602 1603 if (bp == NULL) 1604 return (NULL); 1605 bioq_remove(&isc->bio_queue, bp); 1606 #ifdef CAM_IOSCHED_DYNAMIC 1607 if (do_dynamic_iosched) { 1608 if (bp->bio_cmd == BIO_READ) { 1609 isc->read_stats.queued--; 1610 isc->read_stats.total++; 1611 isc->read_stats.pending++; 1612 } else if (bp->bio_cmd == BIO_WRITE) { 1613 isc->write_stats.queued--; 1614 isc->write_stats.total++; 1615 isc->write_stats.pending++; 1616 } 1617 } 1618 if (iosched_debug > 9) 1619 printf("HWQ : %p %#x\n", bp, bp->bio_cmd); 1620 #endif 1621 return bp; 1622 } 1623 1624 /* 1625 * Driver has been given some work to do by the block layer. Tell the 1626 * scheduler about it and have it queue the work up. The scheduler module 1627 * will then return the currently most useful bit of work later, possibly 1628 * deferring work for various reasons. 1629 */ 1630 void 1631 cam_iosched_queue_work(struct cam_iosched_softc *isc, struct bio *bp) 1632 { 1633 1634 /* 1635 * A BIO_SPEEDUP from the upper layers means that they have a block 1636 * shortage. At the present, this is only sent when we're trying to 1637 * allocate blocks, but have a shortage before giving up. bio_length is 1638 * the size of their shortage. We will complete just enough BIO_DELETEs 1639 * in the queue to satisfy the need. If bio_length is 0, we'll complete 1640 * them all. This allows the scheduler to delay BIO_DELETEs to improve 1641 * read/write performance without worrying about the upper layers. When 1642 * it's possibly a problem, we respond by pretending the BIO_DELETEs 1643 * just worked. We can't do anything about the BIO_DELETEs in the 1644 * hardware, though. We have to wait for them to complete. 1645 */ 1646 if (bp->bio_cmd == BIO_SPEEDUP) { 1647 off_t len; 1648 struct bio *nbp; 1649 1650 len = 0; 1651 while (bioq_first(&isc->trim_queue) && 1652 (bp->bio_length == 0 || len < bp->bio_length)) { 1653 nbp = bioq_takefirst(&isc->trim_queue); 1654 len += nbp->bio_length; 1655 nbp->bio_error = 0; 1656 biodone(nbp); 1657 } 1658 if (bp->bio_length > 0) { 1659 if (bp->bio_length > len) 1660 bp->bio_resid = bp->bio_length - len; 1661 else 1662 bp->bio_resid = 0; 1663 } 1664 bp->bio_error = 0; 1665 biodone(bp); 1666 return; 1667 } 1668 1669 /* 1670 * If we get a BIO_FLUSH, and we're doing delayed BIO_DELETEs then we 1671 * set the last tick time to one less than the current ticks minus the 1672 * delay to force the BIO_DELETEs to be presented to the client driver. 1673 */ 1674 if (bp->bio_cmd == BIO_FLUSH && isc->trim_ticks > 0) 1675 isc->last_trim_tick = ticks - isc->trim_ticks - 1; 1676 1677 /* 1678 * Put all trims on the trim queue. Otherwise put the work on the bio 1679 * queue. 1680 */ 1681 if (bp->bio_cmd == BIO_DELETE) { 1682 bioq_insert_tail(&isc->trim_queue, bp); 1683 if (isc->queued_trims == 0) 1684 isc->last_trim_tick = ticks; 1685 isc->queued_trims++; 1686 #ifdef CAM_IOSCHED_DYNAMIC 1687 isc->trim_stats.in++; 1688 isc->trim_stats.queued++; 1689 #endif 1690 } 1691 #ifdef CAM_IOSCHED_DYNAMIC 1692 else if (do_dynamic_iosched && isc->read_bias != 0 && 1693 (bp->bio_cmd != BIO_READ)) { 1694 if (cam_iosched_sort_queue(isc)) 1695 bioq_disksort(&isc->write_queue, bp); 1696 else 1697 bioq_insert_tail(&isc->write_queue, bp); 1698 if (iosched_debug > 9) 1699 printf("Qw : %p %#x\n", bp, bp->bio_cmd); 1700 if (bp->bio_cmd == BIO_WRITE) { 1701 isc->write_stats.in++; 1702 isc->write_stats.queued++; 1703 } 1704 } 1705 #endif 1706 else { 1707 if (cam_iosched_sort_queue(isc)) 1708 bioq_disksort(&isc->bio_queue, bp); 1709 else 1710 bioq_insert_tail(&isc->bio_queue, bp); 1711 #ifdef CAM_IOSCHED_DYNAMIC 1712 if (iosched_debug > 9) 1713 printf("Qr : %p %#x\n", bp, bp->bio_cmd); 1714 if (bp->bio_cmd == BIO_READ) { 1715 isc->read_stats.in++; 1716 isc->read_stats.queued++; 1717 } else if (bp->bio_cmd == BIO_WRITE) { 1718 isc->write_stats.in++; 1719 isc->write_stats.queued++; 1720 } 1721 #endif 1722 } 1723 } 1724 1725 /* 1726 * If we have work, get it scheduled. Called with the periph lock held. 1727 */ 1728 void 1729 cam_iosched_schedule(struct cam_iosched_softc *isc, struct cam_periph *periph) 1730 { 1731 1732 if (cam_iosched_has_work(isc)) 1733 xpt_schedule(periph, CAM_PRIORITY_NORMAL); 1734 } 1735 1736 /* 1737 * Complete a trim request. Mark that we no longer have one in flight. 1738 */ 1739 void 1740 cam_iosched_trim_done(struct cam_iosched_softc *isc) 1741 { 1742 1743 isc->flags &= ~CAM_IOSCHED_FLAG_TRIM_ACTIVE; 1744 } 1745 1746 /* 1747 * Complete a bio. Called before we release the ccb with xpt_release_ccb so we 1748 * might use notes in the ccb for statistics. 1749 */ 1750 int 1751 cam_iosched_bio_complete(struct cam_iosched_softc *isc, struct bio *bp, 1752 union ccb *done_ccb) 1753 { 1754 int retval = 0; 1755 #ifdef CAM_IOSCHED_DYNAMIC 1756 if (!do_dynamic_iosched) 1757 return retval; 1758 1759 if (iosched_debug > 10) 1760 printf("done: %p %#x\n", bp, bp->bio_cmd); 1761 if (bp->bio_cmd == BIO_WRITE) { 1762 retval = cam_iosched_limiter_iodone(&isc->write_stats, bp); 1763 if ((bp->bio_flags & BIO_ERROR) != 0) 1764 isc->write_stats.errs++; 1765 isc->write_stats.out++; 1766 isc->write_stats.pending--; 1767 } else if (bp->bio_cmd == BIO_READ) { 1768 retval = cam_iosched_limiter_iodone(&isc->read_stats, bp); 1769 if ((bp->bio_flags & BIO_ERROR) != 0) 1770 isc->read_stats.errs++; 1771 isc->read_stats.out++; 1772 isc->read_stats.pending--; 1773 } else if (bp->bio_cmd == BIO_DELETE) { 1774 if ((bp->bio_flags & BIO_ERROR) != 0) 1775 isc->trim_stats.errs++; 1776 isc->trim_stats.out++; 1777 isc->trim_stats.pending--; 1778 } else if (bp->bio_cmd != BIO_FLUSH) { 1779 if (iosched_debug) 1780 printf("Completing command with bio_cmd == %#x\n", bp->bio_cmd); 1781 } 1782 1783 if ((bp->bio_flags & BIO_ERROR) == 0 && done_ccb != NULL && 1784 (done_ccb->ccb_h.status & CAM_QOS_VALID) != 0) { 1785 sbintime_t sim_latency; 1786 1787 sim_latency = cam_iosched_sbintime_t(done_ccb->ccb_h.qos.periph_data); 1788 1789 cam_iosched_io_metric_update(isc, sim_latency, 1790 bp->bio_cmd, bp->bio_bcount); 1791 /* 1792 * Debugging code: allow callbacks to the periph driver when latency max 1793 * is exceeded. This can be useful for triggering external debugging actions. 1794 */ 1795 if (isc->latfcn && isc->max_lat != 0 && sim_latency > isc->max_lat) 1796 isc->latfcn(isc->latarg, sim_latency, bp); 1797 } 1798 1799 #endif 1800 return retval; 1801 } 1802 1803 /* 1804 * Tell the io scheduler that you've pushed a trim down into the sim. 1805 * This also tells the I/O scheduler not to push any more trims down, so 1806 * some periphs do not call it if they can cope with multiple trims in flight. 1807 */ 1808 void 1809 cam_iosched_submit_trim(struct cam_iosched_softc *isc) 1810 { 1811 1812 isc->flags |= CAM_IOSCHED_FLAG_TRIM_ACTIVE; 1813 } 1814 1815 /* 1816 * Change the sorting policy hint for I/O transactions for this device. 1817 */ 1818 void 1819 cam_iosched_set_sort_queue(struct cam_iosched_softc *isc, int val) 1820 { 1821 1822 isc->sort_io_queue = val; 1823 } 1824 1825 int 1826 cam_iosched_has_work_flags(struct cam_iosched_softc *isc, uint32_t flags) 1827 { 1828 return isc->flags & flags; 1829 } 1830 1831 void 1832 cam_iosched_set_work_flags(struct cam_iosched_softc *isc, uint32_t flags) 1833 { 1834 isc->flags |= flags; 1835 } 1836 1837 void 1838 cam_iosched_clr_work_flags(struct cam_iosched_softc *isc, uint32_t flags) 1839 { 1840 isc->flags &= ~flags; 1841 } 1842 1843 #ifdef CAM_IOSCHED_DYNAMIC 1844 /* 1845 * After the method presented in Jack Crenshaw's 1998 article "Integer 1846 * Square Roots," reprinted at 1847 * http://www.embedded.com/electronics-blogs/programmer-s-toolbox/4219659/Integer-Square-Roots 1848 * and well worth the read. Briefly, we find the power of 4 that's the 1849 * largest smaller than val. We then check each smaller power of 4 to 1850 * see if val is still bigger. The right shifts at each step divide 1851 * the result by 2 which after successive application winds up 1852 * accumulating the right answer. It could also have been accumulated 1853 * using a separate root counter, but this code is smaller and faster 1854 * than that method. This method is also integer size invariant. 1855 * It returns floor(sqrt((float)val)), or the largest integer less than 1856 * or equal to the square root. 1857 */ 1858 static uint64_t 1859 isqrt64(uint64_t val) 1860 { 1861 uint64_t res = 0; 1862 uint64_t bit = 1ULL << (sizeof(uint64_t) * NBBY - 2); 1863 1864 /* 1865 * Find the largest power of 4 smaller than val. 1866 */ 1867 while (bit > val) 1868 bit >>= 2; 1869 1870 /* 1871 * Accumulate the answer, one bit at a time (we keep moving 1872 * them over since 2 is the square root of 4 and we test 1873 * powers of 4). We accumulate where we find the bit, but 1874 * the successive shifts land the bit in the right place 1875 * by the end. 1876 */ 1877 while (bit != 0) { 1878 if (val >= res + bit) { 1879 val -= res + bit; 1880 res = (res >> 1) + bit; 1881 } else 1882 res >>= 1; 1883 bit >>= 2; 1884 } 1885 1886 return res; 1887 } 1888 1889 static sbintime_t latencies[LAT_BUCKETS - 1] = { 1890 BUCKET_BASE << 0, /* 20us */ 1891 BUCKET_BASE << 1, 1892 BUCKET_BASE << 2, 1893 BUCKET_BASE << 3, 1894 BUCKET_BASE << 4, 1895 BUCKET_BASE << 5, 1896 BUCKET_BASE << 6, 1897 BUCKET_BASE << 7, 1898 BUCKET_BASE << 8, 1899 BUCKET_BASE << 9, 1900 BUCKET_BASE << 10, 1901 BUCKET_BASE << 11, 1902 BUCKET_BASE << 12, 1903 BUCKET_BASE << 13, 1904 BUCKET_BASE << 14, 1905 BUCKET_BASE << 15, 1906 BUCKET_BASE << 16, 1907 BUCKET_BASE << 17, 1908 BUCKET_BASE << 18 /* 5,242,880us */ 1909 }; 1910 1911 static void 1912 cam_iosched_update(struct iop_stats *iop, sbintime_t sim_latency) 1913 { 1914 sbintime_t y, deltasq, delta; 1915 int i; 1916 1917 /* 1918 * Keep counts for latency. We do it by power of two buckets. 1919 * This helps us spot outlier behavior obscured by averages. 1920 */ 1921 for (i = 0; i < LAT_BUCKETS - 1; i++) { 1922 if (sim_latency < latencies[i]) { 1923 iop->latencies[i]++; 1924 break; 1925 } 1926 } 1927 if (i == LAT_BUCKETS - 1) 1928 iop->latencies[i]++; /* Put all > 8192ms values into the last bucket. */ 1929 1930 /* 1931 * Classic exponentially decaying average with a tiny alpha 1932 * (2 ^ -alpha_bits). For more info see the NIST statistical 1933 * handbook. 1934 * 1935 * ema_t = y_t * alpha + ema_t-1 * (1 - alpha) [nist] 1936 * ema_t = y_t * alpha + ema_t-1 - alpha * ema_t-1 1937 * ema_t = alpha * y_t - alpha * ema_t-1 + ema_t-1 1938 * alpha = 1 / (1 << alpha_bits) 1939 * sub e == ema_t-1, b == 1/alpha (== 1 << alpha_bits), d == y_t - ema_t-1 1940 * = y_t/b - e/b + be/b 1941 * = (y_t - e + be) / b 1942 * = (e + d) / b 1943 * 1944 * Since alpha is a power of two, we can compute this w/o any mult or 1945 * division. 1946 * 1947 * Variance can also be computed. Usually, it would be expressed as follows: 1948 * diff_t = y_t - ema_t-1 1949 * emvar_t = (1 - alpha) * (emavar_t-1 + diff_t^2 * alpha) 1950 * = emavar_t-1 - alpha * emavar_t-1 + delta_t^2 * alpha - (delta_t * alpha)^2 1951 * sub b == 1/alpha (== 1 << alpha_bits), e == emavar_t-1, d = delta_t^2 1952 * = e - e/b + dd/b + dd/bb 1953 * = (bbe - be + bdd + dd) / bb 1954 * = (bbe + b(dd-e) + dd) / bb (which is expanded below bb = 1<<(2*alpha_bits)) 1955 */ 1956 /* 1957 * XXX possible numeric issues 1958 * o We assume right shifted integers do the right thing, since that's 1959 * implementation defined. You can change the right shifts to / (1LL << alpha). 1960 * o alpha_bits = 9 gives ema ceiling of 23 bits of seconds for ema and 14 bits 1961 * for emvar. This puts a ceiling of 13 bits on alpha since we need a 1962 * few tens of seconds of representation. 1963 * o We mitigate alpha issues by never setting it too high. 1964 */ 1965 y = sim_latency; 1966 delta = (y - iop->ema); /* d */ 1967 iop->ema = ((iop->ema << alpha_bits) + delta) >> alpha_bits; 1968 1969 /* 1970 * Were we to naively plow ahead at this point, we wind up with many numerical 1971 * issues making any SD > ~3ms unreliable. So, we shift right by 12. This leaves 1972 * us with microsecond level precision in the input, so the same in the 1973 * output. It means we can't overflow deltasq unless delta > 4k seconds. It 1974 * also means that emvar can be up 46 bits 40 of which are fraction, which 1975 * gives us a way to measure up to ~8s in the SD before the computation goes 1976 * unstable. Even the worst hard disk rarely has > 1s service time in the 1977 * drive. It does mean we have to shift left 12 bits after taking the 1978 * square root to compute the actual standard deviation estimate. This loss of 1979 * precision is preferable to needing int128 types to work. The above numbers 1980 * assume alpha=9. 10 or 11 are ok, but we start to run into issues at 12, 1981 * so 12 or 13 is OK for EMA, EMVAR and SD will be wrong in those cases. 1982 */ 1983 delta >>= 12; 1984 deltasq = delta * delta; /* dd */ 1985 iop->emvar = ((iop->emvar << (2 * alpha_bits)) + /* bbe */ 1986 ((deltasq - iop->emvar) << alpha_bits) + /* b(dd-e) */ 1987 deltasq) /* dd */ 1988 >> (2 * alpha_bits); /* div bb */ 1989 iop->sd = (sbintime_t)isqrt64((uint64_t)iop->emvar) << 12; 1990 } 1991 1992 static void 1993 cam_iosched_io_metric_update(struct cam_iosched_softc *isc, 1994 sbintime_t sim_latency, int cmd, size_t size) 1995 { 1996 /* xxx Do we need to scale based on the size of the I/O ? */ 1997 switch (cmd) { 1998 case BIO_READ: 1999 cam_iosched_update(&isc->read_stats, sim_latency); 2000 break; 2001 case BIO_WRITE: 2002 cam_iosched_update(&isc->write_stats, sim_latency); 2003 break; 2004 case BIO_DELETE: 2005 cam_iosched_update(&isc->trim_stats, sim_latency); 2006 break; 2007 default: 2008 break; 2009 } 2010 } 2011 2012 #ifdef DDB 2013 static int biolen(struct bio_queue_head *bq) 2014 { 2015 int i = 0; 2016 struct bio *bp; 2017 2018 TAILQ_FOREACH(bp, &bq->queue, bio_queue) { 2019 i++; 2020 } 2021 return i; 2022 } 2023 2024 /* 2025 * Show the internal state of the I/O scheduler. 2026 */ 2027 DB_SHOW_COMMAND(iosched, cam_iosched_db_show) 2028 { 2029 struct cam_iosched_softc *isc; 2030 2031 if (!have_addr) { 2032 db_printf("Need addr\n"); 2033 return; 2034 } 2035 isc = (struct cam_iosched_softc *)addr; 2036 db_printf("pending_reads: %d\n", isc->read_stats.pending); 2037 db_printf("min_reads: %d\n", isc->read_stats.min); 2038 db_printf("max_reads: %d\n", isc->read_stats.max); 2039 db_printf("reads: %d\n", isc->read_stats.total); 2040 db_printf("in_reads: %d\n", isc->read_stats.in); 2041 db_printf("out_reads: %d\n", isc->read_stats.out); 2042 db_printf("queued_reads: %d\n", isc->read_stats.queued); 2043 db_printf("Read Q len %d\n", biolen(&isc->bio_queue)); 2044 db_printf("pending_writes: %d\n", isc->write_stats.pending); 2045 db_printf("min_writes: %d\n", isc->write_stats.min); 2046 db_printf("max_writes: %d\n", isc->write_stats.max); 2047 db_printf("writes: %d\n", isc->write_stats.total); 2048 db_printf("in_writes: %d\n", isc->write_stats.in); 2049 db_printf("out_writes: %d\n", isc->write_stats.out); 2050 db_printf("queued_writes: %d\n", isc->write_stats.queued); 2051 db_printf("Write Q len %d\n", biolen(&isc->write_queue)); 2052 db_printf("pending_trims: %d\n", isc->trim_stats.pending); 2053 db_printf("min_trims: %d\n", isc->trim_stats.min); 2054 db_printf("max_trims: %d\n", isc->trim_stats.max); 2055 db_printf("trims: %d\n", isc->trim_stats.total); 2056 db_printf("in_trims: %d\n", isc->trim_stats.in); 2057 db_printf("out_trims: %d\n", isc->trim_stats.out); 2058 db_printf("queued_trims: %d\n", isc->trim_stats.queued); 2059 db_printf("Trim Q len %d\n", biolen(&isc->trim_queue)); 2060 db_printf("read_bias: %d\n", isc->read_bias); 2061 db_printf("current_read_bias: %d\n", isc->current_read_bias); 2062 db_printf("Trim active? %s\n", 2063 (isc->flags & CAM_IOSCHED_FLAG_TRIM_ACTIVE) ? "yes" : "no"); 2064 } 2065 #endif 2066 #endif 2067