/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab_impl.h>
#include <sys/spa.h>
#include <sys/abd.h>

/*
 * ZFS I/O Scheduler
 * -----------------
 *
 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios. The
 * I/O scheduler determines when and in what order those operations are
 * issued. The I/O scheduler divides operations into I/O classes, prioritized
 * in the order defined by zio_priority_t: sync read, sync write, async read,
 * async write, scrub/resilver, removal, initializing, TRIM, and rebuild.
 * Each queue defines the minimum and maximum number of concurrent operations
 * that may be issued to the device. In addition, the device has an aggregate
 * maximum. Note that the sum of the per-queue minimums must not exceed the
 * aggregate maximum. If the sum of the per-queue maximums exceeds the
 * aggregate maximum, then the number of active i/os may reach
 * zfs_vdev_max_active, in which case no further i/os will be issued
 * regardless of whether all per-queue minimums have been met.
 *
 * For many physical devices, throughput increases with the number of
 * concurrent operations, but latency typically suffers. Further, physical
 * devices typically have a limit at which more concurrent operations have no
 * effect on throughput or can actually cause it to decrease.
 *
 * The scheduler selects the next operation to issue by first looking for an
 * I/O class whose minimum has not been satisfied. Once all are satisfied and
 * the aggregate maximum has not been hit, the scheduler looks for classes
 * whose maximum has not been satisfied. Iteration through the I/O classes is
 * done in the order specified above. No further operations are issued if the
 * aggregate maximum number of concurrent operations has been hit or if there
 * are no operations queued for an I/O class that has not hit its maximum.
 * Every time an i/o is queued or an operation completes, the I/O scheduler
 * looks for new operations to issue.
 *
 * All I/O classes have a fixed maximum number of outstanding operations
 * except for the async write class. Asynchronous writes represent the data
 * that is committed to stable storage during the syncing stage for
 * transaction groups (see txg.c).
 * Transaction groups enter the syncing state periodically, so the number of
 * queued async writes will quickly burst up and then bleed down to zero.
 * Rather than servicing them as quickly as possible, the I/O scheduler
 * changes the maximum number of active async write i/os according to the
 * amount of dirty data in the pool (see dsl_pool.c). Since both throughput
 * and latency typically increase with the number of concurrent operations
 * issued to physical devices, reducing the burstiness in the number of
 * concurrent operations also stabilizes the response time of operations from
 * other -- and in particular synchronous -- queues. In broad strokes, the
 * I/O scheduler will issue more concurrent operations from the async write
 * queue as there's more dirty data in the pool.
 *
 * Async Writes
 *
 * The number of concurrent operations issued for the async write I/O class
 * follows a piece-wise linear function defined by a few adjustable points.
 *
 *        |                   o---------|  <-- zfs_vdev_async_write_max_active
 *   ^    |                  /^         |
 *   |    |                 / |         |
 * active |                /  |         |
 *  I/O   |               /   |         |
 * count  |              /    |         |
 *        |             /     |         |
 *        |------------o      |         |  <-- zfs_vdev_async_write_min_active
 *       0|____________^______|_________|
 *        0%           |      |       100% of zfs_dirty_data_max
 *                     |      |
 *                     |      `-- zfs_vdev_async_write_active_max_dirty_percent
 *                     `--------- zfs_vdev_async_write_active_min_dirty_percent
 *
 * Until the amount of dirty data exceeds a minimum percentage of the dirty
 * data allowed in the pool, the I/O scheduler will limit the number of
 * concurrent operations to the minimum. As that threshold is crossed, the
 * number of concurrent operations issued increases linearly to the maximum at
 * the specified maximum percentage of the dirty data allowed in the pool.
 *
 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
 * and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
 * maximum percentage, this indicates that the rate of incoming data is
 * greater than the rate that the backend storage can handle. In this case, we
 * must further throttle incoming writes (see dmu_tx_delay() for details).
 */

/*
 * The maximum number of i/os active to each device. Ideally, this will be >=
 * the sum of each queue's max_active.
 */
uint32_t zfs_vdev_max_active = 1000;

/*
 * Per-queue limits on the number of i/os active to each device. If the
 * number of active i/os is < zfs_vdev_max_active, then the min_active comes
 * into play. We will send min_active from each queue round-robin, and then
 * send from queues in the order defined by zio_priority_t up to max_active.
 * Some queues have additional mechanisms to limit the number of active I/Os
 * in addition to min_active and max_active; see below.
 *
 * In general, smaller max_active values will lead to lower latency of
 * synchronous operations. Larger max_active values may lead to higher
 * overall throughput, depending on underlying storage.
 *
 * The ratio of the queues' max_active values determines the balance of
 * performance between reads, writes, and scrubs. E.g., increasing
 * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
 * more quickly, but reads and writes to have higher latency and lower
 * throughput.
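 *
 * As a worked example of the async write ramp described above, using the
 * default values below (min_active = 2, max_active = 10, and dirty-data
 * thresholds of 30% and 60%): when the pool is 45% dirty, each vdev is
 * allowed 2 + (45 - 30) / (60 - 30) * (10 - 2) = 6 concurrent async writes.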
 */
static uint32_t zfs_vdev_sync_read_min_active = 10;
static uint32_t zfs_vdev_sync_read_max_active = 10;
static uint32_t zfs_vdev_sync_write_min_active = 10;
static uint32_t zfs_vdev_sync_write_max_active = 10;
static uint32_t zfs_vdev_async_read_min_active = 1;
/* */ uint32_t zfs_vdev_async_read_max_active = 3;
static uint32_t zfs_vdev_async_write_min_active = 2;
/* */ uint32_t zfs_vdev_async_write_max_active = 10;
static uint32_t zfs_vdev_scrub_min_active = 1;
static uint32_t zfs_vdev_scrub_max_active = 3;
static uint32_t zfs_vdev_removal_min_active = 1;
static uint32_t zfs_vdev_removal_max_active = 2;
static uint32_t zfs_vdev_initializing_min_active = 1;
static uint32_t zfs_vdev_initializing_max_active = 1;
static uint32_t zfs_vdev_trim_min_active = 1;
static uint32_t zfs_vdev_trim_max_active = 2;
static uint32_t zfs_vdev_rebuild_min_active = 1;
static uint32_t zfs_vdev_rebuild_max_active = 3;

/*
 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 * dirty data, use zfs_vdev_async_write_min_active. When it has more than
 * zfs_vdev_async_write_active_max_dirty_percent, use
 * zfs_vdev_async_write_max_active. The value is linearly interpolated
 * between min and max.
 */
int zfs_vdev_async_write_active_min_dirty_percent = 30;
int zfs_vdev_async_write_active_max_dirty_percent = 60;

/*
 * For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
 * the number of concurrently-active I/Os is limited to *_min_active, unless
 * the vdev is "idle". When there are no interactive I/Os active (sync or
 * async), and zfs_vdev_nia_delay I/Os have completed since the last
 * interactive I/O, then the vdev is considered to be "idle", and the number
 * of concurrently-active non-interactive I/Os is increased to *_max_active.
 */
static uint_t zfs_vdev_nia_delay = 5;

/*
 * Some HDDs tend to prioritize sequential I/O so highly that concurrent
 * random I/O latency reaches several seconds. On some HDDs this happens
 * even if sequential I/Os are submitted one at a time, and so setting
 * *_max_active to 1 does not help. To prevent non-interactive I/Os, like
 * scrub, from monopolizing the device, no more than zfs_vdev_nia_credit
 * I/Os can be sent while there are outstanding incomplete interactive
 * I/Os. This enforced wait ensures the HDD services the interactive I/O
 * within a reasonable amount of time.
 */
static uint_t zfs_vdev_nia_credit = 5;

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
static int zfs_vdev_aggregation_limit = 1 << 20;
static int zfs_vdev_aggregation_limit_non_rotating = SPA_OLD_MAXBLOCKSIZE;
static int zfs_vdev_read_gap_limit = 32 << 10;
static int zfs_vdev_write_gap_limit = 4 << 10;

/*
 * Define the queue depth percentage for each top-level vdev. This percentage
 * is used in conjunction with zfs_vdev_async_write_max_active to determine
 * how many allocations a specific top-level vdev should handle. Once the
 * queue depth reaches zfs_vdev_queue_depth_pct *
 * zfs_vdev_async_write_max_active / 100, the allocator will stop allocating
 * blocks on that top-level device.
 * The default kernel setting is 1000% which will yield 100 allocations per
 * device. For userland testing, the default setting is 300% which equates
 * to 30 allocations per device.
 */
#ifdef _KERNEL
int zfs_vdev_queue_depth_pct = 1000;
#else
int zfs_vdev_queue_depth_pct = 300;
#endif

/*
 * When performing allocations for a given metaslab, we want to make sure that
 * there are enough IOs to aggregate together to improve throughput. We want to
 * ensure that there are at least 128k worth of IOs that can be aggregated, and
 * we assume that the average allocation size is 4k, so we need the queue depth
 * to be 32 per allocator to get good aggregation of sequential writes.
 */
int zfs_vdev_def_queue_depth = 32;

/*
 * Allow TRIM I/Os to be aggregated. This should normally not be needed since
 * TRIM I/O for extents up to zfs_trim_extent_bytes_max (128M) can be submitted
 * by the TRIM code in zfs_trim.c.
 */
static int zfs_vdev_aggregate_trim = 0;

static int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = (const zio_t *)x1;
        const zio_t *z2 = (const zio_t *)x2;

        int cmp = TREE_CMP(z1->io_offset, z2->io_offset);

        if (likely(cmp))
                return (cmp);

        return (TREE_PCMP(z1, z2));
}

static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
        return (&vq->vq_class[p].vqc_queued_tree);
}

static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
        ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE || t == ZIO_TYPE_TRIM);
        if (t == ZIO_TYPE_READ)
                return (&vq->vq_read_offset_tree);
        else if (t == ZIO_TYPE_WRITE)
                return (&vq->vq_write_offset_tree);
        else
                return (&vq->vq_trim_offset_tree);
}

static int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = (const zio_t *)x1;
        const zio_t *z2 = (const zio_t *)x2;

        int cmp = TREE_CMP(z1->io_timestamp, z2->io_timestamp);

        if (likely(cmp))
                return (cmp);

        return (TREE_PCMP(z1, z2));
}

static int
vdev_queue_class_min_active(vdev_queue_t *vq, zio_priority_t p)
{
        switch (p) {
        case ZIO_PRIORITY_SYNC_READ:
                return (zfs_vdev_sync_read_min_active);
        case ZIO_PRIORITY_SYNC_WRITE:
                return (zfs_vdev_sync_write_min_active);
        case ZIO_PRIORITY_ASYNC_READ:
                return (zfs_vdev_async_read_min_active);
        case ZIO_PRIORITY_ASYNC_WRITE:
                return (zfs_vdev_async_write_min_active);
        case ZIO_PRIORITY_SCRUB:
                return (vq->vq_ia_active == 0 ? zfs_vdev_scrub_min_active :
                    MIN(vq->vq_nia_credit, zfs_vdev_scrub_min_active));
        case ZIO_PRIORITY_REMOVAL:
                return (vq->vq_ia_active == 0 ? zfs_vdev_removal_min_active :
                    MIN(vq->vq_nia_credit, zfs_vdev_removal_min_active));
        case ZIO_PRIORITY_INITIALIZING:
                return (vq->vq_ia_active == 0 ?zfs_vdev_initializing_min_active:
                    MIN(vq->vq_nia_credit, zfs_vdev_initializing_min_active));
        case ZIO_PRIORITY_TRIM:
                return (zfs_vdev_trim_min_active);
        case ZIO_PRIORITY_REBUILD:
                return (vq->vq_ia_active == 0 ?
                    zfs_vdev_rebuild_min_active :
                    MIN(vq->vq_nia_credit, zfs_vdev_rebuild_min_active));
        default:
                panic("invalid priority %u", p);
                return (0);
        }
}

static int
vdev_queue_max_async_writes(spa_t *spa)
{
        int writes;
        uint64_t dirty = 0;
        dsl_pool_t *dp = spa_get_dsl(spa);
        uint64_t min_bytes = zfs_dirty_data_max *
            zfs_vdev_async_write_active_min_dirty_percent / 100;
        uint64_t max_bytes = zfs_dirty_data_max *
            zfs_vdev_async_write_active_max_dirty_percent / 100;

        /*
         * Async writes may occur before the assignment of the spa's
         * dsl_pool_t if a self-healing zio is issued prior to the
         * completion of dmu_objset_open_impl().
         */
        if (dp == NULL)
                return (zfs_vdev_async_write_max_active);

        /*
         * Sync tasks correspond to interactive user actions. To reduce the
         * execution time of those actions, we push data out as fast as
         * possible.
         */
        dirty = dp->dp_dirty_total;
        if (dirty > max_bytes || spa_has_pending_synctask(spa))
                return (zfs_vdev_async_write_max_active);

        if (dirty < min_bytes)
                return (zfs_vdev_async_write_min_active);

        /*
         * linear interpolation:
         * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
         * move right by min_bytes
         * move up by min_writes
         */
        writes = (dirty - min_bytes) *
            (zfs_vdev_async_write_max_active -
            zfs_vdev_async_write_min_active) /
            (max_bytes - min_bytes) +
            zfs_vdev_async_write_min_active;
        ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
        ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
        return (writes);
}

static int
vdev_queue_class_max_active(spa_t *spa, vdev_queue_t *vq, zio_priority_t p)
{
        switch (p) {
        case ZIO_PRIORITY_SYNC_READ:
                return (zfs_vdev_sync_read_max_active);
        case ZIO_PRIORITY_SYNC_WRITE:
                return (zfs_vdev_sync_write_max_active);
        case ZIO_PRIORITY_ASYNC_READ:
                return (zfs_vdev_async_read_max_active);
        case ZIO_PRIORITY_ASYNC_WRITE:
                return (vdev_queue_max_async_writes(spa));
        case ZIO_PRIORITY_SCRUB:
                if (vq->vq_ia_active > 0) {
                        return (MIN(vq->vq_nia_credit,
                            zfs_vdev_scrub_min_active));
                } else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
                        return (MAX(1, zfs_vdev_scrub_min_active));
                return (zfs_vdev_scrub_max_active);
        case ZIO_PRIORITY_REMOVAL:
                if (vq->vq_ia_active > 0) {
                        return (MIN(vq->vq_nia_credit,
                            zfs_vdev_removal_min_active));
                } else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
                        return (MAX(1, zfs_vdev_removal_min_active));
                return (zfs_vdev_removal_max_active);
        case ZIO_PRIORITY_INITIALIZING:
                if (vq->vq_ia_active > 0) {
                        return (MIN(vq->vq_nia_credit,
                            zfs_vdev_initializing_min_active));
                } else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
                        return (MAX(1, zfs_vdev_initializing_min_active));
                return (zfs_vdev_initializing_max_active);
        case ZIO_PRIORITY_TRIM:
                return (zfs_vdev_trim_max_active);
        case ZIO_PRIORITY_REBUILD:
                if (vq->vq_ia_active > 0) {
                        return (MIN(vq->vq_nia_credit,
                            zfs_vdev_rebuild_min_active));
                } else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
                        return (MAX(1, zfs_vdev_rebuild_min_active));
                return (zfs_vdev_rebuild_max_active);
        default:
                panic("invalid priority %u", p);
                return (0);
        }
}

/*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 * there is no eligible class.
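 *
 * The class is chosen in two passes: a round-robin pass over classes that
 * have not yet reached their min_active, then a pass in zio_priority_t order
 * over classes that have not reached their max_active. For example, a queued
 * scrub i/o is issued from the second pass only when every higher-priority
 * class either has nothing queued or has already reached its maximum.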
 */
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
        spa_t *spa = vq->vq_vdev->vdev_spa;
        zio_priority_t p, n;

        if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
                return (ZIO_PRIORITY_NUM_QUEUEABLE);

        /*
         * Find a queue that has not reached its minimum # outstanding i/os.
         * Do round-robin to reduce starvation due to zfs_vdev_max_active
         * and vq_nia_credit limits.
         */
        for (n = 0; n < ZIO_PRIORITY_NUM_QUEUEABLE; n++) {
                p = (vq->vq_last_prio + n + 1) % ZIO_PRIORITY_NUM_QUEUEABLE;
                if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
                    vq->vq_class[p].vqc_active <
                    vdev_queue_class_min_active(vq, p)) {
                        vq->vq_last_prio = p;
                        return (p);
                }
        }

        /*
         * If we haven't found a queue, look for one that hasn't reached its
         * maximum # outstanding i/os.
         */
        for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
                if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
                    vq->vq_class[p].vqc_active <
                    vdev_queue_class_max_active(spa, vq, p)) {
                        vq->vq_last_prio = p;
                        return (p);
                }
        }

        /* No eligible queued i/os */
        return (ZIO_PRIORITY_NUM_QUEUEABLE);
}

void
vdev_queue_init(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;
        zio_priority_t p;

        mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
        vq->vq_vdev = vd;
        taskq_init_ent(&vd->vdev_queue.vq_io_search.io_tqent);

        avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_queue_node));
        avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
            vdev_queue_offset_compare, sizeof (zio_t),
            offsetof(struct zio, io_offset_node));
        avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
            vdev_queue_offset_compare, sizeof (zio_t),
            offsetof(struct zio, io_offset_node));
        avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM),
            vdev_queue_offset_compare, sizeof (zio_t),
            offsetof(struct zio, io_offset_node));

        for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
                int (*compfn) (const void *, const void *);

                /*
                 * The synchronous/trim i/o queues are dispatched in FIFO
                 * rather than LBA order. This provides more consistent
                 * latency for these i/os.
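                 *
                 * The remaining classes are kept sorted by offset so that
                 * vdev_queue_io_to_issue() can issue them in LBA order,
                 * starting from vq_last_offset.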
                 */
                if (p == ZIO_PRIORITY_SYNC_READ ||
                    p == ZIO_PRIORITY_SYNC_WRITE ||
                    p == ZIO_PRIORITY_TRIM) {
                        compfn = vdev_queue_timestamp_compare;
                } else {
                        compfn = vdev_queue_offset_compare;
                }
                avl_create(vdev_queue_class_tree(vq, p), compfn,
                    sizeof (zio_t), offsetof(struct zio, io_queue_node));
        }

        vq->vq_last_offset = 0;
}

void
vdev_queue_fini(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;

        for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
                avl_destroy(vdev_queue_class_tree(vq, p));
        avl_destroy(&vq->vq_active_tree);
        avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
        avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));
        avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM));

        mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
        avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
        avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);
}

static boolean_t
vdev_queue_is_interactive(zio_priority_t p)
{
        switch (p) {
        case ZIO_PRIORITY_SCRUB:
        case ZIO_PRIORITY_REMOVAL:
        case ZIO_PRIORITY_INITIALIZING:
        case ZIO_PRIORITY_REBUILD:
                return (B_FALSE);
        default:
                return (B_TRUE);
        }
}

static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
        ASSERT(MUTEX_HELD(&vq->vq_lock));
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        vq->vq_class[zio->io_priority].vqc_active++;
        if (vdev_queue_is_interactive(zio->io_priority)) {
                if (++vq->vq_ia_active == 1)
                        vq->vq_nia_credit = 1;
        } else if (vq->vq_ia_active > 0) {
                vq->vq_nia_credit--;
        }
        avl_add(&vq->vq_active_tree, zio);
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
        ASSERT(MUTEX_HELD(&vq->vq_lock));
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        vq->vq_class[zio->io_priority].vqc_active--;
        if (vdev_queue_is_interactive(zio->io_priority)) {
                if (--vq->vq_ia_active == 0)
                        vq->vq_nia_credit = 0;
                else
                        vq->vq_nia_credit = zfs_vdev_nia_credit;
        } else if (vq->vq_ia_active == 0)
                vq->vq_nia_credit++;
        avl_remove(&vq->vq_active_tree, zio);
}

static void
vdev_queue_agg_io_done(zio_t *aio)
{
        abd_free(aio->io_abd);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))

/*
 * Sufficiently adjacent io_offset's in ZIOs will be aggregated. We do this
 * by creating a gang ABD from the adjacent ZIOs' io_abd's. By using
 * a gang ABD we avoid doing memory copies to and from the parent and
 * child ZIOs.
 * The gang ABD also accounts for gaps between adjacent
 * io_offsets by simply getting the zero ABD for writes or allocating
 * a new ABD for reads and placing them in the gang ABD as well.
 */
static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
        zio_t *first, *last, *aio, *dio, *mandatory, *nio;
        uint64_t maxgap = 0;
        uint64_t size;
        uint64_t limit;
        int maxblocksize;
        boolean_t stretch = B_FALSE;
        avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
        enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;
        uint64_t next_offset;
        abd_t *abd;

        maxblocksize = spa_maxblocksize(vq->vq_vdev->vdev_spa);
        if (vq->vq_vdev->vdev_nonrot)
                limit = zfs_vdev_aggregation_limit_non_rotating;
        else
                limit = zfs_vdev_aggregation_limit;
        limit = MAX(MIN(limit, maxblocksize), 0);

        if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE || limit == 0)
                return (NULL);

        /*
         * While TRIM commands could be aggregated based on offset, this
         * behavior is disabled until it's determined to be beneficial.
         */
        if (zio->io_type == ZIO_TYPE_TRIM && !zfs_vdev_aggregate_trim)
                return (NULL);

        /*
         * I/Os to distributed spares are directly dispatched to the dRAID
         * leaf vdevs for aggregation. See the comment at the end of the
         * zio_vdev_io_start() function.
         */
        ASSERT(vq->vq_vdev->vdev_ops != &vdev_draid_spare_ops);

        first = last = zio;

        if (zio->io_type == ZIO_TYPE_READ)
                maxgap = zfs_vdev_read_gap_limit;

        /*
         * We can aggregate I/Os that are sufficiently adjacent and of
         * the same flavor, as expressed by the AGG_INHERIT flags.
         * The latter requirement is necessary so that certain
         * attributes of the I/O, such as whether it's a normal I/O
         * or a scrub/resilver, can be preserved in the aggregate.
         * We can include optional I/Os, but don't allow them
         * to begin a range as they add no benefit in that situation.
         */

        /*
         * We keep track of the last non-optional I/O.
         */
        mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;

        /*
         * Walk backwards through sufficiently contiguous I/Os
         * recording the last non-optional I/O.
         */
        while ((dio = AVL_PREV(t, first)) != NULL &&
            (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
            IO_SPAN(dio, last) <= limit &&
            IO_GAP(dio, first) <= maxgap &&
            dio->io_type == zio->io_type) {
                first = dio;
                if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
                        mandatory = first;
        }

        /*
         * Skip any initial optional I/Os.
         */
        while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
                first = AVL_NEXT(t, first);
                ASSERT(first != NULL);
        }

        /*
         * Walk forward through sufficiently contiguous I/Os.
         * The aggregation limit does not apply to optional i/os, so that
         * we can issue contiguous writes even if they are larger than the
         * aggregation limit.
         */
        while ((dio = AVL_NEXT(t, last)) != NULL &&
            (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
            (IO_SPAN(first, dio) <= limit ||
            (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
            IO_SPAN(first, dio) <= maxblocksize &&
            IO_GAP(last, dio) <= maxgap &&
            dio->io_type == zio->io_type) {
                last = dio;
                if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
                        mandatory = last;
        }

        /*
         * Now that we've established the range of the I/O aggregation
         * we must decide what to do with trailing optional I/Os.
         * For reads, there's nothing to do. While we are unable to
         * aggregate further, it's possible that a trailing optional
         * I/O would allow the underlying device to aggregate with
         * subsequent I/Os. We must therefore determine if the next
         * non-optional I/O is close enough to make aggregation
         * worthwhile.
         */
        if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
                zio_t *nio = last;
                while ((dio = AVL_NEXT(t, nio)) != NULL &&
                    IO_GAP(nio, dio) == 0 &&
                    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
                        nio = dio;
                        if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
                                stretch = B_TRUE;
                                break;
                        }
                }
        }

        if (stretch) {
                /*
                 * We are going to include an optional io in our aggregated
                 * span, thus closing the write gap. Only mandatory i/os can
                 * start aggregated spans, so make sure that the next i/o
                 * after our span is mandatory.
                 */
                dio = AVL_NEXT(t, last);
                dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
        } else {
                /* do not include the optional i/o */
                while (last != mandatory && last != first) {
                        ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
                        last = AVL_PREV(t, last);
                        ASSERT(last != NULL);
                }
        }

        if (first == last)
                return (NULL);

        size = IO_SPAN(first, last);
        ASSERT3U(size, <=, maxblocksize);

        abd = abd_alloc_gang();
        if (abd == NULL)
                return (NULL);

        aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
            abd, size, first->io_type, zio->io_priority,
            flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
            vdev_queue_agg_io_done, NULL);
        aio->io_timestamp = first->io_timestamp;

        nio = first;
        next_offset = first->io_offset;
        do {
                dio = nio;
                nio = AVL_NEXT(t, dio);
                zio_add_child(dio, aio);
                vdev_queue_io_remove(vq, dio);

                if (dio->io_offset != next_offset) {
                        /* allocate a buffer for a read gap */
                        ASSERT3U(dio->io_type, ==, ZIO_TYPE_READ);
                        ASSERT3U(dio->io_offset, >, next_offset);
                        abd = abd_alloc_for_io(
                            dio->io_offset - next_offset, B_TRUE);
                        abd_gang_add(aio->io_abd, abd, B_TRUE);
                }
                if (dio->io_abd &&
                    (dio->io_size != abd_get_size(dio->io_abd))) {
                        /* abd size not the same as IO size */
                        ASSERT3U(abd_get_size(dio->io_abd), >, dio->io_size);
                        abd = abd_get_offset_size(dio->io_abd, 0, dio->io_size);
                        abd_gang_add(aio->io_abd, abd, B_TRUE);
                } else {
                        if (dio->io_flags & ZIO_FLAG_NODATA) {
                                /* allocate a buffer for a write gap */
                                ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
                                ASSERT3P(dio->io_abd, ==, NULL);
                                abd_gang_add(aio->io_abd,
                                    abd_get_zeros(dio->io_size), B_TRUE);
                        } else {
                                /*
                                 * We pass B_FALSE to abd_gang_add()
                                 * because we did not allocate a new
                                 * ABD, so it is assumed the caller
                                 * will free this ABD.
                                 */
                                abd_gang_add(aio->io_abd, dio->io_abd,
                                    B_FALSE);
                        }
                }
                next_offset = dio->io_offset + dio->io_size;
        } while (dio != last);
        ASSERT3U(abd_get_size(aio->io_abd), ==, aio->io_size);

        /*
         * Callers must call zio_vdev_io_bypass() and zio_execute() for
         * aggregated (parent) I/Os so that we can avoid dropping the
         * queue's lock here, which we would otherwise have to do to avoid
         * a deadlock caused by lock order reversal between vq_lock and
         * io_lock in zio_change_priority().
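         *
         * See vdev_queue_io() and vdev_queue_io_done(), which make those
         * calls after releasing vq_lock.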
         */
        return (aio);
}

static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
        zio_t *zio, *aio;
        zio_priority_t p;
        avl_index_t idx;
        avl_tree_t *tree;

again:
        ASSERT(MUTEX_HELD(&vq->vq_lock));

        p = vdev_queue_class_to_issue(vq);

        if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
                /* No eligible queued i/os */
                return (NULL);
        }

        /*
         * For LBA-ordered queues (async / scrub / initializing), issue the
         * i/o which follows the most recently issued i/o in LBA (offset)
         * order.
         *
         * For FIFO queues (sync/trim), issue the i/o with the lowest
         * timestamp.
         */
        tree = vdev_queue_class_tree(vq, p);
        vq->vq_io_search.io_timestamp = 0;
        vq->vq_io_search.io_offset = vq->vq_last_offset - 1;
        VERIFY3P(avl_find(tree, &vq->vq_io_search, &idx), ==, NULL);
        zio = avl_nearest(tree, idx, AVL_AFTER);
        if (zio == NULL)
                zio = avl_first(tree);
        ASSERT3U(zio->io_priority, ==, p);

        aio = vdev_queue_aggregate(vq, zio);
        if (aio != NULL) {
                zio = aio;
        } else {
                vdev_queue_io_remove(vq, zio);

                /*
                 * If the I/O is or was optional and therefore has no data, we
                 * need to simply discard it. We need to drop the vdev queue's
                 * lock to avoid a deadlock that we could encounter since this
                 * I/O will complete immediately.
                 */
                if (zio->io_flags & ZIO_FLAG_NODATA) {
                        mutex_exit(&vq->vq_lock);
                        zio_vdev_io_bypass(zio);
                        zio_execute(zio);
                        mutex_enter(&vq->vq_lock);
                        goto again;
                }
        }

        vdev_queue_pending_add(vq, zio);
        vq->vq_last_offset = zio->io_offset + zio->io_size;

        return (zio);
}

zio_t *
vdev_queue_io(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        zio_t *dio, *nio;
        zio_link_t *zl = NULL;

        if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
                return (zio);

        /*
         * Children i/os inherit their parent's priority, which might
         * not match the child's i/o type. Fix it up here.
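         * For example, a read issued on behalf of a write-priority parent
         * must be reassigned to one of the read classes before it is queued.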
         */
        if (zio->io_type == ZIO_TYPE_READ) {
                ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);

                if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_SCRUB &&
                    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
                    zio->io_priority != ZIO_PRIORITY_INITIALIZING &&
                    zio->io_priority != ZIO_PRIORITY_REBUILD) {
                        zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
                }
        } else if (zio->io_type == ZIO_TYPE_WRITE) {
                ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);

                if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
                    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
                    zio->io_priority != ZIO_PRIORITY_INITIALIZING &&
                    zio->io_priority != ZIO_PRIORITY_REBUILD) {
                        zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
                }
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_TRIM);
                ASSERT(zio->io_priority == ZIO_PRIORITY_TRIM);
        }

        zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
        zio->io_timestamp = gethrtime();

        mutex_enter(&vq->vq_lock);
        vdev_queue_io_add(vq, zio);
        nio = vdev_queue_io_to_issue(vq);
        mutex_exit(&vq->vq_lock);

        if (nio == NULL)
                return (NULL);

        if (nio->io_done == vdev_queue_agg_io_done) {
                while ((dio = zio_walk_parents(nio, &zl)) != NULL) {
                        ASSERT3U(dio->io_type, ==, nio->io_type);
                        zio_vdev_io_bypass(dio);
                        zio_execute(dio);
                }
                zio_nowait(nio);
                return (NULL);
        }

        return (nio);
}

void
vdev_queue_io_done(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        zio_t *dio, *nio;
        zio_link_t *zl = NULL;

        hrtime_t now = gethrtime();
        vq->vq_io_complete_ts = now;
        vq->vq_io_delta_ts = zio->io_delta = now - zio->io_timestamp;

        mutex_enter(&vq->vq_lock);
        vdev_queue_pending_remove(vq, zio);

        while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
                mutex_exit(&vq->vq_lock);
                if (nio->io_done == vdev_queue_agg_io_done) {
                        while ((dio = zio_walk_parents(nio, &zl)) != NULL) {
                                ASSERT3U(dio->io_type, ==, nio->io_type);
                                zio_vdev_io_bypass(dio);
                                zio_execute(dio);
                        }
                        zio_nowait(nio);
                } else {
                        zio_vdev_io_reissue(nio);
                        zio_execute(nio);
                }
                mutex_enter(&vq->vq_lock);
        }

        mutex_exit(&vq->vq_lock);
}

void
vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        avl_tree_t *tree;

        /*
         * ZIO_PRIORITY_NOW is used by the vdev cache code and the aggregate
         * zio code to issue IOs without adding them to the vdev queue. In
         * this case, the zio is already going to be issued as quickly as
         * possible and so it doesn't need any reprioritization to help.
         */
        if (zio->io_priority == ZIO_PRIORITY_NOW)
                return;

        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);

        if (zio->io_type == ZIO_TYPE_READ) {
                if (priority != ZIO_PRIORITY_SYNC_READ &&
                    priority != ZIO_PRIORITY_ASYNC_READ &&
                    priority != ZIO_PRIORITY_SCRUB)
                        priority = ZIO_PRIORITY_ASYNC_READ;
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_WRITE);
                if (priority != ZIO_PRIORITY_SYNC_WRITE &&
                    priority != ZIO_PRIORITY_ASYNC_WRITE)
                        priority = ZIO_PRIORITY_ASYNC_WRITE;
        }

        mutex_enter(&vq->vq_lock);

        /*
         * If the zio is in none of the queues, we can simply change
         * the priority. If the zio is waiting to be submitted, we must
         * remove it from the queue and re-insert it with the new priority.
         * Otherwise, the zio is currently active and we cannot change its
         * priority.
         */
        tree = vdev_queue_class_tree(vq, zio->io_priority);
        if (avl_find(tree, zio, NULL) == zio) {
                avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
                zio->io_priority = priority;
                avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
        } else if (avl_find(&vq->vq_active_tree, zio, NULL) != zio) {
                zio->io_priority = priority;
        }

        mutex_exit(&vq->vq_lock);
}

/*
 * As these two functions are only used for load calculations, we're not
 * concerned if we get an incorrect value on 32-bit platforms due to the lack
 * of vq_lock mutex use here; instead, we prefer to keep it lock free for
 * performance.
 */
int
vdev_queue_length(vdev_t *vd)
{
        return (avl_numnodes(&vd->vdev_queue.vq_active_tree));
}

uint64_t
vdev_queue_last_offset(vdev_t *vd)
{
        return (vd->vdev_queue.vq_last_offset);
}

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit, INT, ZMOD_RW,
    "Max vdev I/O aggregation size");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit_non_rotating, INT,
    ZMOD_RW, "Max vdev I/O aggregation size for non-rotating media");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregate_trim, INT, ZMOD_RW,
    "Allow TRIM I/O to be aggregated");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, read_gap_limit, INT, ZMOD_RW,
    "Aggregate read I/O over gap");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, write_gap_limit, INT, ZMOD_RW,
    "Aggregate write I/O over gap");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_active, INT, ZMOD_RW,
    "Maximum number of active I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_max_dirty_percent, INT,
    ZMOD_RW, "Async write concurrency max threshold");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_min_dirty_percent, INT,
    ZMOD_RW, "Async write concurrency min threshold");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_max_active, INT, ZMOD_RW,
    "Max active async read I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_min_active, INT, ZMOD_RW,
    "Min active async read I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_max_active, INT, ZMOD_RW,
    "Max active async write I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_min_active, INT, ZMOD_RW,
    "Min active async write I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_max_active, INT, ZMOD_RW,
    "Max active initializing I/Os per vdev");
vdev"); 1072 1073 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_min_active, INT, ZMOD_RW, 1074 "Min active initializing I/Os per vdev"); 1075 1076 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_max_active, INT, ZMOD_RW, 1077 "Max active removal I/Os per vdev"); 1078 1079 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_min_active, INT, ZMOD_RW, 1080 "Min active removal I/Os per vdev"); 1081 1082 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_max_active, INT, ZMOD_RW, 1083 "Max active scrub I/Os per vdev"); 1084 1085 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_min_active, INT, ZMOD_RW, 1086 "Min active scrub I/Os per vdev"); 1087 1088 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_max_active, INT, ZMOD_RW, 1089 "Max active sync read I/Os per vdev"); 1090 1091 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_min_active, INT, ZMOD_RW, 1092 "Min active sync read I/Os per vdev"); 1093 1094 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_max_active, INT, ZMOD_RW, 1095 "Max active sync write I/Os per vdev"); 1096 1097 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_min_active, INT, ZMOD_RW, 1098 "Min active sync write I/Os per vdev"); 1099 1100 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_max_active, INT, ZMOD_RW, 1101 "Max active trim/discard I/Os per vdev"); 1102 1103 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_min_active, INT, ZMOD_RW, 1104 "Min active trim/discard I/Os per vdev"); 1105 1106 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_max_active, INT, ZMOD_RW, 1107 "Max active rebuild I/Os per vdev"); 1108 1109 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_min_active, INT, ZMOD_RW, 1110 "Min active rebuild I/Os per vdev"); 1111 1112 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_credit, INT, ZMOD_RW, 1113 "Number of non-interactive I/Os to allow in sequence"); 1114 1115 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_delay, INT, ZMOD_RW, 1116 "Number of non-interactive I/Os before _max_active"); 1117 1118 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, queue_depth_pct, INT, ZMOD_RW, 1119 "Queue depth percentage for each top-level vdev"); 1120