/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab_impl.h>
#include <sys/spa.h>
#include <sys/abd.h>

/*
 * ZFS I/O Scheduler
 * -----------------
 *
 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios.  The
 * I/O scheduler determines when and in what order those operations are
 * issued.  The I/O scheduler divides operations into five I/O classes
 * prioritized in the following order: sync read, sync write, async read,
 * async write, and scrub/resilver.  Each queue defines the minimum and
 * maximum number of concurrent operations that may be issued to the device.
 * In addition, the device has an aggregate maximum.  Note that the sum of the
 * per-queue minimums must not exceed the aggregate maximum.  If the
 * sum of the per-queue maximums exceeds the aggregate maximum, then the
 * number of active i/os may reach zfs_vdev_max_active, in which case no
 * further i/os will be issued regardless of whether all per-queue
 * minimums have been met.
 *
 * For many physical devices, throughput increases with the number of
 * concurrent operations, but latency typically suffers.  Further, physical
 * devices typically have a limit at which more concurrent operations have no
 * effect on throughput or can actually cause it to decrease.
 *
 * The scheduler selects the next operation to issue by first looking for an
 * I/O class whose minimum has not been satisfied.  Once all are satisfied and
 * the aggregate maximum has not been hit, the scheduler looks for classes
 * whose maximum has not been satisfied.  Iteration through the I/O classes is
 * done in the order specified above.  No further operations are issued if the
 * aggregate maximum number of concurrent operations has been hit or if there
 * are no operations queued for an I/O class that has not hit its maximum.
 * Every time an i/o is queued or an operation completes, the I/O scheduler
 * looks for new operations to issue.
 *
 * All I/O classes have a fixed maximum number of outstanding operations
 * except for the async write class.  Asynchronous writes represent the data
 * that is committed to stable storage during the syncing stage for
 * transaction groups (see txg.c).  Transaction groups enter the syncing state
 * periodically so the number of queued async writes will quickly burst up and
 * then bleed down to zero.  Rather than servicing them as quickly as possible,
 * the I/O scheduler changes the maximum number of active async write i/os
 * according to the amount of dirty data in the pool (see dsl_pool.c).  Since
 * both throughput and latency typically increase with the number of
 * concurrent operations issued to physical devices, reducing the burstiness
 * in the number of concurrent operations also stabilizes the response time of
 * operations from other -- and in particular synchronous -- queues.  In broad
 * strokes, the I/O scheduler will issue more concurrent operations from the
 * async write queue as there's more dirty data in the pool.
 *
 * Async Writes
 *
 * The number of concurrent operations issued for the async write I/O class
 * follows a piece-wise linear function defined by a few adjustable points.
 *
 *        |                   o---------| <-- zfs_vdev_async_write_max_active
 *   ^    |                  /^         |
 *   |    |                 / |         |
 * active |                /  |         |
 *  I/O   |               /   |         |
 * count  |              /    |         |
 *        |             /     |         |
 *        |------------o      |         | <-- zfs_vdev_async_write_min_active
 *       0|____________^______|_________|
 *        0%           |      |       100% of zfs_dirty_data_max
 *                     |      |
 *                     |      `-- zfs_vdev_async_write_active_max_dirty_percent
 *                     `--------- zfs_vdev_async_write_active_min_dirty_percent
 *
 * Until the amount of dirty data exceeds a minimum percentage of the dirty
 * data allowed in the pool, the I/O scheduler will limit the number of
 * concurrent operations to the minimum.  As that threshold is crossed, the
 * number of concurrent operations issued increases linearly to the maximum at
 * the specified maximum percentage of the dirty data allowed in the pool.
 *
 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
 * and zfs_vdev_async_write_active_max_dirty_percent.  If it exceeds the
 * maximum percentage, this indicates that the rate of incoming data is
 * greater than the rate that the backend storage can handle.  In this case,
 * we must further throttle incoming writes (see dmu_tx_delay() for details).
 */

/*
 * The maximum number of i/os active to each device.  Ideally, this will be >=
 * the sum of each queue's max_active.
 */
uint32_t zfs_vdev_max_active = 1000;

/*
 * Per-queue limits on the number of i/os active to each device.  If the
 * number of active i/os is < zfs_vdev_max_active, then the min_active comes
 * into play.  We will send min_active from each queue round-robin, and then
 * send from queues in the order defined by zio_priority_t up to max_active.
 * Some queues have additional mechanisms to limit the number of active I/Os
 * in addition to min_active and max_active, see below.
 *
 * In general, smaller max_active's will lead to lower latency of synchronous
 * operations.  Larger max_active's may lead to higher overall throughput,
 * depending on underlying storage.
 *
 * The ratio of the queues' max_actives determines the balance of performance
 * between reads, writes, and scrubs.  E.g., increasing
 * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
 * more quickly, but reads and writes to have higher latency and lower
 * throughput.
 */
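
/*
 * For reference, a back-of-the-envelope check using only the default values
 * defined below: the per-queue minimums sum to 28 and the per-queue maximums
 * sum to 44, both comfortably below the default zfs_vdev_max_active of 1000.
 * With stock tunables the aggregate limit is therefore effectively never the
 * bottleneck; the per-queue maximums are.
 */
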
uint32_t zfs_vdev_sync_read_min_active = 10;
uint32_t zfs_vdev_sync_read_max_active = 10;
uint32_t zfs_vdev_sync_write_min_active = 10;
uint32_t zfs_vdev_sync_write_max_active = 10;
uint32_t zfs_vdev_async_read_min_active = 1;
uint32_t zfs_vdev_async_read_max_active = 3;
uint32_t zfs_vdev_async_write_min_active = 2;
uint32_t zfs_vdev_async_write_max_active = 10;
uint32_t zfs_vdev_scrub_min_active = 1;
uint32_t zfs_vdev_scrub_max_active = 3;
uint32_t zfs_vdev_removal_min_active = 1;
uint32_t zfs_vdev_removal_max_active = 2;
uint32_t zfs_vdev_initializing_min_active = 1;
uint32_t zfs_vdev_initializing_max_active = 1;
uint32_t zfs_vdev_trim_min_active = 1;
uint32_t zfs_vdev_trim_max_active = 2;
uint32_t zfs_vdev_rebuild_min_active = 1;
uint32_t zfs_vdev_rebuild_max_active = 3;

/*
 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 * dirty data, use zfs_vdev_async_write_min_active.  When it has more than
 * zfs_vdev_async_write_active_max_dirty_percent, use
 * zfs_vdev_async_write_max_active.  The value is linearly interpolated
 * between min and max.
 */
int zfs_vdev_async_write_active_min_dirty_percent = 30;
int zfs_vdev_async_write_active_max_dirty_percent = 60;
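
/*
 * Worked example, using only the defaults above (see
 * vdev_queue_max_async_writes() below for the authoritative computation):
 * with min_active = 2, max_active = 10 and the 30%/60% dirty thresholds, a
 * pool that is 45% dirty sits halfway up the slope, so the scheduler allows
 *
 *      2 + (45 - 30) / (60 - 30) * (10 - 2) = 6
 *
 * concurrent async writes per leaf vdev.  Below 30% dirty it allows 2; at or
 * above 60% dirty, or whenever a synctask is pending, it allows 10.
 */
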
/*
 * For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
 * the number of concurrently-active I/O's is limited to *_min_active, unless
 * the vdev is "idle".  When there are no interactive I/Os active (sync or
 * async), and zfs_vdev_nia_delay I/Os have completed since the last
 * interactive I/O, then the vdev is considered to be "idle", and the number
 * of concurrently-active non-interactive I/O's is increased to *_max_active.
 */
uint_t zfs_vdev_nia_delay = 5;

/*
 * Some HDDs tend to prioritize sequential I/O so strongly that concurrent
 * random I/O latency reaches several seconds.  On some HDDs this happens
 * even if sequential I/Os are submitted one at a time, and so setting
 * *_max_active to 1 does not help.  To prevent non-interactive I/Os, like
 * scrub, from monopolizing the device, no more than zfs_vdev_nia_credit
 * I/Os can be sent while there are outstanding incomplete interactive
 * I/Os.  This enforced wait ensures the HDD services the interactive I/O
 * within a reasonable amount of time.
 */
uint_t zfs_vdev_nia_credit = 5;

/*
 * To reduce IOPS, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = 1 << 20;
int zfs_vdev_aggregation_limit_non_rotating = SPA_OLD_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;

/*
 * Define the queue depth percentage for each top-level.  This percentage is
 * used in conjunction with zfs_vdev_async_write_max_active to determine how
 * many allocations a specific top-level vdev should handle.  Once the queue
 * depth reaches zfs_vdev_queue_depth_pct * zfs_vdev_async_write_max_active /
 * 100, the allocator will stop allocating blocks on that top-level device.
 * The default kernel setting is 1000% which will yield 100 allocations per
 * device.  For userland testing, the default setting is 300% which equates
 * to 30 allocations per device.
 */
#ifdef _KERNEL
int zfs_vdev_queue_depth_pct = 1000;
#else
int zfs_vdev_queue_depth_pct = 300;
#endif

/*
 * When performing allocations for a given metaslab, we want to make sure that
 * there are enough IOs to aggregate together to improve throughput.  We want
 * to ensure that there are at least 128k worth of IOs that can be aggregated,
 * and we assume that the average allocation size is 4k, so we need the queue
 * depth to be 32 per allocator to get good aggregation of sequential writes.
 */
int zfs_vdev_def_queue_depth = 32;

/*
 * Allow TRIM I/Os to be aggregated.  This should normally not be needed since
 * TRIM I/O for extents up to zfs_trim_extent_bytes_max (128M) can be submitted
 * by the TRIM code in vdev_trim.c.
 */
int zfs_vdev_aggregate_trim = 0;

static int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = (const zio_t *)x1;
        const zio_t *z2 = (const zio_t *)x2;

        int cmp = TREE_CMP(z1->io_offset, z2->io_offset);

        if (likely(cmp))
                return (cmp);

        return (TREE_PCMP(z1, z2));
}

static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
        return (&vq->vq_class[p].vqc_queued_tree);
}

static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
        ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE || t == ZIO_TYPE_TRIM);
        if (t == ZIO_TYPE_READ)
                return (&vq->vq_read_offset_tree);
        else if (t == ZIO_TYPE_WRITE)
                return (&vq->vq_write_offset_tree);
        else
                return (&vq->vq_trim_offset_tree);
}

static int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = (const zio_t *)x1;
        const zio_t *z2 = (const zio_t *)x2;

        int cmp = TREE_CMP(z1->io_timestamp, z2->io_timestamp);

        if (likely(cmp))
                return (cmp);

        return (TREE_PCMP(z1, z2));
}

static int
vdev_queue_class_min_active(vdev_queue_t *vq, zio_priority_t p)
{
        switch (p) {
        case ZIO_PRIORITY_SYNC_READ:
                return (zfs_vdev_sync_read_min_active);
        case ZIO_PRIORITY_SYNC_WRITE:
                return (zfs_vdev_sync_write_min_active);
        case ZIO_PRIORITY_ASYNC_READ:
                return (zfs_vdev_async_read_min_active);
        case ZIO_PRIORITY_ASYNC_WRITE:
                return (zfs_vdev_async_write_min_active);
        case ZIO_PRIORITY_SCRUB:
                return (vq->vq_ia_active == 0 ? zfs_vdev_scrub_min_active :
                    MIN(vq->vq_nia_credit, zfs_vdev_scrub_min_active));
        case ZIO_PRIORITY_REMOVAL:
                return (vq->vq_ia_active == 0 ? zfs_vdev_removal_min_active :
                    MIN(vq->vq_nia_credit, zfs_vdev_removal_min_active));
        case ZIO_PRIORITY_INITIALIZING:
                return (vq->vq_ia_active == 0 ?
                    zfs_vdev_initializing_min_active :
                    MIN(vq->vq_nia_credit, zfs_vdev_initializing_min_active));
        case ZIO_PRIORITY_TRIM:
                return (zfs_vdev_trim_min_active);
        case ZIO_PRIORITY_REBUILD:
                return (vq->vq_ia_active == 0 ? zfs_vdev_rebuild_min_active :
                    MIN(vq->vq_nia_credit, zfs_vdev_rebuild_min_active));
        default:
                panic("invalid priority %u", p);
                return (0);
        }
}

static int
vdev_queue_max_async_writes(spa_t *spa)
{
        int writes;
        uint64_t dirty = 0;
        dsl_pool_t *dp = spa_get_dsl(spa);
        uint64_t min_bytes = zfs_dirty_data_max *
            zfs_vdev_async_write_active_min_dirty_percent / 100;
        uint64_t max_bytes = zfs_dirty_data_max *
            zfs_vdev_async_write_active_max_dirty_percent / 100;

        /*
         * Async writes may occur before the assignment of the spa's
         * dsl_pool_t if a self-healing zio is issued prior to the
         * completion of dmu_objset_open_impl().
         */
        if (dp == NULL)
                return (zfs_vdev_async_write_max_active);

        /*
         * Sync tasks correspond to interactive user actions.  To reduce the
         * execution time of those actions we push data out as fast as
         * possible.
         */
        dirty = dp->dp_dirty_total;
        if (dirty > max_bytes || spa_has_pending_synctask(spa))
                return (zfs_vdev_async_write_max_active);

        if (dirty < min_bytes)
                return (zfs_vdev_async_write_min_active);

        /*
         * linear interpolation:
         * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
         * move right by min_bytes
         * move up by min_writes
         */
        writes = (dirty - min_bytes) *
            (zfs_vdev_async_write_max_active -
            zfs_vdev_async_write_min_active) /
            (max_bytes - min_bytes) +
            zfs_vdev_async_write_min_active;
        ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
        ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
        return (writes);
}

static int
vdev_queue_class_max_active(spa_t *spa, vdev_queue_t *vq, zio_priority_t p)
{
        switch (p) {
        case ZIO_PRIORITY_SYNC_READ:
                return (zfs_vdev_sync_read_max_active);
        case ZIO_PRIORITY_SYNC_WRITE:
                return (zfs_vdev_sync_write_max_active);
        case ZIO_PRIORITY_ASYNC_READ:
                return (zfs_vdev_async_read_max_active);
        case ZIO_PRIORITY_ASYNC_WRITE:
                return (vdev_queue_max_async_writes(spa));
        case ZIO_PRIORITY_SCRUB:
                if (vq->vq_ia_active > 0) {
                        return (MIN(vq->vq_nia_credit,
                            zfs_vdev_scrub_min_active));
                } else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
                        return (MAX(1, zfs_vdev_scrub_min_active));
                return (zfs_vdev_scrub_max_active);
        case ZIO_PRIORITY_REMOVAL:
                if (vq->vq_ia_active > 0) {
                        return (MIN(vq->vq_nia_credit,
                            zfs_vdev_removal_min_active));
                } else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
                        return (MAX(1, zfs_vdev_removal_min_active));
                return (zfs_vdev_removal_max_active);
        case ZIO_PRIORITY_INITIALIZING:
                if (vq->vq_ia_active > 0) {
                        return (MIN(vq->vq_nia_credit,
                            zfs_vdev_initializing_min_active));
                } else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
                        return (MAX(1, zfs_vdev_initializing_min_active));
                return (zfs_vdev_initializing_max_active);
        case ZIO_PRIORITY_TRIM:
                return (zfs_vdev_trim_max_active);
        case ZIO_PRIORITY_REBUILD:
                if (vq->vq_ia_active > 0) {
                        return (MIN(vq->vq_nia_credit,
                            zfs_vdev_rebuild_min_active));
                } else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
                        return (MAX(1, zfs_vdev_rebuild_min_active));
                return (zfs_vdev_rebuild_max_active);
        default:
                panic("invalid priority %u", p);
                return (0);
        }
}
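
/*
 * Illustrative summary of the two functions above, using only the defaults
 * defined earlier in this file: while any interactive (sync or async) I/O is
 * active, a scrub may keep at most MIN(vq_nia_credit,
 * zfs_vdev_scrub_min_active) I/Os in flight, i.e. it is throttled down to
 * the remaining credit.  Once the last interactive I/O completes, the credit
 * is reset to zero, then grows by one per completed non-interactive I/O, and
 * only after zfs_vdev_nia_delay (5) such completions does the scrub class
 * open up to zfs_vdev_scrub_max_active (3).  See vdev_queue_pending_add()
 * and vdev_queue_pending_remove() below for how vq_nia_credit is maintained.
 */
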

/*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 * there is no eligible class.
 */
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
        spa_t *spa = vq->vq_vdev->vdev_spa;
        zio_priority_t p, n;

        if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
                return (ZIO_PRIORITY_NUM_QUEUEABLE);

        /*
         * Find a queue that has not reached its minimum # outstanding i/os.
         * Do round-robin to reduce starvation due to zfs_vdev_max_active
         * and vq_nia_credit limits.
         */
        for (n = 0; n < ZIO_PRIORITY_NUM_QUEUEABLE; n++) {
                p = (vq->vq_last_prio + n + 1) % ZIO_PRIORITY_NUM_QUEUEABLE;
                if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
                    vq->vq_class[p].vqc_active <
                    vdev_queue_class_min_active(vq, p)) {
                        vq->vq_last_prio = p;
                        return (p);
                }
        }

        /*
         * If we haven't found a queue, look for one that hasn't reached its
         * maximum # outstanding i/os.
         */
        for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
                if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
                    vq->vq_class[p].vqc_active <
                    vdev_queue_class_max_active(spa, vq, p)) {
                        vq->vq_last_prio = p;
                        return (p);
                }
        }

        /* No eligible queued i/os */
        return (ZIO_PRIORITY_NUM_QUEUEABLE);
}

void
vdev_queue_init(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;
        zio_priority_t p;

        mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
        vq->vq_vdev = vd;
        taskq_init_ent(&vd->vdev_queue.vq_io_search.io_tqent);

        avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_queue_node));
        avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
            vdev_queue_offset_compare, sizeof (zio_t),
            offsetof(struct zio, io_offset_node));
        avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
            vdev_queue_offset_compare, sizeof (zio_t),
            offsetof(struct zio, io_offset_node));
        avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM),
            vdev_queue_offset_compare, sizeof (zio_t),
            offsetof(struct zio, io_offset_node));

        for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
                int (*compfn) (const void *, const void *);

                /*
                 * The synchronous/trim i/o queues are dispatched in FIFO
                 * rather than LBA order.  This provides more consistent
                 * latency for these i/os.
                 */
                if (p == ZIO_PRIORITY_SYNC_READ ||
                    p == ZIO_PRIORITY_SYNC_WRITE ||
                    p == ZIO_PRIORITY_TRIM) {
                        compfn = vdev_queue_timestamp_compare;
                } else {
                        compfn = vdev_queue_offset_compare;
                }
                avl_create(vdev_queue_class_tree(vq, p), compfn,
                    sizeof (zio_t), offsetof(struct zio, io_queue_node));
        }

        vq->vq_last_offset = 0;
}

void
vdev_queue_fini(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;

        for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
                avl_destroy(vdev_queue_class_tree(vq, p));
        avl_destroy(&vq->vq_active_tree);
        avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
        avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));
        avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM));

        mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
        avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
        avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);
}

static boolean_t
vdev_queue_is_interactive(zio_priority_t p)
{
        switch (p) {
        case ZIO_PRIORITY_SCRUB:
        case ZIO_PRIORITY_REMOVAL:
        case ZIO_PRIORITY_INITIALIZING:
        case ZIO_PRIORITY_REBUILD:
                return (B_FALSE);
        default:
                return (B_TRUE);
        }
}

static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
        ASSERT(MUTEX_HELD(&vq->vq_lock));
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        vq->vq_class[zio->io_priority].vqc_active++;
        if (vdev_queue_is_interactive(zio->io_priority)) {
                if (++vq->vq_ia_active == 1)
                        vq->vq_nia_credit = 1;
        } else if (vq->vq_ia_active > 0) {
                vq->vq_nia_credit--;
        }
        avl_add(&vq->vq_active_tree, zio);
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
        ASSERT(MUTEX_HELD(&vq->vq_lock));
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        vq->vq_class[zio->io_priority].vqc_active--;
        if (vdev_queue_is_interactive(zio->io_priority)) {
                if (--vq->vq_ia_active == 0)
                        vq->vq_nia_credit = 0;
                else
                        vq->vq_nia_credit = zfs_vdev_nia_credit;
        } else if (vq->vq_ia_active == 0)
                vq->vq_nia_credit++;
        avl_remove(&vq->vq_active_tree, zio);
}

static void
vdev_queue_agg_io_done(zio_t *aio)
{
        abd_free(aio->io_abd);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
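
/*
 * Worked example for the two macros above (hypothetical offsets, chosen only
 * for illustration): if fio covers [0, 4K) and lio covers [8K, 12K), then
 * IO_SPAN(fio, lio) = 8K + 4K - 0 = 12K, the size an aggregate covering both
 * would need, and IO_GAP(fio, lio) = -(0 + 4K - 8K) = 4K, the hole between
 * them.  Two back-to-back i/os give IO_GAP() == 0.
 */
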
/*
 * Sufficiently adjacent io_offset's in ZIOs will be aggregated.  We do this
 * by creating a gang ABD from the adjacent ZIOs io_abd's.  By using
 * a gang ABD we avoid doing memory copies to and from the parent,
 * child ZIOs.  The gang ABD also accounts for gaps between adjacent
 * io_offsets by simply getting the zero ABD for writes or allocating
 * a new ABD for reads and placing them in the gang ABD as well.
 */
static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
        zio_t *first, *last, *aio, *dio, *mandatory, *nio;
        zio_link_t *zl = NULL;
        uint64_t maxgap = 0;
        uint64_t size;
        uint64_t limit;
        int maxblocksize;
        boolean_t stretch = B_FALSE;
        avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
        enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;
        uint64_t next_offset;
        abd_t *abd;

        maxblocksize = spa_maxblocksize(vq->vq_vdev->vdev_spa);
        if (vq->vq_vdev->vdev_nonrot)
                limit = zfs_vdev_aggregation_limit_non_rotating;
        else
                limit = zfs_vdev_aggregation_limit;
        limit = MAX(MIN(limit, maxblocksize), 0);

        if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE || limit == 0)
                return (NULL);

        /*
         * While TRIM commands could be aggregated based on offset this
         * behavior is disabled until it's determined to be beneficial.
         */
        if (zio->io_type == ZIO_TYPE_TRIM && !zfs_vdev_aggregate_trim)
                return (NULL);

        /*
         * I/Os to distributed spares are directly dispatched to the dRAID
         * leaf vdevs for aggregation.  See the comment at the end of the
         * zio_vdev_io_start() function.
         */
        ASSERT(vq->vq_vdev->vdev_ops != &vdev_draid_spare_ops);

        first = last = zio;

        if (zio->io_type == ZIO_TYPE_READ)
                maxgap = zfs_vdev_read_gap_limit;

        /*
         * We can aggregate I/Os that are sufficiently adjacent and of
         * the same flavor, as expressed by the AGG_INHERIT flags.
         * The latter requirement is necessary so that certain
         * attributes of the I/O, such as whether it's a normal I/O
         * or a scrub/resilver, can be preserved in the aggregate.
         * We can include optional I/Os, but don't allow them
         * to begin a range as they add no benefit in that situation.
         */

        /*
         * We keep track of the last non-optional I/O.
         */
        mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;

        /*
         * Walk backwards through sufficiently contiguous I/Os
         * recording the last non-optional I/O.
         */
        while ((dio = AVL_PREV(t, first)) != NULL &&
            (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
            IO_SPAN(dio, last) <= limit &&
            IO_GAP(dio, first) <= maxgap &&
            dio->io_type == zio->io_type) {
                first = dio;
                if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
                        mandatory = first;
        }

        /*
         * Skip any initial optional I/Os.
         */
        while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
                first = AVL_NEXT(t, first);
                ASSERT(first != NULL);
        }

        /*
         * Walk forward through sufficiently contiguous I/Os.
         * The aggregation limit does not apply to optional i/os, so that
         * we can issue contiguous writes even if they are larger than the
         * aggregation limit.
         */
        while ((dio = AVL_NEXT(t, last)) != NULL &&
            (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
            (IO_SPAN(first, dio) <= limit ||
            (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
            IO_SPAN(first, dio) <= maxblocksize &&
            IO_GAP(last, dio) <= maxgap &&
            dio->io_type == zio->io_type) {
                last = dio;
                if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
                        mandatory = last;
        }

        /*
         * Now that we've established the range of the I/O aggregation
         * we must decide what to do with trailing optional I/Os.
         * For reads, there's nothing to do.  While we are unable to
         * aggregate further, it's possible that a trailing optional
         * I/O would allow the underlying device to aggregate with
         * subsequent I/Os.  We must therefore determine if the next
         * non-optional I/O is close enough to make aggregation
         * worthwhile.
         */
        if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
                zio_t *nio = last;
                while ((dio = AVL_NEXT(t, nio)) != NULL &&
                    IO_GAP(nio, dio) == 0 &&
                    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
                        nio = dio;
                        if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
                                stretch = B_TRUE;
                                break;
                        }
                }
        }

        if (stretch) {
                /*
                 * We are going to include an optional io in our aggregated
                 * span, thus closing the write gap.  Only mandatory i/os can
                 * start aggregated spans, so make sure that the next i/o
                 * after our span is mandatory.
                 */
                dio = AVL_NEXT(t, last);
                dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
        } else {
                /* do not include the optional i/o */
                while (last != mandatory && last != first) {
                        ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
                        last = AVL_PREV(t, last);
                        ASSERT(last != NULL);
                }
        }

        if (first == last)
                return (NULL);

        size = IO_SPAN(first, last);
        ASSERT3U(size, <=, maxblocksize);

        abd = abd_alloc_gang();
        if (abd == NULL)
                return (NULL);

        aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
            abd, size, first->io_type, zio->io_priority,
            flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
            vdev_queue_agg_io_done, NULL);
        aio->io_timestamp = first->io_timestamp;

        nio = first;
        next_offset = first->io_offset;
        do {
                dio = nio;
                nio = AVL_NEXT(t, dio);
                zio_add_child(dio, aio);
                vdev_queue_io_remove(vq, dio);

                if (dio->io_offset != next_offset) {
                        /* allocate a buffer for a read gap */
                        ASSERT3U(dio->io_type, ==, ZIO_TYPE_READ);
                        ASSERT3U(dio->io_offset, >, next_offset);
                        abd = abd_alloc_for_io(
                            dio->io_offset - next_offset, B_TRUE);
                        abd_gang_add(aio->io_abd, abd, B_TRUE);
                }
                if (dio->io_abd &&
                    (dio->io_size != abd_get_size(dio->io_abd))) {
                        /* abd size not the same as IO size */
                        ASSERT3U(abd_get_size(dio->io_abd), >, dio->io_size);
                        abd = abd_get_offset_size(dio->io_abd, 0, dio->io_size);
                        abd_gang_add(aio->io_abd, abd, B_TRUE);
                } else {
                        if (dio->io_flags & ZIO_FLAG_NODATA) {
                                /* allocate a buffer for a write gap */
                                ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
                                ASSERT3P(dio->io_abd, ==, NULL);
                                abd_gang_add(aio->io_abd,
                                    abd_get_zeros(dio->io_size), B_TRUE);
                        } else {
                                /*
                                 * We pass B_FALSE to abd_gang_add()
                                 * because we did not allocate a new
                                 * ABD, so it is assumed the caller
                                 * will free this ABD.
                                 */
                                abd_gang_add(aio->io_abd, dio->io_abd,
                                    B_FALSE);
                        }
                }
                next_offset = dio->io_offset + dio->io_size;
        } while (dio != last);
        ASSERT3U(abd_get_size(aio->io_abd), ==, aio->io_size);

        /*
         * We need to drop the vdev queue's lock during zio_execute() to
         * avoid a deadlock that we could encounter due to lock order
         * reversal between vq_lock and io_lock in zio_change_priority().
         */
        mutex_exit(&vq->vq_lock);
        while ((dio = zio_walk_parents(aio, &zl)) != NULL) {
                ASSERT3U(dio->io_type, ==, aio->io_type);

                zio_vdev_io_bypass(dio);
                zio_execute(dio);
        }
        mutex_enter(&vq->vq_lock);

        return (aio);
}
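
/*
 * Worked example for vdev_queue_aggregate() above (hypothetical offsets,
 * chosen only for illustration): suppose three mandatory 16K writes with
 * compatible AGG_INHERIT flags are queued at offsets 0, 16K and 32K.  The
 * backward/forward walks select first = the zio at offset 0 and last = the
 * zio at offset 32K, IO_SPAN(first, last) = 48K, and a single 48K delegated
 * aggregate is built whose gang ABD references the three children's buffers
 * without copying.  Each original zio is linked to the aggregate with
 * zio_add_child(), bypassed via zio_vdev_io_bypass(), and completes once the
 * aggregate i/o completes.
 */
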
static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
        zio_t *zio, *aio;
        zio_priority_t p;
        avl_index_t idx;
        avl_tree_t *tree;

again:
        ASSERT(MUTEX_HELD(&vq->vq_lock));

        p = vdev_queue_class_to_issue(vq);

        if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
                /* No eligible queued i/os */
                return (NULL);
        }

        /*
         * For LBA-ordered queues (async / scrub / initializing), issue the
         * i/o which follows the most recently issued i/o in LBA (offset)
         * order.
         *
         * For FIFO queues (sync/trim), issue the i/o with the lowest
         * timestamp.
         */
        tree = vdev_queue_class_tree(vq, p);
        vq->vq_io_search.io_timestamp = 0;
        vq->vq_io_search.io_offset = vq->vq_last_offset - 1;
        VERIFY3P(avl_find(tree, &vq->vq_io_search, &idx), ==, NULL);
        zio = avl_nearest(tree, idx, AVL_AFTER);
        if (zio == NULL)
                zio = avl_first(tree);
        ASSERT3U(zio->io_priority, ==, p);

        aio = vdev_queue_aggregate(vq, zio);
        if (aio != NULL)
                zio = aio;
        else
                vdev_queue_io_remove(vq, zio);

        /*
         * If the I/O is or was optional and therefore has no data, we need to
         * simply discard it.  We need to drop the vdev queue's lock to avoid a
         * deadlock that we could encounter since this I/O will complete
         * immediately.
         */
        if (zio->io_flags & ZIO_FLAG_NODATA) {
                mutex_exit(&vq->vq_lock);
                zio_vdev_io_bypass(zio);
                zio_execute(zio);
                mutex_enter(&vq->vq_lock);
                goto again;
        }

        vdev_queue_pending_add(vq, zio);
        vq->vq_last_offset = zio->io_offset + zio->io_size;

        return (zio);
}
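
/*
 * Entry points below.  In broad strokes, and only summarizing the code that
 * follows: the issuing path calls vdev_queue_io(), which queues the zio and
 * returns either a zio that should be issued now (possibly a different,
 * previously queued one) or NULL if nothing should be issued yet; aggregate
 * i/os are started here directly via zio_nowait().  When an issued i/o
 * completes, vdev_queue_io_done() removes it from the active tree and keeps
 * issuing further eligible i/os until the scheduler has none to offer.
 */
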
zio_t *
vdev_queue_io(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        zio_t *nio;

        if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
                return (zio);

        /*
         * Children i/os inherit their parent's priority, which might
         * not match the child's i/o type.  Fix it up here.
         */
        if (zio->io_type == ZIO_TYPE_READ) {
                ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);

                if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_SCRUB &&
                    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
                    zio->io_priority != ZIO_PRIORITY_INITIALIZING &&
                    zio->io_priority != ZIO_PRIORITY_REBUILD) {
                        zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
                }
        } else if (zio->io_type == ZIO_TYPE_WRITE) {
                ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);

                if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
                    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
                    zio->io_priority != ZIO_PRIORITY_INITIALIZING &&
                    zio->io_priority != ZIO_PRIORITY_REBUILD) {
                        zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
                }
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_TRIM);
                ASSERT(zio->io_priority == ZIO_PRIORITY_TRIM);
        }

        zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

        mutex_enter(&vq->vq_lock);
        zio->io_timestamp = gethrtime();
        vdev_queue_io_add(vq, zio);
        nio = vdev_queue_io_to_issue(vq);
        mutex_exit(&vq->vq_lock);

        if (nio == NULL)
                return (NULL);

        if (nio->io_done == vdev_queue_agg_io_done) {
                zio_nowait(nio);
                return (NULL);
        }

        return (nio);
}

void
vdev_queue_io_done(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        zio_t *nio;

        mutex_enter(&vq->vq_lock);

        vdev_queue_pending_remove(vq, zio);

        zio->io_delta = gethrtime() - zio->io_timestamp;
        vq->vq_io_complete_ts = gethrtime();
        vq->vq_io_delta_ts = vq->vq_io_complete_ts - zio->io_timestamp;

        while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
                mutex_exit(&vq->vq_lock);
                if (nio->io_done == vdev_queue_agg_io_done) {
                        zio_nowait(nio);
                } else {
                        zio_vdev_io_reissue(nio);
                        zio_execute(nio);
                }
                mutex_enter(&vq->vq_lock);
        }

        mutex_exit(&vq->vq_lock);
}

void
vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        avl_tree_t *tree;

        /*
         * ZIO_PRIORITY_NOW is used by the vdev cache code and the aggregate
         * zio code to issue IOs without adding them to the vdev queue.  In
         * this case, the zio is already going to be issued as quickly as
         * possible and so it doesn't need any reprioritization to help.
         */
        if (zio->io_priority == ZIO_PRIORITY_NOW)
                return;

        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);

        if (zio->io_type == ZIO_TYPE_READ) {
                if (priority != ZIO_PRIORITY_SYNC_READ &&
                    priority != ZIO_PRIORITY_ASYNC_READ &&
                    priority != ZIO_PRIORITY_SCRUB)
                        priority = ZIO_PRIORITY_ASYNC_READ;
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_WRITE);
                if (priority != ZIO_PRIORITY_SYNC_WRITE &&
                    priority != ZIO_PRIORITY_ASYNC_WRITE)
                        priority = ZIO_PRIORITY_ASYNC_WRITE;
        }

        mutex_enter(&vq->vq_lock);

        /*
         * If the zio is in none of the queues we can simply change
         * the priority.  If the zio is waiting to be submitted we must
         * remove it from the queue and re-insert it with the new priority.
         * Otherwise, the zio is currently active and we cannot change its
         * priority.
         */
        tree = vdev_queue_class_tree(vq, zio->io_priority);
        if (avl_find(tree, zio, NULL) == zio) {
                avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
                zio->io_priority = priority;
                avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
        } else if (avl_find(&vq->vq_active_tree, zio, NULL) != zio) {
                zio->io_priority = priority;
        }

        mutex_exit(&vq->vq_lock);
}

/*
 * As these two methods are only used for load calculations we're not
 * concerned if we get an incorrect value on 32bit platforms due to lack of
 * vq_lock mutex use here, instead we prefer to keep it lock free for
 * performance.
 */
int
vdev_queue_length(vdev_t *vd)
{
        return (avl_numnodes(&vd->vdev_queue.vq_active_tree));
}

uint64_t
vdev_queue_last_offset(vdev_t *vd)
{
        return (vd->vdev_queue.vq_last_offset);
}

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit, INT, ZMOD_RW,
        "Max vdev I/O aggregation size");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit_non_rotating, INT, ZMOD_RW,
        "Max vdev I/O aggregation size for non-rotating media");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregate_trim, INT, ZMOD_RW,
        "Allow TRIM I/O to be aggregated");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, read_gap_limit, INT, ZMOD_RW,
        "Aggregate read I/O over gap");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, write_gap_limit, INT, ZMOD_RW,
        "Aggregate write I/O over gap");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_active, INT, ZMOD_RW,
        "Maximum number of active I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_max_dirty_percent, INT, ZMOD_RW,
        "Async write concurrency max threshold");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_min_dirty_percent, INT, ZMOD_RW,
        "Async write concurrency min threshold");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_max_active, INT, ZMOD_RW,
        "Max active async read I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_min_active, INT, ZMOD_RW,
        "Min active async read I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_max_active, INT, ZMOD_RW,
        "Max active async write I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_min_active, INT, ZMOD_RW,
        "Min active async write I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_max_active, INT, ZMOD_RW,
        "Max active initializing I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_min_active, INT, ZMOD_RW,
        "Min active initializing I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_max_active, INT, ZMOD_RW,
        "Max active removal I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_min_active, INT, ZMOD_RW,
        "Min active removal I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_max_active, INT, ZMOD_RW,
        "Max active scrub I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_min_active, INT, ZMOD_RW,
        "Min active scrub I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_max_active, INT, ZMOD_RW,
        "Max active sync read I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_min_active, INT, ZMOD_RW,
        "Min active sync read I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_max_active, INT, ZMOD_RW,
        "Max active sync write I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_min_active, INT, ZMOD_RW,
        "Min active sync write I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_max_active, INT, ZMOD_RW,
        "Max active trim/discard I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_min_active, INT, ZMOD_RW,
        "Min active trim/discard I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_max_active, INT, ZMOD_RW,
        "Max active rebuild I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_min_active, INT, ZMOD_RW,
        "Min active rebuild I/Os per vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_credit, INT, ZMOD_RW,
        "Number of non-interactive I/Os to allow in sequence");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_delay, INT, ZMOD_RW,
        "Number of non-interactive I/Os before _max_active");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, queue_depth_pct, INT, ZMOD_RW,
        "Queue depth percentage for each top-level vdev");
/* END CSTYLED */