1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
28 * Copyright (c) 2014 Integros [integros.com]
29 * Copyright 2019 Joyent, Inc.
30 */
31
32 #include <sys/zfs_context.h>
33 #include <sys/vdev_impl.h>
34 #include <sys/spa_impl.h>
35 #include <sys/zio.h>
36 #include <sys/avl.h>
37 #include <sys/dsl_pool.h>
38 #include <sys/metaslab_impl.h>
39 #include <sys/abd.h>
40
41 /*
42 * ZFS I/O Scheduler
43 * -----------------
44 *
45 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios. The
46 * I/O scheduler determines when and in what order those operations are
47 * issued. The I/O scheduler divides operations into eight I/O classes prioritized
48 * in the following order: sync read, sync write, async read, async write,
49 * scrub/resilver, removal, initializing, and TRIM. Each queue defines the minimum and
50 * maximum number of concurrent operations that may be issued to the device.
51 * In addition, the device has an aggregate maximum. Note that the sum of the
52 * per-queue minimums must not exceed the aggregate maximum, and if the
53 * aggregate maximum is equal to or greater than the sum of the per-queue
54 * maximums, the per-queue minimum has no effect.
55 *
56 * For many physical devices, throughput increases with the number of
57 * concurrent operations, but latency typically suffers. Further, physical
58 * devices typically have a limit at which more concurrent operations have no
59 * effect on throughput or can actually cause it to decrease.
60 *
61 * The scheduler selects the next operation to issue by first looking for an
62 * I/O class whose minimum has not been satisfied. Once all are satisfied and
63 * the aggregate maximum has not been hit, the scheduler looks for classes
64 * whose maximum has not been satisfied. Iteration through the I/O classes is
65 * done in the order specified above. No further operations are issued if the
66 * aggregate maximum number of concurrent operations has been hit or if there
67 * are no operations queued for an I/O class that has not hit its maximum.
68 * Every time an i/o is queued or an operation completes, the I/O scheduler
69 * looks for new operations to issue.
70 *
71 * All I/O classes have a fixed maximum number of outstanding operations
72 * except for the async write class. Asynchronous writes represent the data
73 * that is committed to stable storage during the syncing stage for
74 * transaction groups (see txg.c). Transaction groups enter the syncing state
75 * periodically so the number of queued async writes will quickly burst up and
76 * then bleed down to zero. Rather than servicing them as quickly as possible,
77 * the I/O scheduler changes the maximum number of active async write i/os
78 * according to the amount of dirty data in the pool (see dsl_pool.c). Since
79 * both throughput and latency typically increase with the number of
80 * concurrent operations issued to physical devices, reducing the burstiness
81 * in the number of concurrent operations also stabilizes the response time of
82 * operations from other -- and in particular synchronous -- queues. In broad
83 * strokes, the I/O scheduler will issue more concurrent operations from the
84 * async write queue as there's more dirty data in the pool.
85 *
86 * Async Writes
87 *
88 * The number of concurrent operations issued for the async write I/O class
89 * follows a piece-wise linear function defined by a few adjustable points.
90 *
91 * | o---------| <-- zfs_vdev_async_write_max_active
92 * ^ | /^ |
93 * | | / | |
94 * active | / | |
95 * I/O | / | |
96 * count | / | |
97 * | / | |
98 * |------------o | | <-- zfs_vdev_async_write_min_active
99 * 0|____________^______|_________|
100 * 0% | | 100% of zfs_dirty_data_max
101 * | |
102 * | `-- zfs_vdev_async_write_active_max_dirty_percent
103 * `--------- zfs_vdev_async_write_active_min_dirty_percent
104 *
105 * Until the amount of dirty data exceeds a minimum percentage of the dirty
106 * data allowed in the pool, the I/O scheduler will limit the number of
107 * concurrent operations to the minimum. As that threshold is crossed, the
108 * number of concurrent operations issued increases linearly to the maximum at
109 * the specified maximum percentage of the dirty data allowed in the pool.
110 *
111 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
112 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
113 * and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
114 * maximum percentage, this indicates that the rate of incoming data is
115 * greater than the rate that the backend storage can handle. In this case, we
116 * must further throttle incoming writes (see dmu_tx_delay() for details).
117 */
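/*
 * Worked example (a sketch with assumed numbers; zfs_dirty_data_max is
 * hypothetical here, the percentages and active counts are the defaults set
 * later in this file): suppose zfs_dirty_data_max is 1000MB, so the 30% and
 * 60% thresholds fall at 300MB and 600MB, and suppose 450MB of dirty data is
 * outstanding.  With zfs_vdev_async_write_min_active = 1 and
 * zfs_vdev_async_write_max_active = 10, vdev_queue_max_async_writes()
 * interpolates
 *
 *	(450MB - 300MB) * (10 - 1) / (600MB - 300MB) + 1 = 4 + 1 = 5
 *
 * (the division truncates), so up to 5 concurrent async write i/os would be
 * issued to each device.
 */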
118
119 /*
120 * The maximum number of i/os active to each device. Ideally, this will be >=
121 * the sum of each queue's max_active. It must be at least the sum of each
122 * queue's min_active.
123 */
124 uint32_t zfs_vdev_max_active = 1000;
125
126 /*
127 * Per-queue limits on the number of i/os active to each device. If the
128 * sum of the queues' max_actives exceeds zfs_vdev_max_active, then the
129 * min_active comes into play. We will send min_active from each queue,
130 * and then select from queues in the order defined by zio_priority_t.
131 *
132 * In general, smaller max_active's will lead to lower latency of synchronous
133 * operations. Larger max_active's may lead to higher overall throughput,
134 * depending on underlying storage.
135 *
136 * The ratio of the queues' max_actives determines the balance of performance
137 * between reads, writes, and scrubs. E.g., increasing
138 * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
139 * more quickly, but reads and writes to have higher latency and lower
140 * throughput.
141 */
142 uint32_t zfs_vdev_sync_read_min_active = 10;
143 uint32_t zfs_vdev_sync_read_max_active = 10;
144 uint32_t zfs_vdev_sync_write_min_active = 10;
145 uint32_t zfs_vdev_sync_write_max_active = 10;
146 uint32_t zfs_vdev_async_read_min_active = 1;
147 uint32_t zfs_vdev_async_read_max_active = 3;
148 uint32_t zfs_vdev_async_write_min_active = 1;
149 uint32_t zfs_vdev_async_write_max_active = 10;
150 uint32_t zfs_vdev_scrub_min_active = 1;
151 uint32_t zfs_vdev_scrub_max_active = 2;
152 uint32_t zfs_vdev_removal_min_active = 1;
153 uint32_t zfs_vdev_removal_max_active = 2;
154 uint32_t zfs_vdev_initializing_min_active = 1;
155 uint32_t zfs_vdev_initializing_max_active = 1;
156 uint32_t zfs_vdev_trim_min_active = 1;
157 uint32_t zfs_vdev_trim_max_active = 2;
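/*
 * For reference, with the defaults above the per-queue max_actives sum to
 * 10 + 10 + 3 + 10 + 2 + 2 + 1 + 2 = 40 and the min_actives sum to 26, both
 * well below the default zfs_vdev_max_active of 1000.  Per the note at the
 * top of this file, the aggregate maximum is then effectively never the
 * limiting factor and the per-queue minimums have no effect unless these
 * tunables are changed.
 */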
158
159 /*
160 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
161 * dirty data, use zfs_vdev_async_write_min_active. When it has more than
162 * zfs_vdev_async_write_active_max_dirty_percent, use
163 * zfs_vdev_async_write_max_active. The value is linearly interpolated
164 * between min and max.
165 */
166 int zfs_vdev_async_write_active_min_dirty_percent = 30;
167 int zfs_vdev_async_write_active_max_dirty_percent = 60;
168
169 /*
170 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
171 * For read I/Os, we also aggregate across small adjacency gaps; for writes
172 * we include spans of optional I/Os to aid aggregation at the disk even when
173 * they aren't able to help us aggregate at this level.
174 */
175 int zfs_vdev_aggregation_limit = 1 << 20;
176 int zfs_vdev_read_gap_limit = 32 << 10;
177 int zfs_vdev_write_gap_limit = 4 << 10;
178
179 /*
180 * Define the queue depth percentage for each top-level vdev. This percentage
181 * is used in conjunction with zfs_vdev_async_write_max_active to determine how
182 * many allocations a specific top-level vdev should handle. Once the queue
183 * depth reaches zfs_vdev_queue_depth_pct * zfs_vdev_async_write_max_active /
184 * 100, the allocator will stop allocating blocks on that top-level vdev.
185 * The default kernel setting is 1000%, which will yield 100 allocations per
186 * device. For userland testing, the default setting is 300%, which equates
187 * to 30 allocations per device.
188 */
189 #ifdef _KERNEL
190 int zfs_vdev_queue_depth_pct = 1000;
191 #else
192 int zfs_vdev_queue_depth_pct = 300;
193 #endif
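/*
 * Spelling out the arithmetic implied above, with the default
 * zfs_vdev_async_write_max_active of 10: in the kernel,
 * 1000 * 10 / 100 = 100 outstanding allocations per top-level vdev;
 * in userland, 300 * 10 / 100 = 30.
 */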
194
195 /*
196 * When performing allocations for a given metaslab, we want to make sure that
197 * there are enough IOs to aggregate together to improve throughput. We want to
198 * ensure that there are at least 128k worth of IOs that can be aggregated, and
199 * we assume that the average allocation size is 4k, so we need the queue depth
200 * to be 32 per allocator to get good aggregation of sequential writes.
201 */
202 int zfs_vdev_def_queue_depth = 32;
203
204 /*
205 * Allow TRIM I/Os to be aggregated. This should normally not be needed since
206 * TRIM I/O for extents up to zfs_trim_extent_bytes_max (128M) can be submitted
207 * by the TRIM code in zfs_trim.c.
208 */
209 int zfs_vdev_aggregate_trim = 0;
210
211 int
212 vdev_queue_offset_compare(const void *x1, const void *x2)
213 {
214 const zio_t *z1 = (const zio_t *)x1;
215 const zio_t *z2 = (const zio_t *)x2;
216
217 int cmp = TREE_CMP(z1->io_offset, z2->io_offset);
218
219 if (likely(cmp))
220 return (cmp);
221
222 return (TREE_PCMP(z1, z2));
223 }
224
225 static inline avl_tree_t *
226 vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
227 {
228 return (&vq->vq_class[p].vqc_queued_tree);
229 }
230
231 static inline avl_tree_t *
232 vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
233 {
234 ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE || t == ZIO_TYPE_TRIM);
235 if (t == ZIO_TYPE_READ)
236 return (&vq->vq_read_offset_tree);
237 else if (t == ZIO_TYPE_WRITE)
238 return (&vq->vq_write_offset_tree);
239 else
240 return (&vq->vq_trim_offset_tree);
241 }
242
243 int
244 vdev_queue_timestamp_compare(const void *x1, const void *x2)
245 {
246 const zio_t *z1 = (const zio_t *)x1;
247 const zio_t *z2 = (const zio_t *)x2;
248
249 int cmp = TREE_CMP(z1->io_timestamp, z2->io_timestamp);
250
251 if (likely(cmp))
252 return (cmp);
253
254 return (TREE_PCMP(z1, z2));
255 }
256
257 void
258 vdev_queue_init(vdev_t *vd)
259 {
260 vdev_queue_t *vq = &vd->vdev_queue;
261
262 mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
263 vq->vq_vdev = vd;
264
265 avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
266 sizeof (zio_t), offsetof(struct zio, io_queue_node));
267 avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
268 vdev_queue_offset_compare, sizeof (zio_t),
269 offsetof(struct zio, io_offset_node));
270 avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
271 vdev_queue_offset_compare, sizeof (zio_t),
272 offsetof(struct zio, io_offset_node));
273 avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM),
274 vdev_queue_offset_compare, sizeof (zio_t),
275 offsetof(struct zio, io_offset_node));
276
277 for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
278 int (*compfn) (const void *, const void *);
279
280 /*
281 * The synchronous/trim i/o queues are dispatched in FIFO rather
282 * than LBA order. This provides more consistent latency for
283 * these i/os.
284 */
285 if (p == ZIO_PRIORITY_SYNC_READ ||
286 p == ZIO_PRIORITY_SYNC_WRITE ||
287 p == ZIO_PRIORITY_TRIM) {
288 compfn = vdev_queue_timestamp_compare;
289 } else {
290 compfn = vdev_queue_offset_compare;
291 }
292
293 avl_create(vdev_queue_class_tree(vq, p), compfn,
294 sizeof (zio_t), offsetof(struct zio, io_queue_node));
295 }
296
297 vq->vq_last_offset = 0;
298 }
299
300 void
301 vdev_queue_fini(vdev_t *vd)
302 {
303 vdev_queue_t *vq = &vd->vdev_queue;
304
305 for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
306 avl_destroy(vdev_queue_class_tree(vq, p));
307 avl_destroy(&vq->vq_active_tree);
308 avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
309 avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));
310 avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM));
311
312 mutex_destroy(&vq->vq_lock);
313 }
314
315 static void
316 vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
317 {
318 spa_t *spa = zio->io_spa;
319
320 ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
321 avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
322 avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);
323
324 mutex_enter(&spa->spa_iokstat_lock);
325 spa->spa_queue_stats[zio->io_priority].spa_queued++;
326 if (spa->spa_iokstat != NULL)
327 kstat_waitq_enter(spa->spa_iokstat->ks_data);
328 mutex_exit(&spa->spa_iokstat_lock);
329 }
330
331 static void
332 vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
333 {
334 spa_t *spa = zio->io_spa;
335
336 ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
337 avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
338 avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);
339
340 mutex_enter(&spa->spa_iokstat_lock);
341 ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
342 spa->spa_queue_stats[zio->io_priority].spa_queued--;
343 if (spa->spa_iokstat != NULL)
344 kstat_waitq_exit(spa->spa_iokstat->ks_data);
345 mutex_exit(&spa->spa_iokstat_lock);
346 }
347
348 static void
349 vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
350 {
351 spa_t *spa = zio->io_spa;
352 ASSERT(MUTEX_HELD(&vq->vq_lock));
353 ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
354 vq->vq_class[zio->io_priority].vqc_active++;
355 avl_add(&vq->vq_active_tree, zio);
356
357 mutex_enter(&spa->spa_iokstat_lock);
358 spa->spa_queue_stats[zio->io_priority].spa_active++;
359 if (spa->spa_iokstat != NULL)
360 kstat_runq_enter(spa->spa_iokstat->ks_data);
361 mutex_exit(&spa->spa_iokstat_lock);
362 }
363
364 static void
365 vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
366 {
367 spa_t *spa = zio->io_spa;
368 ASSERT(MUTEX_HELD(&vq->vq_lock));
369 ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
370 vq->vq_class[zio->io_priority].vqc_active--;
371 avl_remove(&vq->vq_active_tree, zio);
372
373 mutex_enter(&spa->spa_iokstat_lock);
374 ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
375 spa->spa_queue_stats[zio->io_priority].spa_active--;
376 if (spa->spa_iokstat != NULL) {
377 kstat_io_t *ksio = spa->spa_iokstat->ks_data;
378
379 kstat_runq_exit(spa->spa_iokstat->ks_data);
380 if (zio->io_type == ZIO_TYPE_READ) {
381 ksio->reads++;
382 ksio->nread += zio->io_size;
383 } else if (zio->io_type == ZIO_TYPE_WRITE) {
384 ksio->writes++;
385 ksio->nwritten += zio->io_size;
386 }
387 }
388 mutex_exit(&spa->spa_iokstat_lock);
389 }
390
391 static void
392 vdev_queue_agg_io_done(zio_t *aio)
393 {
394 if (aio->io_type == ZIO_TYPE_READ) {
395 zio_t *pio;
396 zio_link_t *zl = NULL;
397 while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
398 abd_copy_off(pio->io_abd, aio->io_abd,
399 0, pio->io_offset - aio->io_offset, pio->io_size);
400 }
401 }
402
403 abd_free(aio->io_abd);
404 }
405
406 static int
407 vdev_queue_class_min_active(zio_priority_t p)
408 {
409 switch (p) {
410 case ZIO_PRIORITY_SYNC_READ:
411 return (zfs_vdev_sync_read_min_active);
412 case ZIO_PRIORITY_SYNC_WRITE:
413 return (zfs_vdev_sync_write_min_active);
414 case ZIO_PRIORITY_ASYNC_READ:
415 return (zfs_vdev_async_read_min_active);
416 case ZIO_PRIORITY_ASYNC_WRITE:
417 return (zfs_vdev_async_write_min_active);
418 case ZIO_PRIORITY_SCRUB:
419 return (zfs_vdev_scrub_min_active);
420 case ZIO_PRIORITY_REMOVAL:
421 return (zfs_vdev_removal_min_active);
422 case ZIO_PRIORITY_INITIALIZING:
423 return (zfs_vdev_initializing_min_active);
424 case ZIO_PRIORITY_TRIM:
425 return (zfs_vdev_trim_min_active);
426 default:
427 panic("invalid priority %u", p);
428 }
429 }
430
431 static int
432 vdev_queue_max_async_writes(spa_t *spa)
433 {
434 int writes;
435 uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
436 uint64_t min_bytes = zfs_dirty_data_max *
437 zfs_vdev_async_write_active_min_dirty_percent / 100;
438 uint64_t max_bytes = zfs_dirty_data_max *
439 zfs_vdev_async_write_active_max_dirty_percent / 100;
440
441 /*
442 * Sync tasks correspond to interactive user actions. To reduce the
443 * execution time of those actions we push data out as fast as possible.
444 */
445 if (spa_has_pending_synctask(spa)) {
446 return (zfs_vdev_async_write_max_active);
447 }
448
449 if (dirty < min_bytes)
450 return (zfs_vdev_async_write_min_active);
451 if (dirty > max_bytes)
452 return (zfs_vdev_async_write_max_active);
453
454 /*
455 * linear interpolation:
456 * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
457 * move right by min_bytes
458 * move up by min_writes
459 */
460 writes = (dirty - min_bytes) *
461 (zfs_vdev_async_write_max_active -
462 zfs_vdev_async_write_min_active) /
463 (max_bytes - min_bytes) +
464 zfs_vdev_async_write_min_active;
465 ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
466 ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
467 return (writes);
468 }
469
470 static int
471 vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
472 {
473 switch (p) {
474 case ZIO_PRIORITY_SYNC_READ:
475 return (zfs_vdev_sync_read_max_active);
476 case ZIO_PRIORITY_SYNC_WRITE:
477 return (zfs_vdev_sync_write_max_active);
478 case ZIO_PRIORITY_ASYNC_READ:
479 return (zfs_vdev_async_read_max_active);
480 case ZIO_PRIORITY_ASYNC_WRITE:
481 return (vdev_queue_max_async_writes(spa));
482 case ZIO_PRIORITY_SCRUB:
483 return (zfs_vdev_scrub_max_active);
484 case ZIO_PRIORITY_REMOVAL:
485 return (zfs_vdev_removal_max_active);
486 case ZIO_PRIORITY_INITIALIZING:
487 return (zfs_vdev_initializing_max_active);
488 case ZIO_PRIORITY_TRIM:
489 return (zfs_vdev_trim_max_active);
490 default:
491 panic("invalid priority %u", p);
492 }
493 }
494
495 /*
496 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
497 * there is no eligible class.
498 */
499 static zio_priority_t
500 vdev_queue_class_to_issue(vdev_queue_t *vq)
501 {
502 spa_t *spa = vq->vq_vdev->vdev_spa;
503 zio_priority_t p;
504
505 if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
506 return (ZIO_PRIORITY_NUM_QUEUEABLE);
507
508 /* find a queue that has not reached its minimum # outstanding i/os */
509 for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
510 if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
511 vq->vq_class[p].vqc_active <
512 vdev_queue_class_min_active(p))
513 return (p);
514 }
515
516 /*
517 * If we haven't found a queue, look for one that hasn't reached its
518 * maximum # outstanding i/os.
519 */
520 for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
521 if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
522 vq->vq_class[p].vqc_active <
523 vdev_queue_class_max_active(spa, p))
524 return (p);
525 }
526
527 /* No eligible queued i/os */
528 return (ZIO_PRIORITY_NUM_QUEUEABLE);
529 }
530
531 /*
532 * Compute the range spanned by two i/os, which is the endpoint of the last
533 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
534 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
535 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
536 */
537 #define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
538 #define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
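/*
 * Illustrative example (hypothetical offsets): if fio covers [0, 4K) and
 * lio covers [8K, 12K), then IO_SPAN(fio, lio) = 8K + 4K - 0 = 12K, while
 * IO_GAP(fio, lio) = -IO_SPAN(lio, fio) = -(0 + 4K - 8K) = 4K, the hole
 * between the end of fio and the start of lio.
 */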
539
540 static zio_t *
541 vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
542 {
543 zio_t *first, *last, *aio, *dio, *mandatory, *nio;
544 zio_link_t *zl = NULL;
545 uint64_t maxgap = 0;
546 uint64_t size;
547 boolean_t stretch = B_FALSE;
548 avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
549 enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;
550
551 if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
552 return (NULL);
553
554 /*
555 * While TRIM commands could be aggregated based on offset, this
556 * behavior is disabled until it's determined to be beneficial.
557 */
558 if (zio->io_type == ZIO_TYPE_TRIM && !zfs_vdev_aggregate_trim)
559 return (NULL);
560
561 first = last = zio;
562
563 if (zio->io_type == ZIO_TYPE_READ)
564 maxgap = zfs_vdev_read_gap_limit;
565
566 /*
567 * We can aggregate I/Os that are sufficiently adjacent and of
568 * the same flavor, as expressed by the AGG_INHERIT flags.
569 * The latter requirement is necessary so that certain
570 * attributes of the I/O, such as whether it's a normal I/O
571 * or a scrub/resilver, can be preserved in the aggregate.
572 * We can include optional I/Os, but don't allow them
573 * to begin a range as they add no benefit in that situation.
574 */
575
576 /*
577 * We keep track of the last non-optional I/O.
578 */
579 mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;
580
581 /*
582 * Walk backwards through sufficiently contiguous I/Os
583 * recording the last non-optional I/O.
584 */
585 while ((dio = AVL_PREV(t, first)) != NULL &&
586 (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
587 IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
588 IO_GAP(dio, first) <= maxgap &&
589 dio->io_type == zio->io_type) {
590 first = dio;
591 if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
592 mandatory = first;
593 }
594
595 /*
596 * Skip any initial optional I/Os.
597 */
598 while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
599 first = AVL_NEXT(t, first);
600 ASSERT(first != NULL);
601 }
602
603 /*
604 * Walk forward through sufficiently contiguous I/Os.
605 * The aggregation limit does not apply to optional i/os, so that
606 * we can issue contiguous writes even if they are larger than the
607 * aggregation limit.
608 */
609 while ((dio = AVL_NEXT(t, last)) != NULL &&
610 (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
611 (IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit ||
612 (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
613 IO_GAP(last, dio) <= maxgap &&
614 dio->io_type == zio->io_type) {
615 last = dio;
616 if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
617 mandatory = last;
618 }
619
620 /*
621 * Now that we've established the range of the I/O aggregation
622 * we must decide what to do with trailing optional I/Os.
623 * For reads, there's nothing to do. While we are unable to
624 * aggregate further, it's possible that a trailing optional
625 * I/O would allow the underlying device to aggregate with
626 * subsequent I/Os. We must therefore determine if the next
627 * non-optional I/O is close enough to make aggregation
628 * worthwhile.
629 */
630 if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
631 zio_t *nio = last;
632 while ((dio = AVL_NEXT(t, nio)) != NULL &&
633 IO_GAP(nio, dio) == 0 &&
634 IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
635 nio = dio;
636 if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
637 stretch = B_TRUE;
638 break;
639 }
640 }
641 }
642
643 if (stretch) {
644 /*
645 * We are going to include an optional io in our aggregated
646 * span, thus closing the write gap. Only mandatory i/os can
647 * start aggregated spans, so make sure that the next i/o
648 * after our span is mandatory.
649 */
650 dio = AVL_NEXT(t, last);
651 dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
652 } else {
653 /* do not include the optional i/o */
654 while (last != mandatory && last != first) {
655 ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
656 last = AVL_PREV(t, last);
657 ASSERT(last != NULL);
658 }
659 }
660
661 if (first == last)
662 return (NULL);
663
664 size = IO_SPAN(first, last);
665 ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
666
667 aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
668 abd_alloc_for_io(size, B_TRUE), size, first->io_type,
669 zio->io_priority, flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
670 vdev_queue_agg_io_done, NULL);
671 aio->io_timestamp = first->io_timestamp;
672
673 nio = first;
674 do {
675 dio = nio;
676 nio = AVL_NEXT(t, dio);
677 ASSERT3U(dio->io_type, ==, aio->io_type);
678
679 if (dio->io_flags & ZIO_FLAG_NODATA) {
680 ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
681 abd_zero_off(aio->io_abd,
682 dio->io_offset - aio->io_offset, dio->io_size);
683 } else if (dio->io_type == ZIO_TYPE_WRITE) {
684 abd_copy_off(aio->io_abd, dio->io_abd,
685 dio->io_offset - aio->io_offset, 0, dio->io_size);
686 }
687
688 zio_add_child(dio, aio);
689 vdev_queue_io_remove(vq, dio);
690 } while (dio != last);
691
692 /*
693 * We need to drop the vdev queue's lock to avoid a deadlock that we
694 * could encounter since this I/O will complete immediately.
695 */
696 mutex_exit(&vq->vq_lock);
697 while ((dio = zio_walk_parents(aio, &zl)) != NULL) {
698 zio_vdev_io_bypass(dio);
699 zio_execute(dio);
700 }
701 mutex_enter(&vq->vq_lock);
702
703 return (aio);
704 }
705
706 static zio_t *
707 vdev_queue_io_to_issue(vdev_queue_t *vq)
708 {
709 zio_t *zio, *aio;
710 zio_priority_t p;
711 avl_index_t idx;
712 avl_tree_t *tree;
713 zio_t search;
714
715 again:
716 ASSERT(MUTEX_HELD(&vq->vq_lock));
717
718 p = vdev_queue_class_to_issue(vq);
719
720 if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
721 /* No eligible queued i/os */
722 return (NULL);
723 }
724
725 /*
726 * For LBA-ordered queues (async / scrub / initializing), issue the
727 * i/o which follows the most recently issued i/o in LBA (offset) order.
728 *
729 * For FIFO queues (sync/trim), issue the i/o with the lowest timestamp.
730 */
731 tree = vdev_queue_class_tree(vq, p);
732 search.io_timestamp = 0;
733 search.io_offset = vq->vq_last_offset - 1;
734 VERIFY3P(avl_find(tree, &search, &idx), ==, NULL);
735 zio = avl_nearest(tree, idx, AVL_AFTER);
736 if (zio == NULL)
737 zio = avl_first(tree);
738 ASSERT3U(zio->io_priority, ==, p);
739
740 aio = vdev_queue_aggregate(vq, zio);
741 if (aio != NULL)
742 zio = aio;
743 else
744 vdev_queue_io_remove(vq, zio);
745
746 /*
747 * If the I/O is or was optional and therefore has no data, we need to
748 * simply discard it. We need to drop the vdev queue's lock to avoid a
749 * deadlock that we could encounter since this I/O will complete
750 * immediately.
751 */
752 if (zio->io_flags & ZIO_FLAG_NODATA) {
753 mutex_exit(&vq->vq_lock);
754 zio_vdev_io_bypass(zio);
755 zio_execute(zio);
756 mutex_enter(&vq->vq_lock);
757 goto again;
758 }
759
760 vdev_queue_pending_add(vq, zio);
761 vq->vq_last_offset = zio->io_offset + zio->io_size;
762
763 return (zio);
764 }
765
766 zio_t *
767 vdev_queue_io(zio_t *zio)
768 {
769 vdev_queue_t *vq = &zio->io_vd->vdev_queue;
770 zio_t *nio;
771
772 if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
773 return (zio);
774
775 /*
776 * Children i/os inherit their parent's priority, which might
777 * not match the child's i/o type. Fix it up here.
778 */
779 if (zio->io_type == ZIO_TYPE_READ) {
780 ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);
781
782 if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
783 zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
784 zio->io_priority != ZIO_PRIORITY_SCRUB &&
785 zio->io_priority != ZIO_PRIORITY_REMOVAL &&
786 zio->io_priority != ZIO_PRIORITY_INITIALIZING) {
787 zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
788 }
789 } else if (zio->io_type == ZIO_TYPE_WRITE) {
790 ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);
791
792 if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
793 zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
794 zio->io_priority != ZIO_PRIORITY_REMOVAL &&
795 zio->io_priority != ZIO_PRIORITY_INITIALIZING) {
796 zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
797 }
798 } else {
799 ASSERT(zio->io_type == ZIO_TYPE_TRIM);
800 ASSERT(zio->io_priority == ZIO_PRIORITY_TRIM);
801 }
802
803 zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
804
805 mutex_enter(&vq->vq_lock);
806 zio->io_timestamp = gethrtime();
807 vdev_queue_io_add(vq, zio);
808 nio = vdev_queue_io_to_issue(vq);
809 mutex_exit(&vq->vq_lock);
810
811 if (nio == NULL)
812 return (NULL);
813
814 if (nio->io_done == vdev_queue_agg_io_done) {
815 zio_nowait(nio);
816 return (NULL);
817 }
818
819 return (nio);
820 }
821
822 void
823 vdev_queue_io_done(zio_t *zio)
824 {
825 vdev_queue_t *vq = &zio->io_vd->vdev_queue;
826 zio_t *nio;
827
828 mutex_enter(&vq->vq_lock);
829
830 vdev_queue_pending_remove(vq, zio);
831
832 zio->io_delta = gethrtime() - zio->io_timestamp;
833 vq->vq_io_complete_ts = gethrtime();
834
835 while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
836 mutex_exit(&vq->vq_lock);
837 if (nio->io_done == vdev_queue_agg_io_done) {
838 zio_nowait(nio);
839 } else {
840 zio_vdev_io_reissue(nio);
841 zio_execute(nio);
842 }
843 mutex_enter(&vq->vq_lock);
844 }
845
846 mutex_exit(&vq->vq_lock);
847 }
848
849 void
850 vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
851 {
852 vdev_queue_t *vq = &zio->io_vd->vdev_queue;
853 avl_tree_t *tree;
854
855 /*
856 * ZIO_PRIORITY_NOW is used by the vdev cache code and the aggregate zio
857 * code to issue IOs without adding them to the vdev queue. In this
858 * case, the zio is already going to be issued as quickly as possible
859 * and so it doesn't need any reprioritization to help.
860 */
861 if (zio->io_priority == ZIO_PRIORITY_NOW)
862 return;
863
864 ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
865 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
866
867 if (zio->io_type == ZIO_TYPE_READ) {
868 if (priority != ZIO_PRIORITY_SYNC_READ &&
869 priority != ZIO_PRIORITY_ASYNC_READ &&
870 priority != ZIO_PRIORITY_SCRUB)
871 priority = ZIO_PRIORITY_ASYNC_READ;
872 } else {
873 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
874 if (priority != ZIO_PRIORITY_SYNC_WRITE &&
875 priority != ZIO_PRIORITY_ASYNC_WRITE)
876 priority = ZIO_PRIORITY_ASYNC_WRITE;
877 }
878
879 mutex_enter(&vq->vq_lock);
880
881 /*
882 * If the zio is in none of the queues we can simply change
883 * the priority. If the zio is waiting to be submitted we must
884 * remove it from the queue and re-insert it with the new priority.
885 * Otherwise, the zio is currently active and we cannot change its
886 * priority.
887 */
888 tree = vdev_queue_class_tree(vq, zio->io_priority);
889 if (avl_find(tree, zio, NULL) == zio) {
890 spa_t *spa = zio->io_spa;
891 zio_priority_t oldpri = zio->io_priority;
892
893 avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
894 zio->io_priority = priority;
895 avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
896
897 mutex_enter(&spa->spa_iokstat_lock);
898 ASSERT3U(spa->spa_queue_stats[oldpri].spa_queued, >, 0);
899 spa->spa_queue_stats[oldpri].spa_queued--;
900 spa->spa_queue_stats[zio->io_priority].spa_queued++;
901 mutex_exit(&spa->spa_iokstat_lock);
902 } else if (avl_find(&vq->vq_active_tree, zio, NULL) != zio) {
903 zio->io_priority = priority;
904 }
905
906 mutex_exit(&vq->vq_lock);
907 }
908
909 /*
910 * As these two methods are only used for load calculations, we're not
911 * concerned if we get an incorrect value on 32-bit platforms due to the
912 * lack of vq_lock mutex use here; instead we prefer to keep it lock-free
913 * for performance.
914 */
915 int
916 vdev_queue_length(vdev_t *vd)
917 {
918 return (avl_numnodes(&vd->vdev_queue.vq_active_tree));
919 }
920
921 uint64_t
922 vdev_queue_last_offset(vdev_t *vd)
923 {
924 return (vd->vdev_queue.vq_last_offset);
925 }
926