xref: /titanic_50/usr/src/uts/common/fs/zfs/vdev_queue.c (revision 8cc2da61b2209ea79a015eead3f390ecf885fcfb)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Copyright (c) 2012 by Delphix. All rights reserved.
28  */
29 
30 #include <sys/zfs_context.h>
31 #include <sys/vdev_impl.h>
32 #include <sys/spa_impl.h>
33 #include <sys/zio.h>
34 #include <sys/avl.h>
35 
36 /*
37  * These tunables are for performance analysis.
38  */
39 /*
40  * zfs_vdev_max_pending is the maximum number of i/os concurrently
41  * pending to each device.  zfs_vdev_min_pending is the initial number
42  * of i/os pending to each device (before it starts ramping up to
43  * max_pending).
44  */
45 int zfs_vdev_max_pending = 10;
46 int zfs_vdev_min_pending = 4;
47 
48 /* deadline = pri + (ddi_get_lbolt64() >> time_shift) */
49 int zfs_vdev_time_shift = 6;
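/*
 * Worked example with the default time_shift of 6: one unit of io_priority
 * equals 2^6 = 64 ticks of lbolt time, so a priority-4 I/O queued now gets
 * the same deadline as a priority-0 I/O queued 256 ticks later (about 2.56
 * seconds, assuming hz = 100).
 */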
50 
51 /* exponential I/O issue ramp-up rate */
52 int zfs_vdev_ramp_rate = 2;
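/*
 * How the pending/ramp tunables above interact, using the default values:
 * a vdev begins by issuing at most zfs_vdev_min_pending (4) I/Os from
 * vdev_queue_io(); each completion handled by vdev_queue_io_done() may then
 * issue up to zfs_vdev_ramp_rate (2) replacement I/Os, letting the number
 * in flight ramp up toward, but never beyond, zfs_vdev_max_pending (10).
 */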
53 
54 /*
55  * To reduce IOPS, we aggregate small adjacent I/Os into one large I/O.
56  * For read I/Os, we also aggregate across small adjacency gaps; for writes
57  * we include spans of optional I/Os to aid aggregation at the disk even when
58  * they aren't able to help us aggregate at this level.
59  */
60 int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE;
61 int zfs_vdev_read_gap_limit = 32 << 10;
62 int zfs_vdev_write_gap_limit = 4 << 10;
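/*
 * Worked example with the defaults above: two 8K reads at offsets 0 and 24K
 * span 32K with a 16K hole between them.  Since the hole is within the 32K
 * read gap limit and the 32K span is within the aggregation limit, the two
 * reads may be issued as a single 32K read whose middle 16K is simply
 * discarded on completion.
 */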
63 
64 /*
65  * Virtual device vector for disk I/O scheduling.
66  */
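/*
 * The two comparators below impose a strict total order on queued I/Os:
 * vdev_queue_deadline_compare() sorts by deadline, then offset, then the
 * zio pointer itself; vdev_queue_offset_compare() sorts by offset, then the
 * pointer.  The final pointer comparison guarantees that distinct I/Os
 * never compare equal, as the AVL trees require.
 */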
67 int
68 vdev_queue_deadline_compare(const void *x1, const void *x2)
69 {
70 	const zio_t *z1 = x1;
71 	const zio_t *z2 = x2;
72 
73 	if (z1->io_deadline < z2->io_deadline)
74 		return (-1);
75 	if (z1->io_deadline > z2->io_deadline)
76 		return (1);
77 
78 	if (z1->io_offset < z2->io_offset)
79 		return (-1);
80 	if (z1->io_offset > z2->io_offset)
81 		return (1);
82 
83 	if (z1 < z2)
84 		return (-1);
85 	if (z1 > z2)
86 		return (1);
87 
88 	return (0);
89 }
90 
91 int
92 vdev_queue_offset_compare(const void *x1, const void *x2)
93 {
94 	const zio_t *z1 = x1;
95 	const zio_t *z2 = x2;
96 
97 	if (z1->io_offset < z2->io_offset)
98 		return (-1);
99 	if (z1->io_offset > z2->io_offset)
100 		return (1);
101 
102 	if (z1 < z2)
103 		return (-1);
104 	if (z1 > z2)
105 		return (1);
106 
107 	return (0);
108 }
109 
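/*
 * Each vdev queue uses four AVL trees: vq_deadline_tree orders every queued
 * I/O by issue deadline, vq_read_tree and vq_write_tree order the same
 * queued I/Os by offset (each I/O lives in the tree matching its type) so
 * that aggregation candidates are easy to find, and vq_pending_tree tracks
 * I/Os that have already been handed to the device.
 */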
110 void
111 vdev_queue_init(vdev_t *vd)
112 {
113 	vdev_queue_t *vq = &vd->vdev_queue;
114 
115 	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
116 
117 	avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare,
118 	    sizeof (zio_t), offsetof(struct zio, io_deadline_node));
119 
120 	avl_create(&vq->vq_read_tree, vdev_queue_offset_compare,
121 	    sizeof (zio_t), offsetof(struct zio, io_offset_node));
122 
123 	avl_create(&vq->vq_write_tree, vdev_queue_offset_compare,
124 	    sizeof (zio_t), offsetof(struct zio, io_offset_node));
125 
126 	avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare,
127 	    sizeof (zio_t), offsetof(struct zio, io_offset_node));
128 }
129 
130 void
131 vdev_queue_fini(vdev_t *vd)
132 {
133 	vdev_queue_t *vq = &vd->vdev_queue;
134 
135 	avl_destroy(&vq->vq_deadline_tree);
136 	avl_destroy(&vq->vq_read_tree);
137 	avl_destroy(&vq->vq_write_tree);
138 	avl_destroy(&vq->vq_pending_tree);
139 
140 	mutex_destroy(&vq->vq_lock);
141 }
142 
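/*
 * vdev_queue_io_add() and vdev_queue_io_remove() keep each queued I/O in
 * both the deadline tree and its per-type offset tree, and account for it
 * in the pool-wide kstat wait queue; vdev_queue_pending_add() and
 * vdev_queue_pending_remove() do the same for in-flight I/Os via the kstat
 * run queue.
 */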
143 static void
144 vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
145 {
146 	spa_t *spa = zio->io_spa;
147 	avl_add(&vq->vq_deadline_tree, zio);
148 	avl_add(zio->io_vdev_tree, zio);
149 
150 	if (spa->spa_iokstat != NULL) {
151 		mutex_enter(&spa->spa_iokstat_lock);
152 		kstat_waitq_enter(spa->spa_iokstat->ks_data);
153 		mutex_exit(&spa->spa_iokstat_lock);
154 	}
155 }
156 
157 static void
158 vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
159 {
160 	spa_t *spa = zio->io_spa;
161 	avl_remove(&vq->vq_deadline_tree, zio);
162 	avl_remove(zio->io_vdev_tree, zio);
163 
164 	if (spa->spa_iokstat != NULL) {
165 		mutex_enter(&spa->spa_iokstat_lock);
166 		kstat_waitq_exit(spa->spa_iokstat->ks_data);
167 		mutex_exit(&spa->spa_iokstat_lock);
168 	}
169 }
170 
171 static void
172 vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
173 {
174 	spa_t *spa = zio->io_spa;
175 	avl_add(&vq->vq_pending_tree, zio);
176 	if (spa->spa_iokstat != NULL) {
177 		mutex_enter(&spa->spa_iokstat_lock);
178 		kstat_runq_enter(spa->spa_iokstat->ks_data);
179 		mutex_exit(&spa->spa_iokstat_lock);
180 	}
181 }
182 
183 static void
184 vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
185 {
186 	spa_t *spa = zio->io_spa;
187 	avl_remove(&vq->vq_pending_tree, zio);
188 	if (spa->spa_iokstat != NULL) {
189 		kstat_io_t *ksio = spa->spa_iokstat->ks_data;
190 
191 		mutex_enter(&spa->spa_iokstat_lock);
192 		kstat_runq_exit(spa->spa_iokstat->ks_data);
193 		if (zio->io_type == ZIO_TYPE_READ) {
194 			ksio->reads++;
195 			ksio->nread += zio->io_size;
196 		} else if (zio->io_type == ZIO_TYPE_WRITE) {
197 			ksio->writes++;
198 			ksio->nwritten += zio->io_size;
199 		}
200 		mutex_exit(&spa->spa_iokstat_lock);
201 	}
202 }
203 
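/*
 * Completion callback for an aggregated I/O: for reads, copy each parent
 * I/O's slice of the shared buffer back into that parent's own buffer;
 * in all cases free the shared buffer.
 */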
204 static void
205 vdev_queue_agg_io_done(zio_t *aio)
206 {
207 	zio_t *pio;
208 
209 	while ((pio = zio_walk_parents(aio)) != NULL)
210 		if (aio->io_type == ZIO_TYPE_READ)
211 			bcopy((char *)aio->io_data + (pio->io_offset -
212 			    aio->io_offset), pio->io_data, pio->io_size);
213 
214 	zio_buf_free(aio->io_data, aio->io_size);
215 }
216 
217 /*
218  * Compute the range spanned by two i/os, which is the endpoint of the last
219  * (lio->io_offset + lio->io_size) minus the start of the first (fio->io_offset).
220  * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
221  * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
222  */
223 #define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
224 #define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
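/*
 * For example, with fio = { offset 0, size 4K } and lio = { offset 12K,
 * size 4K }: IO_SPAN(fio, lio) = 12K + 4K - 0 = 16K, and IO_GAP(fio, lio) =
 * -IO_SPAN(lio, fio) = -(0 + 4K - 12K) = 8K, the hole between the end of
 * fio and the start of lio.  A negative IO_GAP() means the two I/Os overlap.
 */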
225 
226 static zio_t *
227 vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit)
228 {
229 	zio_t *fio, *lio, *aio, *dio, *nio, *mio;
230 	avl_tree_t *t;
231 	int flags;
232 	uint64_t maxspan = zfs_vdev_aggregation_limit;
233 	uint64_t maxgap;
234 	int stretch;
235 
236 again:
237 	ASSERT(MUTEX_HELD(&vq->vq_lock));
238 
239 	if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit ||
240 	    avl_numnodes(&vq->vq_deadline_tree) == 0)
241 		return (NULL);
242 
243 	fio = lio = avl_first(&vq->vq_deadline_tree);
244 
245 	t = fio->io_vdev_tree;
246 	flags = fio->io_flags & ZIO_FLAG_AGG_INHERIT;
247 	maxgap = (t == &vq->vq_read_tree) ? zfs_vdev_read_gap_limit : 0;
248 
249 	if (!(flags & ZIO_FLAG_DONT_AGGREGATE)) {
250 		/*
251 		 * We can aggregate I/Os that are sufficiently adjacent and of
252 		 * the same flavor, as expressed by the AGG_INHERIT flags.
253 		 * The latter requirement is necessary so that certain
254 		 * attributes of the I/O, such as whether it's a normal I/O
255 		 * or a scrub/resilver, can be preserved in the aggregate.
256 		 * We can include optional I/Os, but don't allow them
257 		 * to begin a range as they add no benefit in that situation.
258 		 */
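		/*
		 * For example, a scrub or resilver I/O and a normal I/O have
		 * different AGG_INHERIT flags, so they are never combined
		 * even if they happen to be adjacent on disk.
		 */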
259 
260 		/*
261 		 * We keep track of the last non-optional I/O.
262 		 */
263 		mio = (fio->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : fio;
264 
265 		/*
266 		 * Walk backwards through sufficiently contiguous I/Os
267 		 * recording the last non-optional I/O.
268 		 */
269 		while ((dio = AVL_PREV(t, fio)) != NULL &&
270 		    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
271 		    IO_SPAN(dio, lio) <= maxspan &&
272 		    IO_GAP(dio, fio) <= maxgap) {
273 			fio = dio;
274 			if (mio == NULL && !(fio->io_flags & ZIO_FLAG_OPTIONAL))
275 				mio = fio;
276 		}
277 
278 		/*
279 		 * Skip any initial optional I/Os.
280 		 */
281 		while ((fio->io_flags & ZIO_FLAG_OPTIONAL) && fio != lio) {
282 			fio = AVL_NEXT(t, fio);
283 			ASSERT(fio != NULL);
284 		}
285 
286 		/*
287 		 * Walk forward through sufficiently contiguous I/Os.
288 		 */
289 		while ((dio = AVL_NEXT(t, lio)) != NULL &&
290 		    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
291 		    IO_SPAN(fio, dio) <= maxspan &&
292 		    IO_GAP(lio, dio) <= maxgap) {
293 			lio = dio;
294 			if (!(lio->io_flags & ZIO_FLAG_OPTIONAL))
295 				mio = lio;
296 		}
297 
298 		/*
299 		 * Now that we've established the range of the I/O aggregation
300 		 * we must decide what to do with trailing optional I/Os.
301 		 * For reads, there's nothing to do.  For writes, although we
302 		 * can no longer aggregate at this level, a trailing optional
303 		 * I/O may still allow the underlying device to aggregate with
304 		 * subsequent I/Os.  We must therefore determine whether the
305 		 * next non-optional I/O is close enough to make aggregation
306 		 * worthwhile.
307 		 */
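		/*
		 * For example, with the default 4K write gap limit: if the
		 * range ends in a trailing optional 4K write that is
		 * immediately followed (gap 0) by a non-optional write
		 * beginning within 4K of the last non-optional I/O's end,
		 * we stretch and keep the trailing optional write; if no
		 * such write is found, the else clause below trims the
		 * trailing optional I/Os back off the range.
		 */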
308 		stretch = B_FALSE;
309 		if (t != &vq->vq_read_tree && mio != NULL) {
310 			nio = lio;
311 			while ((dio = AVL_NEXT(t, nio)) != NULL &&
312 			    IO_GAP(nio, dio) == 0 &&
313 			    IO_GAP(mio, dio) <= zfs_vdev_write_gap_limit) {
314 				nio = dio;
315 				if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
316 					stretch = B_TRUE;
317 					break;
318 				}
319 			}
320 		}
321 
322 		if (stretch) {
323 			/* This may be a no-op: the next I/O may already be non-optional. */
324 			VERIFY((dio = AVL_NEXT(t, lio)) != NULL);
325 			dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
326 		} else {
327 			while (lio != mio && lio != fio) {
328 				ASSERT(lio->io_flags & ZIO_FLAG_OPTIONAL);
329 				lio = AVL_PREV(t, lio);
330 				ASSERT(lio != NULL);
331 			}
332 		}
333 	}
334 
335 	if (fio != lio) {
336 		uint64_t size = IO_SPAN(fio, lio);
337 		ASSERT(size <= zfs_vdev_aggregation_limit);
338 
339 		aio = zio_vdev_delegated_io(fio->io_vd, fio->io_offset,
340 		    zio_buf_alloc(size), size, fio->io_type, ZIO_PRIORITY_AGG,
341 		    flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
342 		    vdev_queue_agg_io_done, NULL);
343 		aio->io_timestamp = fio->io_timestamp;
344 
345 		nio = fio;
346 		do {
347 			dio = nio;
348 			nio = AVL_NEXT(t, dio);
349 			ASSERT(dio->io_type == aio->io_type);
350 			ASSERT(dio->io_vdev_tree == t);
351 
352 			if (dio->io_flags & ZIO_FLAG_NODATA) {
353 				ASSERT(dio->io_type == ZIO_TYPE_WRITE);
354 				bzero((char *)aio->io_data + (dio->io_offset -
355 				    aio->io_offset), dio->io_size);
356 			} else if (dio->io_type == ZIO_TYPE_WRITE) {
357 				bcopy(dio->io_data, (char *)aio->io_data +
358 				    (dio->io_offset - aio->io_offset),
359 				    dio->io_size);
360 			}
361 
362 			zio_add_child(dio, aio);
363 			vdev_queue_io_remove(vq, dio);
364 			zio_vdev_io_bypass(dio);
365 			zio_execute(dio);
366 		} while (dio != lio);
367 
368 		vdev_queue_pending_add(vq, aio);
369 
370 		return (aio);
371 	}
372 
373 	ASSERT(fio->io_vdev_tree == t);
374 	vdev_queue_io_remove(vq, fio);
375 
376 	/*
377 	 * If the I/O is or was optional and therefore has no data, we
378 	 * simply discard it.  We must drop the vdev queue's lock first,
379 	 * to avoid a deadlock that we could otherwise encounter because
380 	 * this I/O will complete immediately.
381 	 */
382 	if (fio->io_flags & ZIO_FLAG_NODATA) {
383 		mutex_exit(&vq->vq_lock);
384 		zio_vdev_io_bypass(fio);
385 		zio_execute(fio);
386 		mutex_enter(&vq->vq_lock);
387 		goto again;
388 	}
389 
390 	vdev_queue_pending_add(vq, fio);
391 
392 	return (fio);
393 }
394 
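/*
 * Admit a read or write to the vdev queue and return the next I/O, if any,
 * that the caller should issue to the device now.  I/Os flagged
 * ZIO_FLAG_DONT_QUEUE are passed straight through.  NULL is returned when
 * nothing new can be issued yet (at least zfs_vdev_min_pending I/Os are
 * already pending) or when the selected I/O is an aggregate, which is
 * issued here via zio_nowait() rather than handed back.
 */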
395 zio_t *
396 vdev_queue_io(zio_t *zio)
397 {
398 	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
399 	zio_t *nio;
400 
401 	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
402 
403 	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
404 		return (zio);
405 
406 	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
407 
408 	if (zio->io_type == ZIO_TYPE_READ)
409 		zio->io_vdev_tree = &vq->vq_read_tree;
410 	else
411 		zio->io_vdev_tree = &vq->vq_write_tree;
412 
413 	mutex_enter(&vq->vq_lock);
414 
415 	zio->io_timestamp = ddi_get_lbolt64();
416 	zio->io_deadline = (zio->io_timestamp >> zfs_vdev_time_shift) +
417 	    zio->io_priority;
418 
419 	vdev_queue_io_add(vq, zio);
420 
421 	nio = vdev_queue_io_to_issue(vq, zfs_vdev_min_pending);
422 
423 	mutex_exit(&vq->vq_lock);
424 
425 	if (nio == NULL)
426 		return (NULL);
427 
428 	if (nio->io_done == vdev_queue_agg_io_done) {
429 		zio_nowait(nio);
430 		return (NULL);
431 	}
432 
433 	return (nio);
434 }
435 
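/*
 * Called when an I/O previously issued from this queue completes: remove it
 * from the pending tree, record the completion and latency timestamps, and
 * issue up to zfs_vdev_ramp_rate replacement I/Os, subject to the
 * zfs_vdev_max_pending limit.
 */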
436 void
437 vdev_queue_io_done(zio_t *zio)
438 {
439 	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
440 
441 	if (zio_injection_enabled)
442 		delay(SEC_TO_TICK(zio_handle_io_delay(zio)));
443 
444 	mutex_enter(&vq->vq_lock);
445 
446 	vdev_queue_pending_remove(vq, zio);
447 
448 	vq->vq_io_complete_ts = ddi_get_lbolt64();
449 	vq->vq_io_delta_ts = vq->vq_io_complete_ts - zio->io_timestamp;
450 
451 	for (int i = 0; i < zfs_vdev_ramp_rate; i++) {
452 		zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
453 		if (nio == NULL)
454 			break;
455 		mutex_exit(&vq->vq_lock);
456 		if (nio->io_done == vdev_queue_agg_io_done) {
457 			zio_nowait(nio);
458 		} else {
459 			zio_vdev_io_reissue(nio);
460 			zio_execute(nio);
461 		}
462 		mutex_enter(&vq->vq_lock);
463 	}
464 
465 	mutex_exit(&vq->vq_lock);
466 }
467