/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/kstat.h>
#include <sys/abd.h>

/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache.  When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result.  In the best case, this can turn 128 back-to-back 512-byte
 * reads into a single 64k read followed by 127 cache hits; this reduces
 * latency dramatically.  In the worst case, it can turn an isolated 512-byte
 * read into a 64k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth.  A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region.  Currently, only
 * metadata I/O is inflated.  A further enhancement could take advantage of
 * more semantic information about the I/O.  And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate.  This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill.  When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read.  Read data from the cache.
 *
 * (4) Write.  Update cache contents after write completion.
 *
 * (5) Evict.  When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */

/*
 * These tunables are for performance analysis.
 */
/*
 * All i/os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer).  At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 *
 * TODO: Note that with the current ZFS code, it turns out that the
 * vdev cache is not helpful, and in some cases actually harmful.  It
 * is better if we disable this.  Once some time has passed, we should
 * actually remove this to simplify the code.  For now we just disable
 * it by setting the zfs_vdev_cache_size to zero.  Note that Solaris 11
 * has made these same changes.
 */
int zfs_vdev_cache_max = 1<<14;			/* 16KB */
int zfs_vdev_cache_size = 0;
int zfs_vdev_cache_bshift = 16;

#define	VCBS (1 << zfs_vdev_cache_bshift)	/* 64KB */
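/*
 * Worked example (an illustration added here, not part of the original
 * code): with the default zfs_vdev_cache_bshift of 16, VCBS is 64KB.
 * A 512-byte read at device offset 0x12345200 maps to the cache line at
 * P2ALIGN(0x12345200, VCBS) == 0x12340000, at byte phase
 * P2PHASE(0x12345200, VCBS) == 0x5200 within that line.  A minimal
 * userland sketch of the same arithmetic, assuming a power-of-two
 * line size:
 *
 *	uint64_t vcbs = 1ULL << 16;		// 64KB cache line
 *	uint64_t off = 0x12345200;		// hypothetical I/O offset
 *	uint64_t line = off & ~(vcbs - 1);	// P2ALIGN -> 0x12340000
 *	uint64_t phase = off & (vcbs - 1);	// P2PHASE -> 0x5200
 */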
kstat_t	*vdc_ksp = NULL;

typedef struct vdc_stats {
	kstat_named_t vdc_stat_delegations;
	kstat_named_t vdc_stat_hits;
	kstat_named_t vdc_stat_misses;
} vdc_stats_t;

static vdc_stats_t vdc_stats = {
	{ "delegations",	KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 }
};

#define	VDCSTAT_BUMP(stat)	atomic_inc_64(&vdc_stats.stat.value.ui64);

static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_offset < ve2->ve_offset)
		return (-1);
	if (ve1->ve_offset > ve2->ve_offset)
		return (1);
	return (0);
}

static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_lastused < ve2->ve_lastused)
		return (-1);
	if (ve1->ve_lastused > ve2->ve_lastused)
		return (1);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}

/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT3P(ve->ve_fill_io, ==, NULL);
	ASSERT3P(ve->ve_abd, !=, NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	abd_free(ve->ve_abd);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}
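/*
 * Capacity sketch (hypothetical numbers, for illustration only):
 * vdev_cache_allocate() below treats the cache as full once
 * avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift exceeds
 * zfs_vdev_cache_size.  With an assumed zfs_vdev_cache_size of 10MB
 * (10 << 20 == 10485760) and the default 64KB line size, eviction of
 * the LRU entry (avl_first() of the lastused tree, by construction of
 * vdev_cache_lastused_compare() above) begins once the tree holds more
 * than 10485760 >> 16 == 160 lines.
 */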
/*
 * Allocate an entry in the cache.  At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL)
			return (NULL);
		ASSERT3U(ve->ve_hits, !=, 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = ddi_get_lbolt();
	ve->ve_abd = abd_alloc_for_io(VCBS, B_TRUE);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}

static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT3P(ve->ve_fill_io, ==, NULL);

	if (ve->ve_lastused != ddi_get_lbolt()) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = ddi_get_lbolt();
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	abd_copy_off(zio->io_abd, ve->ve_abd, 0, cache_phase, zio->io_size);
}

/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *fio)
{
	vdev_t *vd = fio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve = fio->io_private;
	zio_t *pio;

	ASSERT3U(fio->io_size, ==, VCBS);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

	ASSERT3P(ve->ve_fill_io, ==, fio);
	ASSERT3U(ve->ve_offset, ==, fio->io_offset);
	ASSERT3P(ve->ve_abd, ==, fio->io_abd);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
	zio_link_t *zl = NULL;
	while ((pio = zio_walk_parents(fio, &zl)) != NULL)
		vdev_cache_hit(vc, ve, pio);

	if (fio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);
}
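/*
 * Delegation sketch (hypothetical offsets, for illustration only):
 * suppose two 8KB reads at offsets 0x10000 and 0x12000 both map to the
 * 64KB line at 0x10000.  In vdev_cache_read() below, the first read
 * misses, allocates the entry, and issues a VCBS-sized fill read whose
 * completion runs vdev_cache_fill() above.  The second read arrives
 * while ve_fill_io is still set, so it is made a child of the in-flight
 * fill I/O (a "delegation") and is satisfied from the line when the
 * fill completes, without issuing its own disk I/O.
 */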
/*
 * Read data from the cache.  Returns B_TRUE on a cache hit,
 * B_FALSE on a miss.
 */
boolean_t
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
	zio_t *fio;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (B_FALSE);

	if (zio->io_size > zfs_vdev_cache_max)
		return (B_FALSE);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
		return (B_FALSE);

	ASSERT3U(cache_phase + zio->io_size, <=, VCBS);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);

	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (B_FALSE);
		}

		if ((fio = ve->ve_fill_io) != NULL) {
			zio_vdev_io_bypass(zio);
			zio_add_child(zio, fio);
			mutex_exit(&vc->vc_lock);
			VDCSTAT_BUMP(vdc_stat_delegations);
			return (B_TRUE);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		VDCSTAT_BUMP(vdc_stat_hits);
		return (B_TRUE);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (B_FALSE);
	}

	fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,
	    ve->ve_abd, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	zio_vdev_io_bypass(zio);
	zio_add_child(zio, fio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);
	VDCSTAT_BUMP(vdc_stat_misses);

	return (B_TRUE);
}

/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, VCBS);
	uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
	avl_index_t where;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			abd_copy_off(ve->ve_abd, zio->io_abd,
			    start - ve->ve_offset, start - io_start,
			    end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}
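/*
 * Worked example for the overlap arithmetic in vdev_cache_write() above
 * (hypothetical values, for illustration only): a 24KB write at
 * io_start 0x1a000 gives io_end 0x20000, min_offset
 * P2ALIGN(0x1a000, VCBS) == 0x10000, and max_offset
 * P2ROUNDUP(0x20000, VCBS) == 0x20000.  Only a cached line at
 * ve_offset 0x10000 falls in [min_offset, max_offset); for it,
 * start == 0x1a000 and end == 0x20000, so abd_copy_off() rewrites the
 * line from byte 0xa000 onward using the first 0x6000 bytes of the
 * write's buffer, keeping the cached copy coherent with the disk.
 */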
void
vdev_cache_purge(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);
}

void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));
}

void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	vdev_cache_purge(vd);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}

void
vdev_cache_stat_init(void)
{
	vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (vdc_ksp != NULL) {
		vdc_ksp->ks_data = &vdc_stats;
		kstat_install(vdc_ksp);
	}
}

void
vdev_cache_stat_fini(void)
{
	if (vdc_ksp != NULL) {
		kstat_delete(vdc_ksp);
		vdc_ksp = NULL;
	}
}
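/*
 * Observability note (an addition for illustration; the command comes
 * from the kstat(1M) utility on illumos/Solaris, not from this file):
 * on kstat-based platforms, the counters registered above can be read
 * from userland, e.g.:
 *
 *	$ kstat -m zfs -n vdev_cache_stats
 *
 * which reports the "delegations", "hits", and "misses" values bumped
 * by VDCSTAT_BUMP().
 */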