/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/kstat.h>

/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache. When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result. In the best case, this can turn 128 back-to-back 512-byte
 * reads into a single 64k read followed by 127 cache hits; this reduces
 * latency dramatically. In the worst case, it can turn an isolated 512-byte
 * read into a 64k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth. A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region. Currently, only
 * metadata I/O is inflated. A further enhancement could take advantage of
 * more semantic information about the I/O. And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate. This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill. When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read. Read data from the cache.
 *
 * (4) Write. Update cache contents after write completion.
 *
 * (5) Evict. When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */

/*
 * These tunables are for performance analysis.
 */
/*
 * All i/os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer). At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 */
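/*
 * A worked example of the inflation arithmetic (illustrative numbers only):
 * with the default zfs_vdev_cache_bshift of 16, each cache line (VCBS) is
 * 64K. A 4K read at offset 0x1f000 is smaller than zfs_vdev_cache_max
 * (16K), so it is inflated to a single 64K read of the aligned region
 * [0x10000, 0x20000); any later small read falling inside that region is
 * then a cache hit.
 */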
int zfs_vdev_cache_max = 1 << 14;		/* 16KB */
int zfs_vdev_cache_size = 10ULL << 20;		/* 10MB */
int zfs_vdev_cache_bshift = 16;

#define	VCBS (1 << zfs_vdev_cache_bshift)	/* 64KB */

kstat_t	*vdc_ksp = NULL;

typedef struct vdc_stats {
	kstat_named_t vdc_stat_delegations;
	kstat_named_t vdc_stat_hits;
	kstat_named_t vdc_stat_misses;
} vdc_stats_t;

static vdc_stats_t vdc_stats = {
	{ "delegations",	KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 }
};

#define	VDCSTAT_BUMP(stat)	atomic_add_64(&vdc_stats.stat.value.ui64, 1)

/*
 * Order cache entries by starting offset; this is the comparator for
 * vc_offset_tree, which is used for lookup.
 */
static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_offset < ve2->ve_offset)
		return (-1);
	if (ve1->ve_offset > ve2->ve_offset)
		return (1);
	return (0);
}

/*
 * Order cache entries from least to most recently used; this is the
 * comparator for vc_lastused_tree, which makes LRU eviction a simple
 * avl_first() lookup.
 */
static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_lastused < ve2->ve_lastused)
		return (-1);
	if (ve1->ve_lastused > ve2->ve_lastused)
		return (1);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}

/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	dprintf("evicting %p, off %llx, LRU %llu, age %lu, hits %u, stale %u\n",
	    vc, ve->ve_offset, ve->ve_lastused, lbolt - ve->ve_lastused,
	    ve->ve_hits, ve->ve_missed_update);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, VCBS);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}
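/*
 * Sizing note, derived from the defaults above: eviction triggers when
 * avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift exceeds
 * zfs_vdev_cache_size, i.e. with a 10MB cache and 64K lines, each vdev
 * can hold 10MB / 64K = 160 resident lines before the LRU entry is
 * evicted to make room.
 */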
/*
 * Allocate an entry in the cache. At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL) {
			dprintf("can't evict in %p, still filling\n", vc);
			return (NULL);
		}
		ASSERT(ve->ve_hits != 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = lbolt;
	ve->ve_data = zio_buf_alloc(VCBS);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}

/*
 * Satisfy a read from cache entry 've': copy the requested range out of
 * the cached line and move the entry to the most-recently-used end of
 * the LRU tree.
 */
static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);

	if (ve->ve_lastused != lbolt) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = lbolt;
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}

/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve = zio->io_private;
	zio_t *dio;

	ASSERT(zio->io_size == VCBS);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

	ASSERT(ve->ve_fill_io == zio);
	ASSERT(ve->ve_offset == zio->io_offset);
	ASSERT(ve->ve_data == zio->io_data);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
	for (dio = zio->io_delegate_list; dio; dio = dio->io_delegate_next)
		vdev_cache_hit(vc, ve, dio);

	if (zio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);

	while ((dio = zio->io_delegate_list) != NULL) {
		zio->io_delegate_list = dio->io_delegate_next;
		dio->io_delegate_next = NULL;
		dio->io_error = zio->io_error;
		zio_execute(dio);
	}
}
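/*
 * Delegation example: if threads T1, T2 and T3 each issue a small read
 * that maps to the same not-yet-filled 64K line, T1's miss allocates the
 * entry and issues the single fill I/O; T2 and T3 are chained onto
 * fio->io_delegate_list and are completed by vdev_cache_fill() above, so
 * one disk read services all three callers.
 */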
/*
 * Read data from the cache. Returns 0 on cache hit, errno on a miss.
 */
int
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
	zio_t *fio;

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (EINVAL);

	if (zio->io_size > zfs_vdev_cache_max)
		return (EOVERFLOW);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2CROSS(zio->io_offset, zio->io_offset + zio->io_size - 1, VCBS))
		return (EXDEV);

	ASSERT(cache_phase + zio->io_size <= VCBS);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);

	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (ESTALE);
		}

		if ((fio = ve->ve_fill_io) != NULL) {
			zio->io_delegate_next = fio->io_delegate_list;
			fio->io_delegate_list = zio;
			zio_vdev_io_bypass(zio);
			mutex_exit(&vc->vc_lock);
			VDCSTAT_BUMP(vdc_stat_delegations);
			return (0);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		zio_execute(zio);
		VDCSTAT_BUMP(vdc_stat_hits);
		return (0);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (ENOMEM);
	}

	fio = zio_vdev_child_io(zio, NULL, zio->io_vd, cache_offset,
	    ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_CACHE_FILL,
	    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_PROPAGATE |
	    ZIO_FLAG_DONT_RETRY | ZIO_FLAG_NOBOOKMARK,
	    vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	fio->io_delegate_list = zio;
	zio_vdev_io_bypass(zio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);
	VDCSTAT_BUMP(vdc_stat_misses);

	return (0);
}

/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, VCBS);
	uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
	avl_index_t where;

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			bcopy((char *)zio->io_data + start - io_start,
			    ve->ve_data + start - ve->ve_offset, end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}

/*
 * Evict every entry, leaving the cache empty.
 */
void
vdev_cache_purge(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);
}

/*
 * Initialize a vdev's cache: its lock and the two AVL indexes
 * (by offset for lookup, by last use for LRU eviction).
 */
void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));
}

/*
 * Tear down a vdev's cache, releasing any remaining entries.
 */
void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	vdev_cache_purge(vd);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}
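/*
 * The counters below are registered under the "zfs" kstat module, so on
 * Solaris they can typically be inspected from userland with, e.g.,
 *	kstat -m zfs -n vdev_cache_stats
 * (an illustrative kstat(1M) invocation; exact options may vary by release).
 */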
/*
 * Register the vdev_cache kstats (delegations, hits, misses).
 */
void
vdev_cache_stat_init(void)
{
	vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (vdc_ksp != NULL) {
		vdc_ksp->ks_data = &vdc_stats;
		kstat_install(vdc_ksp);
	}
}

/*
 * Unregister the vdev_cache kstats.
 */
void
vdev_cache_stat_fini(void)
{
	if (vdc_ksp != NULL) {
		kstat_delete(vdc_ksp);
		vdc_ksp = NULL;
	}
}