/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache. When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result. In the best case, this can turn 256 back-to-back 512-byte
 * reads into a single 128k read followed by 255 cache hits; this reduces
 * latency dramatically. In the worst case, it can turn an isolated 512-byte
 * read into a 128k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth. A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region. It could also
 * take advantage of semantic information about the I/O. And it could use
 * something faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate. This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill. When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read. Read data from the cache.
 *
 * (4) Write. Update cache contents after write completion.
 *
 * (5) Evict. When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds vc_size.
 */
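
/*
 * Illustrative sketch, not part of the original code: the line/phase
 * arithmetic used throughout this file, assuming a 128K cache line
 * (vc_bshift == 17, so vc_blocksize == 0x20000). A 512-byte read at
 * device offset 0x2a200 maps onto a single cache line as follows:
 *
 *	P2ALIGN(0x2a200, 0x20000) == 0x20000	(cache_offset: start of line)
 *	P2PHASE(0x2a200, 0x20000) == 0x0a200	(cache_phase: offset in line)
 *
 * A hit then copies zio->io_size bytes starting at ve->ve_data + cache_phase,
 * which is exactly what vdev_cache_hit() does below.
 */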

static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_offset < ve2->ve_offset)
		return (-1);
	if (ve1->ve_offset > ve2->ve_offset)
		return (1);
	return (0);
}

static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_lastused < ve2->ve_lastused)
		return (-1);
	if (ve1->ve_lastused > ve2->ve_lastused)
		return (1);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}

/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	dprintf("evicting %p, off %llx, LRU %llu, age %lu, hits %u, stale %u\n",
	    vc, ve->ve_offset, ve->ve_lastused, lbolt - ve->ve_lastused,
	    ve->ve_hits, ve->ve_missed_update);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, vc->vc_blocksize);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}

/*
 * Allocate an entry in the cache. At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, vc->vc_blocksize);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (vc->vc_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << vc->vc_bshift) >
	    vc->vc_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL) {
			dprintf("can't evict in %p, still filling\n", vc);
			return (NULL);
		}
		ASSERT(ve->ve_hits != 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = lbolt;
	ve->ve_data = zio_buf_alloc(vc->vc_blocksize);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}

static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, vc->vc_blocksize);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);

	if (ve->ve_lastused != lbolt) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = lbolt;
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}

/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve = zio->io_private;
	zio_t *dio;

	ASSERT(zio->io_size == vc->vc_blocksize);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

	ASSERT(ve->ve_fill_io == zio);
	ASSERT(ve->ve_offset == zio->io_offset);
	ASSERT(ve->ve_data == zio->io_data);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
	for (dio = zio->io_delegate_list; dio; dio = dio->io_delegate_next)
		vdev_cache_hit(vc, ve, dio);

	if (zio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);

	while ((dio = zio->io_delegate_list) != NULL) {
		zio->io_delegate_list = dio->io_delegate_next;
		dio->io_delegate_next = NULL;
		dio->io_error = zio->io_error;
		zio_next_stage(dio);
	}
}
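
/*
 * Illustrative sketch, not part of the original source: how concurrent
 * misses on the same cache line are coalesced. The first miss allocates
 * the entry and issues the fill I/O; any later read that finds ve_fill_io
 * non-NULL simply chains itself onto the fill zio's delegate list instead
 * of issuing its own device read, as vdev_cache_read() below does:
 *
 *	zio->io_delegate_next = fio->io_delegate_list;
 *	fio->io_delegate_list = zio;
 *	zio_vdev_io_bypass(zio);
 *
 * When the fill completes, vdev_cache_fill() above copies the cached data
 * into every delegated zio and then advances each one through the pipeline
 * with zio_next_stage().
 */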

/*
 * Read data from the cache. Returns 0 on cache hit, errno on a miss.
 */
int
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, vc->vc_blocksize);
	uint64_t cache_phase = P2PHASE(zio->io_offset, vc->vc_blocksize);
	zio_t *fio;

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (EINVAL);

	if (zio->io_size > vc->vc_max)
		return (EOVERFLOW);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2CROSS(zio->io_offset, zio->io_offset + zio->io_size - 1,
	    vc->vc_blocksize))
		return (EXDEV);

	ASSERT(cache_phase + zio->io_size <= vc->vc_blocksize);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);

	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (ESTALE);
		}

		if ((fio = ve->ve_fill_io) != NULL) {
			zio->io_delegate_next = fio->io_delegate_list;
			fio->io_delegate_list = zio;
			zio_vdev_io_bypass(zio);
			mutex_exit(&vc->vc_lock);
			return (0);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		zio_next_stage(zio);
		return (0);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (ENOMEM);
	}

	fio = zio_vdev_child_io(zio, NULL, zio->io_vd, cache_offset,
	    ve->ve_data, vc->vc_blocksize, ZIO_TYPE_READ,
	    ZIO_PRIORITY_CACHE_FILL, ZIO_FLAG_DONT_CACHE |
	    ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY,
	    vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	fio->io_delegate_list = zio;
	zio_vdev_io_bypass(zio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);

	return (0);
}

/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, vc->vc_blocksize);
	uint64_t max_offset = P2ROUNDUP(io_end, vc->vc_blocksize);
	avl_index_t where;

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + vc->vc_blocksize, io_end);

		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			bcopy((char *)zio->io_data + start - io_start,
			    ve->ve_data + start - ve->ve_offset, end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}
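
/*
 * Worked example, illustrative only and again assuming 128K cache lines
 * (vc_blocksize == 0x20000): a 12K write (io_size = 0x3000) at
 * io_start = 0x1f000 has io_end = 0x22000, so
 * min_offset = P2ALIGN(0x1f000, 0x20000) = 0 and
 * max_offset = P2ROUNDUP(0x22000, 0x20000) = 0x40000. The loop above then
 * visits two cached lines, if present:
 *
 *	line at 0x00000: start = 0x1f000, end = 0x20000,
 *	    copies 0x1000 bytes to ve_data + 0x1f000
 *	line at 0x20000: start = 0x20000, end = 0x22000,
 *	    copies 0x2000 bytes to ve_data + 0
 *
 * A line still being filled can't be patched safely, so it is just marked
 * ve_missed_update and evicted when its fill I/O completes.
 */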

void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));

	vc->vc_blocksize = 1ULL << vc->vc_bshift;
}

void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}
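
/*
 * Usage sketch (illustrative; the actual call sites live elsewhere in the
 * vdev/zio code and the exact flow there may differ): a vdev's cache is
 * created with vdev_cache_init(), consulted per-zio on the read and write
 * paths, and torn down with vdev_cache_fini().
 *
 *	vdev_cache_init(vd);
 *	...
 *	if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0)
 *		return;		(zio satisfied or delegated by the cache)
 *	...
 *	if (zio->io_type == ZIO_TYPE_WRITE)
 *		vdev_cache_write(zio);
 *	...
 *	vdev_cache_fini(vd);
 */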