/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dnode.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_zfetch.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/kstat.h>

/*
 * This tunable disables predictive prefetch. Note that it leaves "prescient"
 * prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
 * prescient prefetch never issues i/os that end up not being needed,
 * so it can't hurt performance.
 */
boolean_t zfs_prefetch_disable = B_FALSE;

/* max # of streams per zfetch */
uint32_t zfetch_max_streams = 8;
/* min time before stream reclaim */
uint32_t zfetch_min_sec_reap = 2;
/* max bytes to prefetch per stream (default 8MB) */
uint32_t zfetch_max_distance = 8 * 1024 * 1024;
/* number of bytes in an array_read at which we stop prefetching (1MB) */
uint64_t zfetch_array_rd_sz = 1024 * 1024;
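
/*
 * As a rough illustration of the defaults above, assuming a 128K data
 * block size: each stream may run up to 8MB / 128K = 64 blocks ahead of
 * the end of the most recent demand read, and a single dnode may have up
 * to 8 streams, so the total read-ahead window maintained on behalf of
 * one dnode is bounded by roughly 8 * 8MB = 64MB.
 */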

typedef struct zfetch_stats {
	kstat_named_t zfetchstat_hits;
	kstat_named_t zfetchstat_misses;
	kstat_named_t zfetchstat_max_streams;
} zfetch_stats_t;

static zfetch_stats_t zfetch_stats = {
	{ "hits", KSTAT_DATA_UINT64 },
	{ "misses", KSTAT_DATA_UINT64 },
	{ "max_streams", KSTAT_DATA_UINT64 },
};

#define	ZFETCHSTAT_BUMP(stat) \
	atomic_inc_64(&zfetch_stats.stat.value.ui64);

kstat_t *zfetch_ksp;

void
zfetch_init(void)
{
	zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (zfetch_ksp != NULL) {
		zfetch_ksp->ks_data = &zfetch_stats;
		kstat_install(zfetch_ksp);
	}
}

void
zfetch_fini(void)
{
	if (zfetch_ksp != NULL) {
		kstat_delete(zfetch_ksp);
		zfetch_ksp = NULL;
	}
}

/*
 * This takes a pointer to a zfetch structure and a dnode. It performs the
 * necessary setup for the zfetch structure, grokking data from the
 * associated dnode.
 */
void
dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
{
	if (zf == NULL)
		return;

	zf->zf_dnode = dno;

	list_create(&zf->zf_stream, sizeof (zstream_t),
	    offsetof(zstream_t, zs_node));

	rw_init(&zf->zf_rwlock, NULL, RW_DEFAULT, NULL);
}

static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
{
	ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
	list_remove(&zf->zf_stream, zs);
	mutex_destroy(&zs->zs_lock);
	kmem_free(zs, sizeof (*zs));
}

/*
 * Clean up state associated with a zfetch structure (e.g. destroy the
 * streams). This doesn't free the zfetch_t itself; that's left to the caller.
 */
void
dmu_zfetch_fini(zfetch_t *zf)
{
	zstream_t *zs;

	ASSERT(!RW_LOCK_HELD(&zf->zf_rwlock));

	rw_enter(&zf->zf_rwlock, RW_WRITER);
	while ((zs = list_head(&zf->zf_stream)) != NULL)
		dmu_zfetch_stream_remove(zf, zs);
	rw_exit(&zf->zf_rwlock);
	list_destroy(&zf->zf_stream);
	rw_destroy(&zf->zf_rwlock);

	zf->zf_dnode = NULL;
}

/*
 * If there aren't too many streams already, create a new stream.
 * The "blkid" argument is the next block that we expect this stream to access.
 * While we're here, clean up old streams (which haven't been
 * accessed for at least zfetch_min_sec_reap seconds).
 */
static void
dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
{
	zstream_t *zs_next;
	int numstreams = 0;

	ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));

	/*
	 * Clean up old streams.
	 */
	for (zstream_t *zs = list_head(&zf->zf_stream);
	    zs != NULL; zs = zs_next) {
		zs_next = list_next(&zf->zf_stream, zs);
		if (((gethrtime() - zs->zs_atime) / NANOSEC) >
		    zfetch_min_sec_reap)
			dmu_zfetch_stream_remove(zf, zs);
		else
			numstreams++;
	}

	/*
	 * The maximum number of streams is normally zfetch_max_streams,
	 * but for small files we lower it such that it's at least possible
	 * for all the streams to be non-overlapping.
	 *
	 * If we are already at the maximum number of streams for this file,
	 * even after removing old streams, then don't create this stream.
	 */
	uint32_t max_streams = MAX(1, MIN(zfetch_max_streams,
	    zf->zf_dnode->dn_maxblkid * zf->zf_dnode->dn_datablksz /
	    zfetch_max_distance));
	if (numstreams >= max_streams) {
		ZFETCHSTAT_BUMP(zfetchstat_max_streams);
		return;
	}

	zstream_t *zs = kmem_zalloc(sizeof (*zs), KM_SLEEP);
	zs->zs_blkid = blkid;
	zs->zs_pf_blkid = blkid;
	zs->zs_atime = gethrtime();
	mutex_init(&zs->zs_lock, NULL, MUTEX_DEFAULT, NULL);

	list_insert_head(&zf->zf_stream, zs);
}

/*
 * This is the prefetch entry point. It calls all of the other dmu_zfetch
 * routines to create, delete, find, or operate upon prefetch streams.
 */
void
dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks)
{
	zstream_t *zs;

	if (zfs_prefetch_disable)
		return;

	/*
	 * As a fast path for small (single-block) files, ignore access
	 * to the first block.
	 */
	if (blkid == 0)
		return;

	rw_enter(&zf->zf_rwlock, RW_READER);

	for (zs = list_head(&zf->zf_stream); zs != NULL;
	    zs = list_next(&zf->zf_stream, zs)) {
		if (blkid == zs->zs_blkid) {
			mutex_enter(&zs->zs_lock);
			/*
			 * zs_blkid could have changed before we
			 * acquired zs_lock; re-check it here.
			 */
			if (blkid != zs->zs_blkid) {
				mutex_exit(&zs->zs_lock);
				continue;
			}
			break;
		}
	}

	if (zs == NULL) {
		/*
		 * This access is not part of any existing stream. Create
		 * a new stream for it.
		 */
		ZFETCHSTAT_BUMP(zfetchstat_misses);
		if (rw_tryupgrade(&zf->zf_rwlock))
			dmu_zfetch_stream_create(zf, blkid + nblks);
		rw_exit(&zf->zf_rwlock);
		return;
	}

	/*
	 * This access was to a block that we issued a prefetch for on
	 * behalf of this stream. Issue further prefetches for this stream.
	 *
	 * Normally, we start prefetching where we stopped
	 * prefetching last (zs_pf_blkid). But when we get our first
	 * hit on this stream, zs_pf_blkid == zs_blkid, and we don't
	 * want to prefetch the block we just accessed. In this case,
	 * start just after the block we just accessed.
	 */
	int64_t pf_start = MAX(zs->zs_pf_blkid, blkid + nblks);

	/*
	 * Double our amount of prefetched data, but don't let the
	 * prefetch get further ahead than zfetch_max_distance.
	 */
	int pf_nblks =
	    MIN((int64_t)zs->zs_pf_blkid - zs->zs_blkid + nblks,
	    zs->zs_blkid + nblks +
	    (zfetch_max_distance >> zf->zf_dnode->dn_datablkshift) - pf_start);
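
	/*
	 * For example, a strictly sequential reader issuing single-block
	 * demand reads (nblks == 1) on a file with 128K blocks doubles
	 * this stream's read-ahead window on each hit (1, 2, 4, 8, ...
	 * blocks beyond zs_blkid) until zs_pf_blkid is capped at
	 * zfetch_max_distance ahead, i.e. 8MB / 128K = 64 blocks; after
	 * that, each hit prefetches just enough new blocks to keep the
	 * window topped up.
	 */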
	zs->zs_pf_blkid = pf_start + pf_nblks;
	zs->zs_atime = gethrtime();
	zs->zs_blkid = blkid + nblks;

	/*
	 * dbuf_prefetch() issues the prefetch i/o
	 * asynchronously, but it may need to wait for an
	 * indirect block to be read from disk. Therefore
	 * we do not want to hold any locks while we call it.
	 */
	mutex_exit(&zs->zs_lock);
	rw_exit(&zf->zf_rwlock);
	for (int i = 0; i < pf_nblks; i++) {
		dbuf_prefetch(zf->zf_dnode, 0, pf_start + i,
		    ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
	}
	ZFETCHSTAT_BUMP(zfetchstat_hits);
}