/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.
 * The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
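
/*
 * Illustrative sketch (not compiled; not part of the ARC proper) of the
 * lock-ordering rule described above.  While an arc list lock is held, a
 * hash table lock may only be taken with mutex_tryenter(); backing off on
 * failure avoids deadlock against threads that acquire the locks in the
 * opposite order.  The 'state' and 'hdr' variables here are hypothetical.
 */
#if 0
	mutex_enter(&state->arcs_mtx);		/* arc list lock */
	hash_lock = HDR_LOCK(hdr);
	if (mutex_tryenter(hash_lock)) {
		/* ... safe to operate on the header ... */
		mutex_exit(hash_lock);
	} else {
		/* skip this header rather than risk deadlock */
	}
	mutex_exit(&state->arcs_mtx);
#endif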

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are the
 * only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;
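
/*
 * A rough sketch of the lifecycle implied by the states above (not
 * exhaustive): a dirty buffer starts out anonymous, acquires a DVA as it
 * is written and joins ARC_mru; on re-access it migrates to ARC_mfu; once
 * its data is evicted, only the header survives on the corresponding
 * ghost list, or in ARC_l2c_only if the data is still in the L2ARC:
 *
 *	ARC_anon -> ARC_mru -> ARC_mfu
 *	ARC_mru  -> ARC_mru_ghost	(data evicted, header retained)
 *	ARC_mfu  -> ARC_mfu_ghost	(data evicted, header retained)
 *	ARC_m*   -> ARC_l2c_only	(header kept for L2ARC lookups)
 */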

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
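
/*
 * Example usage (a sketch; arc_buf_add_ref() below uses this exact
 * pattern): break a hit down by demand vs. prefetch and data vs.
 * metadata in a single statement.  The 'hdr' variable is hypothetical.
 */
#if 0
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
#endif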

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_DONT_L2CACHE	(1 << 16)	/* originated by prefetch */
#define	ARC_L2_READING		(1 << 17)	/* L2ARC read in progress */
#define	ARC_L2_WRITING		(1 << 18)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 19)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 20)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_DONT_L2CACHE(hdr)	((hdr)->b_flags & ARC_DONT_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_L2_READING)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		4		/* num of writes */
#define	L2ARC_FEED_DELAY	180		/* starting grace */
#define	L2ARC_FEED_SECS		1		/* caching interval */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t			*l2ad_vdev;	/* vdev */
	spa_t			*l2ad_spa;	/* spa */
	uint64_t		l2ad_hand;	/* next write location */
	uint64_t		l2ad_write;	/* desired write size, bytes */
	uint64_t		l2ad_start;	/* first addr on device */
	uint64_t		l2ad_end;	/* last addr on device */
	uint64_t		l2ad_evict;	/* last addr eviction reached */
	boolean_t		l2ad_first;	/* first sweep through */
	list_t			*l2ad_buflist;	/* buffer list */
	list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	daddr_t		b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}
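
/*
 * Sketch of the buf_hash_find() contract (hypothetical caller; 'spa',
 * 'dva' and 'birth' are assumed inputs): on a hit the hash lock is
 * returned held and must be dropped by the caller; on a miss *lockp is
 * NULL and no lock is held.
 */
#if 0
	kmutex_t *hash_lock;
	arc_buf_hdr_t *hdr = buf_hash_find(spa, &dva, birth, &hash_lock);
	if (hdr != NULL) {
		/* hdr fields are stable while hash_lock is held */
		mutex_exit(hash_lock);
	}
#endif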

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
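
/*
 * Sketch (hypothetical caller): because buf_hash_insert() doubles as a
 * lookup, an insertion race resolves to "use the existing header".  The
 * hash lock is returned held in either case and must be dropped by the
 * caller.
 */
#if 0
	kmutex_t *hash_lock;
	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
	if (exists != NULL) {
		/* lost the race; operate on 'exists' instead of 'hdr' */
	}
	mutex_exit(hash_lock);
#endif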

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);

	ARCSTAT_INCR(arcstat_hdr_size, sizeof (arc_buf_hdr_t));
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);

	ARCSTAT_INCR(arcstat_hdr_size, -sizeof (arc_buf_hdr_t));
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}
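
/*
 * Sketch of the debug-checksum protocol (only active with
 * ZFS_DEBUG_MODIFY; fill_in_new_data() is hypothetical): thaw before
 * legitimately rewriting an anonymous buffer, then freeze again so that
 * later arc_cksum_verify() calls check the new contents.
 */
#if 0
	arc_buf_thaw(buf);		/* discard the stale checksum */
	fill_in_new_data(buf->b_data);
	arc_buf_freeze(buf);		/* recompute (defined below) */
#endif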

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
}

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		list_t *list = &ab->b_state->arcs_list[ab->b_type];
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list[ab->b_type], ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}
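
/*
 * Sketch (hypothetical caller): references pin a buffer in memory.
 * add_reference() pulls the header off its state's evictable list on the
 * 0 -> 1 transition; remove_reference() puts it back on 1 -> 0.  Both
 * expect the hash lock held (except for anonymous buffers).
 */
#if 0
	mutex_enter(hash_lock);
	add_reference(hdr, hash_lock, tag);	/* now un-evictable */
	/* ... use the buffer ... */
	(void) remove_reference(hdr, hash_lock, tag);
	mutex_exit(hash_lock);
#endif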

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space)
{
	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space)
{
	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}
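
/*
 * Sketch (hypothetical caller; 'spa', 'size' and 'tag' are assumed
 * inputs): allocate an anonymous buffer, use it, and drop the reference.
 * The tag passed to arc_buf_free() must match the one given at
 * allocation time.
 */
#if 0
	arc_buf_t *buf = arc_buf_alloc(spa, size, tag, ARC_BUFC_DATA);
	/* ... fill buf->b_data ... */
	arc_buf_free(buf, tag);
#endif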

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().
	 */
	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		mutex_exit(&arc_eviction_mtx);
		return;
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}
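
/*
 * Sketch of the free-on-write deferral (the drain loop below is
 * illustrative only; the list is presumably walked once the in-flight
 * L2ARC write completes): each deferred block is freed with the free
 * function captured in its l2arc_data_free_t.
 */
#if 0
	l2arc_data_free_t *df;

	mutex_enter(&l2arc_free_on_write_mtx);
	while ((df = list_head(l2arc_free_on_write)) != NULL) {
		list_remove(l2arc_free_on_write, df);
		df->l2df_func(df->l2df_data, df->l2df_size);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}
	mutex_exit(&l2arc_free_on_write_mtx);
#endif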

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (hdr->b_l2hdr != NULL) {
		if (!MUTEX_HELD(&l2arc_buflist_mtx)) {
			/*
			 * To prevent arc_free() and l2arc_evict() from
			 * attempting to free the same buffer at the same time,
			 * a FREE_IN_PROGRESS flag is given to arc_free() to
			 * give it priority.  l2arc_evict() can't destroy this
			 * header while we are waiting on l2arc_buflist_mtx.
			 */
			mutex_enter(&l2arc_buflist_mtx);
			ASSERT(hdr->b_l2hdr != NULL);

			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
			mutex_exit(&l2arc_buflist_mtx);
		} else {
			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
		}
		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
		kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t));
		if (hdr->b_state == arc_l2c_only)
			l2arc_hdr_stat_remove();
		hdr->b_l2hdr = NULL;
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort".  It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 */
static void *
arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	list_t *list = &state->arcs_list[type];
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (spa && ab->b_spa != spa) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes &&
					    !HDR_L2_WRITING(ab)) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
				} else {
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}
			ASSERT(ab->b_datacnt == 0);
			arc_change_state(evicted_state, ab, hash_lock);
			ASSERT(HDR_IN_HASH_TABLE(ab));
			ab->b_flags |= ARC_IN_HASH_TABLE;
			ab->b_flags &= ~ARC_BUF_AVAILABLE;
			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	/*
	 * We have just evicted some data into the ghost state, make
	 * sure we also adjust the ghost state size if necessary.
	 */
	if (arc_no_grow &&
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
		int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
		    arc_mru_ghost->arcs_size - arc_c;

		if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
			int64_t todelete =
			    MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
			arc_evict_ghost(arc_mru_ghost, NULL, todelete);
		} else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
			int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
			    arc_mru_ghost->arcs_size +
			    arc_mfu_ghost->arcs_size - arc_c);
			arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
		}
	}

	return (stolen);
}
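
/*
 * Sketch of the recycle path (hypothetical caller; 'size' and 'type'
 * are assumed inputs -- arc_get_data_buf() below uses a pattern like
 * this when the cache is full): instead of freeing an evicted buffer's
 * data and allocating anew, steal a same-sized block directly.
 */
#if 0
	void *data = arc_evict(arc_mru, NULL, size, TRUE, type);
	if (data == NULL)
		data = zio_buf_alloc(size);	/* nothing to recycle */
#endif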

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	list_t *list = &state->arcs_list[ARC_BUFC_DATA];
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint64_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->arcs_mtx);
	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		if (spa && ab->b_spa != spa)
			continue;
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += ab->b_size;

			if (ab->b_l2hdr != NULL) {
				/*
				 * This buffer is cached on the 2nd Level ARC;
				 * don't destroy the header.
				 */
				arc_change_state(arc_l2c_only, ab, hash_lock);
				mutex_exit(hash_lock);
			} else {
				arc_change_state(arc_anon, ab, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(ab);
			}

			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->arcs_mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (list == &state->arcs_list[ARC_BUFC_DATA] &&
	    (bytes < 0 || bytes_deleted < bytes)) {
		list = &state->arcs_list[ARC_BUFC_METADATA];
		goto top;
	}

	if (bufs_skipped) {
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over, todelete;

	top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used;

	if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
		int64_t toevict =
		    MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p);
		(void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA);
		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
	}

	if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		int64_t toevict =
		    MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p);
		(void) arc_evict(arc_mru, NULL, toevict, FALSE,
		    ARC_BUFC_METADATA);
		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
	}

	mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;

	if (mru_over > 0) {
		if (arc_mru_ghost->arcs_size > 0) {
			todelete = MIN(arc_mru_ghost->arcs_size, mru_over);
			arc_evict_ghost(arc_mru_ghost, NULL, todelete);
		}
	}

	if ((arc_over = arc_size - arc_c) > 0) {
		int64_t tbl_over;

		if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
			int64_t toevict =
			    MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over);
			(void) arc_evict(arc_mfu, NULL, toevict, FALSE,
			    ARC_BUFC_DATA);
			arc_over = arc_size - arc_c;
		}

		if (arc_over > 0 &&
		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
			int64_t toevict =
			    MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA],
			    arc_over);
			(void) arc_evict(arc_mfu, NULL, toevict, FALSE,
			    ARC_BUFC_METADATA);
		}

		tbl_over = arc_size + arc_mru_ghost->arcs_size +
		    arc_mfu_ghost->arcs_size - arc_c * 2;

		if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) {
			todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over);
			arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
		}
	}
}

static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		buf->b_hdr = NULL;
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}

/*
 * Flush all *evictable* data from the cache for the given spa.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(spa_t *spa)
{
	while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}

	arc_evict_ghost(arc_mru_ghost, spa, -1);
	arc_evict_ghost(arc_mfu_ghost, spa, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(spa || arc_eviction_list == NULL);
}

int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */

void
arc_shrink(void)
{
	if (arc_c > arc_c_min) {
		uint64_t to_free;

#ifdef _KERNEL
		to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
#else
		to_free = arc_c >> arc_shrink_shift;
#endif
		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (arc_size > arc_c)
		arc_adjust();
}
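
/*
 * Worked example for arc_shrink() above: with the default
 * arc_shrink_shift of 5, a 4GB target (arc_c) is cut by
 * 4GB >> 5 = 128MB per call (or by ptob(needfree) if that is larger),
 * bounded below by arc_c_min; arc_p likewise drops by 1/32 of its own
 * value.
 */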

static int
arc_reclaim_needed(void)
{
	uint64_t extra;

#ifdef _KERNEL

	if (needfree)
		return (1);

	/*
	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
	 */
	extra = desfree;

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	if (freemem < lotsfree + needfree + extra)
		return (1);

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed.  anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages.  We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
		return (1);

#if defined(__i386)
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25 pages
	 * which is so low that it's useless.  In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
		return (1);
#endif

#else
	if (spa_get_random(100) == 0)
		return (1);
#endif
	return (0);
}
1827 */ 1828 if (strat == ARC_RECLAIM_AGGR) 1829 arc_shrink(); 1830 1831 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1832 if (zio_buf_cache[i] != prev_cache) { 1833 prev_cache = zio_buf_cache[i]; 1834 kmem_cache_reap_now(zio_buf_cache[i]); 1835 } 1836 if (zio_data_buf_cache[i] != prev_data_cache) { 1837 prev_data_cache = zio_data_buf_cache[i]; 1838 kmem_cache_reap_now(zio_data_buf_cache[i]); 1839 } 1840 } 1841 kmem_cache_reap_now(buf_cache); 1842 kmem_cache_reap_now(hdr_cache); 1843 } 1844 1845 static void 1846 arc_reclaim_thread(void) 1847 { 1848 clock_t growtime = 0; 1849 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1850 callb_cpr_t cpr; 1851 1852 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1853 1854 mutex_enter(&arc_reclaim_thr_lock); 1855 while (arc_thread_exit == 0) { 1856 if (arc_reclaim_needed()) { 1857 1858 if (arc_no_grow) { 1859 if (last_reclaim == ARC_RECLAIM_CONS) { 1860 last_reclaim = ARC_RECLAIM_AGGR; 1861 } else { 1862 last_reclaim = ARC_RECLAIM_CONS; 1863 } 1864 } else { 1865 arc_no_grow = TRUE; 1866 last_reclaim = ARC_RECLAIM_AGGR; 1867 membar_producer(); 1868 } 1869 1870 /* reset the growth delay for every reclaim */ 1871 growtime = lbolt + (arc_grow_retry * hz); 1872 1873 arc_kmem_reap_now(last_reclaim); 1874 1875 } else if (arc_no_grow && lbolt >= growtime) { 1876 arc_no_grow = FALSE; 1877 } 1878 1879 if (2 * arc_c < arc_size + 1880 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) 1881 arc_adjust(); 1882 1883 if (arc_eviction_list != NULL) 1884 arc_do_user_evicts(); 1885 1886 /* block until needed, or one second, whichever is shorter */ 1887 CALLB_CPR_SAFE_BEGIN(&cpr); 1888 (void) cv_timedwait(&arc_reclaim_thr_cv, 1889 &arc_reclaim_thr_lock, (lbolt + hz)); 1890 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 1891 } 1892 1893 arc_thread_exit = 0; 1894 cv_broadcast(&arc_reclaim_thr_cv); 1895 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 1896 thread_exit(); 1897 } 1898 1899 /* 1900 * Adapt arc info given the number of bytes we are trying to add and 1901 * the state that we are coming from. This function is only called 1902 * when we are adding new content to the cache. 1903 */ 1904 static void 1905 arc_adapt(int bytes, arc_state_t *state) 1906 { 1907 int mult; 1908 1909 if (state == arc_l2c_only) 1910 return; 1911 1912 ASSERT(bytes > 0); 1913 /* 1914 * Adapt the target size of the MRU list: 1915 * - if we just hit in the MRU ghost list, then increase 1916 * the target size of the MRU list. 1917 * - if we just hit in the MFU ghost list, then increase 1918 * the target size of the MFU list by decreasing the 1919 * target size of the MRU list. 1920 */ 1921 if (state == arc_mru_ghost) { 1922 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 1923 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 1924 1925 arc_p = MIN(arc_c, arc_p + bytes * mult); 1926 } else if (state == arc_mfu_ghost) { 1927 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
1928 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 1929 1930 arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 1931 } 1932 ASSERT((int64_t)arc_p >= 0); 1933 1934 if (arc_reclaim_needed()) { 1935 cv_signal(&arc_reclaim_thr_cv); 1936 return; 1937 } 1938 1939 if (arc_no_grow) 1940 return; 1941 1942 if (arc_c >= arc_c_max) 1943 return; 1944 1945 /* 1946 * If we're within (2 * maxblocksize) bytes of the target 1947 * cache size, increment the target cache size 1948 */ 1949 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 1950 atomic_add_64(&arc_c, (int64_t)bytes); 1951 if (arc_c > arc_c_max) 1952 arc_c = arc_c_max; 1953 else if (state == arc_anon) 1954 atomic_add_64(&arc_p, (int64_t)bytes); 1955 if (arc_p > arc_c) 1956 arc_p = arc_c; 1957 } 1958 ASSERT((int64_t)arc_p >= 0); 1959 } 1960 1961 /* 1962 * Check if the cache has reached its limits and eviction is required 1963 * prior to insert. 1964 */ 1965 static int 1966 arc_evict_needed(arc_buf_contents_t type) 1967 { 1968 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 1969 return (1); 1970 1971 #ifdef _KERNEL 1972 /* 1973 * If zio data pages are being allocated out of a separate heap segment, 1974 * then enforce that the size of available vmem for this area remains 1975 * above about 1/32nd free. 1976 */ 1977 if (type == ARC_BUFC_DATA && zio_arena != NULL && 1978 vmem_size(zio_arena, VMEM_FREE) < 1979 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 1980 return (1); 1981 #endif 1982 1983 if (arc_reclaim_needed()) 1984 return (1); 1985 1986 return (arc_size > arc_c); 1987 } 1988 1989 /* 1990 * The buffer, supplied as the first argument, needs a data block. 1991 * So, if we are at cache max, determine which cache should be victimized. 1992 * We have the following cases: 1993 * 1994 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 1995 * In this situation if we're out of space, but the resident size of the MFU is 1996 * under the limit, victimize the MFU cache to satisfy this insertion request. 1997 * 1998 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 1999 * Here, we've used up all of the available space for the MRU, so we need to 2000 * evict from our own cache instead. Evict from the set of resident MRU 2001 * entries. 2002 * 2003 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2004 * c minus p represents the MFU space in the cache, since p is the size of the 2005 * cache that is dedicated to the MRU. In this situation there's still space on 2006 * the MFU side, so the MRU side needs to be victimized. 2007 * 2008 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2009 * MFU's resident set is consuming more space than it has been allotted. In 2010 * this situation, we must victimize our own cache, the MFU, for this insertion. 2011 */ 2012 static void 2013 arc_get_data_buf(arc_buf_t *buf) 2014 { 2015 arc_state_t *state = buf->b_hdr->b_state; 2016 uint64_t size = buf->b_hdr->b_size; 2017 arc_buf_contents_t type = buf->b_hdr->b_type; 2018 2019 arc_adapt(size, state); 2020 2021 /* 2022 * We have not yet reached cache maximum size, 2023 * just allocate a new buffer. 2024 */ 2025 if (!arc_evict_needed(type)) { 2026 if (type == ARC_BUFC_METADATA) { 2027 buf->b_data = zio_buf_alloc(size); 2028 arc_space_consume(size); 2029 } else { 2030 ASSERT(type == ARC_BUFC_DATA); 2031 buf->b_data = zio_data_buf_alloc(size); 2032 atomic_add_64(&arc_size, size); 2033 } 2034 goto out; 2035 } 2036 2037 /* 2038 * If we are prefetching from the mfu ghost list, this buffer 2039 * will end up on the mru list; so steal space from there. 
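 *
 * Worked example of the victim selection below (editorial, values
 * assumed): if arc_p is 2GB while arc_anon->arcs_size +
 * arc_mru->arcs_size totals only 1.5GB, the MRU side is under its
 * target, so the test below resolves to arc_mfu and the eviction is
 * taken from the MFU list, case 1 of the comment above
 * arc_get_data_buf(); when arc_mfu holds no evictable buffers of
 * this type, we fall back to evicting from arc_mru instead.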
2040 */ 2041 if (state == arc_mfu_ghost) 2042 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2043 else if (state == arc_mru_ghost) 2044 state = arc_mru; 2045 2046 if (state == arc_mru || state == arc_anon) { 2047 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2048 state = (arc_mfu->arcs_lsize[type] > 0 && 2049 arc_p > mru_used) ? arc_mfu : arc_mru; 2050 } else { 2051 /* MFU cases */ 2052 uint64_t mfu_space = arc_c - arc_p; 2053 state = (arc_mru->arcs_lsize[type] > 0 && 2054 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2055 } 2056 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 2057 if (type == ARC_BUFC_METADATA) { 2058 buf->b_data = zio_buf_alloc(size); 2059 arc_space_consume(size); 2060 } else { 2061 ASSERT(type == ARC_BUFC_DATA); 2062 buf->b_data = zio_data_buf_alloc(size); 2063 atomic_add_64(&arc_size, size); 2064 } 2065 ARCSTAT_BUMP(arcstat_recycle_miss); 2066 } 2067 ASSERT(buf->b_data != NULL); 2068 out: 2069 /* 2070 * Update the state size. Note that ghost states have a 2071 * "ghost size" and so don't need to be updated. 2072 */ 2073 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2074 arc_buf_hdr_t *hdr = buf->b_hdr; 2075 2076 atomic_add_64(&hdr->b_state->arcs_size, size); 2077 if (list_link_active(&hdr->b_arc_node)) { 2078 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2079 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2080 } 2081 /* 2082 * If we are growing the cache, and we are adding anonymous 2083 * data, and we have outgrown arc_p, update arc_p 2084 */ 2085 if (arc_size < arc_c && hdr->b_state == arc_anon && 2086 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2087 arc_p = MIN(arc_c, arc_p + size); 2088 } 2089 } 2090 2091 /* 2092 * This routine is called whenever a buffer is accessed. 2093 * NOTE: the hash lock is dropped in this function. 2094 */ 2095 static void 2096 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2097 { 2098 ASSERT(MUTEX_HELD(hash_lock)); 2099 2100 if (buf->b_state == arc_anon) { 2101 /* 2102 * This buffer is not in the cache, and does not 2103 * appear in our "ghost" list. Add the new buffer 2104 * to the MRU state. 2105 */ 2106 2107 ASSERT(buf->b_arc_access == 0); 2108 buf->b_arc_access = lbolt; 2109 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2110 arc_change_state(arc_mru, buf, hash_lock); 2111 2112 } else if (buf->b_state == arc_mru) { 2113 /* 2114 * If this buffer is here because of a prefetch, then either: 2115 * - clear the flag if this is a "referencing" read 2116 * (any subsequent access will bump this into the MFU state). 2117 * or 2118 * - move the buffer to the head of the list if this is 2119 * another prefetch (to make it less likely to be evicted). 2120 */ 2121 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2122 if (refcount_count(&buf->b_refcnt) == 0) { 2123 ASSERT(list_link_active(&buf->b_arc_node)); 2124 } else { 2125 buf->b_flags &= ~ARC_PREFETCH; 2126 ARCSTAT_BUMP(arcstat_mru_hits); 2127 } 2128 buf->b_arc_access = lbolt; 2129 return; 2130 } 2131 2132 /* 2133 * This buffer has been "accessed" only once so far, 2134 * but it is still in the cache. Move it to the MFU 2135 * state. 2136 */ 2137 if (lbolt > buf->b_arc_access + ARC_MINTIME) { 2138 /* 2139 * More than 125ms have passed since we 2140 * instantiated this buffer. Move it to the 2141 * most frequently used state. 
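 *
 * Editorial illustration: accesses that arrive within ARC_MINTIME of
 * the buffer's instantiation count as part of the same initial use,
 * e.g. back-to-back reads issued for one logical operation. In that
 * case the buffer stays in MRU and its access stamp is not reset, so
 * a genuinely later access can still promote it to MFU.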
2142 */ 2143 buf->b_arc_access = lbolt; 2144 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2145 arc_change_state(arc_mfu, buf, hash_lock); 2146 } 2147 ARCSTAT_BUMP(arcstat_mru_hits); 2148 } else if (buf->b_state == arc_mru_ghost) { 2149 arc_state_t *new_state; 2150 /* 2151 * This buffer has been "accessed" recently, but 2152 * was evicted from the cache. Move it to the 2153 * MFU state. 2154 */ 2155 2156 if (buf->b_flags & ARC_PREFETCH) { 2157 new_state = arc_mru; 2158 if (refcount_count(&buf->b_refcnt) > 0) 2159 buf->b_flags &= ~ARC_PREFETCH; 2160 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2161 } else { 2162 new_state = arc_mfu; 2163 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2164 } 2165 2166 buf->b_arc_access = lbolt; 2167 arc_change_state(new_state, buf, hash_lock); 2168 2169 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2170 } else if (buf->b_state == arc_mfu) { 2171 /* 2172 * This buffer has been accessed more than once and is 2173 * still in the cache. Keep it in the MFU state. 2174 * 2175 * NOTE: an add_reference() that occurred when we did 2176 * the arc_read() will have kicked this off the list. 2177 * If it was a prefetch, we will explicitly move it to 2178 * the head of the list now. 2179 */ 2180 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2181 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2182 ASSERT(list_link_active(&buf->b_arc_node)); 2183 } 2184 ARCSTAT_BUMP(arcstat_mfu_hits); 2185 buf->b_arc_access = lbolt; 2186 } else if (buf->b_state == arc_mfu_ghost) { 2187 arc_state_t *new_state = arc_mfu; 2188 /* 2189 * This buffer has been accessed more than once but has 2190 * been evicted from the cache. Move it back to the 2191 * MFU state. 2192 */ 2193 2194 if (buf->b_flags & ARC_PREFETCH) { 2195 /* 2196 * This is a prefetch access... 2197 * move this block back to the MRU state. 2198 */ 2199 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 2200 new_state = arc_mru; 2201 } 2202 2203 buf->b_arc_access = lbolt; 2204 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2205 arc_change_state(new_state, buf, hash_lock); 2206 2207 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2208 } else if (buf->b_state == arc_l2c_only) { 2209 /* 2210 * This buffer is on the 2nd Level ARC. 2211 */ 2212 2213 buf->b_arc_access = lbolt; 2214 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2215 arc_change_state(arc_mfu, buf, hash_lock); 2216 } else { 2217 ASSERT(!"invalid arc state"); 2218 } 2219 } 2220 2221 /* a generic arc_done_func_t which you can use */ 2222 /* ARGSUSED */ 2223 void 2224 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2225 { 2226 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2227 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2228 } 2229 2230 /* a generic arc_done_func_t */ 2231 void 2232 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2233 { 2234 arc_buf_t **bufp = arg; 2235 if (zio && zio->io_error) { 2236 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2237 *bufp = NULL; 2238 } else { 2239 *bufp = buf; 2240 } 2241 } 2242 2243 static void 2244 arc_read_done(zio_t *zio) 2245 { 2246 arc_buf_hdr_t *hdr, *found; 2247 arc_buf_t *buf; 2248 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2249 kmutex_t *hash_lock; 2250 arc_callback_t *callback_list, *acb; 2251 int freeable = FALSE; 2252 2253 buf = zio->io_private; 2254 hdr = buf->b_hdr; 2255 2256 /* 2257 * The hdr was inserted into hash-table and removed from lists 2258 * prior to starting I/O. 
We should find this header, since 2259 * it's in the hash table, and it should be legit since it's 2260 * not possible to evict it during the I/O. The only possible 2261 * reason for it not to be found is if we were freed during the 2262 * read. 2263 */ 2264 found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 2265 &hash_lock); 2266 2267 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2268 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2269 (found == hdr && HDR_L2_READING(hdr))); 2270 2271 hdr->b_flags &= ~(ARC_L2_READING|ARC_L2_EVICTED); 2272 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2273 hdr->b_flags |= ARC_DONT_L2CACHE; 2274 2275 /* byteswap if necessary */ 2276 callback_list = hdr->b_acb; 2277 ASSERT(callback_list != NULL); 2278 if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 2279 callback_list->acb_byteswap(buf->b_data, hdr->b_size); 2280 2281 arc_cksum_compute(buf, B_FALSE); 2282 2283 /* create copies of the data buffer for the callers */ 2284 abuf = buf; 2285 for (acb = callback_list; acb; acb = acb->acb_next) { 2286 if (acb->acb_done) { 2287 if (abuf == NULL) 2288 abuf = arc_buf_clone(buf); 2289 acb->acb_buf = abuf; 2290 abuf = NULL; 2291 } 2292 } 2293 hdr->b_acb = NULL; 2294 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2295 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2296 if (abuf == buf) 2297 hdr->b_flags |= ARC_BUF_AVAILABLE; 2298 2299 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2300 2301 if (zio->io_error != 0) { 2302 hdr->b_flags |= ARC_IO_ERROR; 2303 if (hdr->b_state != arc_anon) 2304 arc_change_state(arc_anon, hdr, hash_lock); 2305 if (HDR_IN_HASH_TABLE(hdr)) 2306 buf_hash_remove(hdr); 2307 freeable = refcount_is_zero(&hdr->b_refcnt); 2308 /* convert checksum errors into IO errors */ 2309 if (zio->io_error == ECKSUM) 2310 zio->io_error = EIO; 2311 } 2312 2313 /* 2314 * Broadcast before we drop the hash_lock to avoid the possibility 2315 * that the hdr (and hence the cv) might be freed before we get to 2316 * the cv_broadcast(). 2317 */ 2318 cv_broadcast(&hdr->b_cv); 2319 2320 if (hash_lock) { 2321 /* 2322 * Only call arc_access on anonymous buffers. This is because 2323 * if we've issued an I/O for an evicted buffer, we've already 2324 * called arc_access (to prevent any simultaneous readers from 2325 * getting confused). 2326 */ 2327 if (zio->io_error == 0 && hdr->b_state == arc_anon) 2328 arc_access(hdr, hash_lock); 2329 mutex_exit(hash_lock); 2330 } else { 2331 /* 2332 * This block was freed while we waited for the read to 2333 * complete. It has been removed from the hash table and 2334 * moved to the anonymous state (so that it won't show up 2335 * in the cache). 2336 */ 2337 ASSERT3P(hdr->b_state, ==, arc_anon); 2338 freeable = refcount_is_zero(&hdr->b_refcnt); 2339 } 2340 2341 /* execute each callback and free its structure */ 2342 while ((acb = callback_list) != NULL) { 2343 if (acb->acb_done) 2344 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2345 2346 if (acb->acb_zio_dummy != NULL) { 2347 acb->acb_zio_dummy->io_error = zio->io_error; 2348 zio_nowait(acb->acb_zio_dummy); 2349 } 2350 2351 callback_list = acb->acb_next; 2352 kmem_free(acb, sizeof (arc_callback_t)); 2353 } 2354 2355 if (freeable) 2356 arc_hdr_destroy(hdr); 2357 } 2358 2359 /* 2360 * "Read" the block at the specified DVA (in bp) via the 2361 * cache. If the block is found in the cache, invoke the provided 2362 * callback immediately and return.
Note that the `zio' parameter 2363 * in the callback will be NULL in this case, since no IO was 2364 * required. If the block is not in the cache pass the read request 2365 * on to the spa with a substitute callback function, so that the 2366 * requested block will be added to the cache. 2367 * 2368 * If a read request arrives for a block that has a read in-progress, 2369 * either wait for the in-progress read to complete (and return the 2370 * results); or, if this is a read with a "done" func, add a record 2371 * to the read to invoke the "done" func when the read completes, 2372 * and return; or just return. 2373 * 2374 * arc_read_done() will invoke all the requested "done" functions 2375 * for readers of this block. 2376 */ 2377 int 2378 arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 2379 arc_done_func_t *done, void *private, int priority, int flags, 2380 uint32_t *arc_flags, zbookmark_t *zb) 2381 { 2382 arc_buf_hdr_t *hdr; 2383 arc_buf_t *buf; 2384 kmutex_t *hash_lock; 2385 zio_t *rzio; 2386 2387 top: 2388 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2389 if (hdr && hdr->b_datacnt > 0) { 2390 2391 *arc_flags |= ARC_CACHED; 2392 2393 if (HDR_IO_IN_PROGRESS(hdr)) { 2394 2395 if (*arc_flags & ARC_WAIT) { 2396 cv_wait(&hdr->b_cv, hash_lock); 2397 mutex_exit(hash_lock); 2398 goto top; 2399 } 2400 ASSERT(*arc_flags & ARC_NOWAIT); 2401 2402 if (done) { 2403 arc_callback_t *acb = NULL; 2404 2405 acb = kmem_zalloc(sizeof (arc_callback_t), 2406 KM_SLEEP); 2407 acb->acb_done = done; 2408 acb->acb_private = private; 2409 acb->acb_byteswap = swap; 2410 if (pio != NULL) 2411 acb->acb_zio_dummy = zio_null(pio, 2412 spa, NULL, NULL, flags); 2413 2414 ASSERT(acb->acb_done != NULL); 2415 acb->acb_next = hdr->b_acb; 2416 hdr->b_acb = acb; 2417 add_reference(hdr, hash_lock, private); 2418 mutex_exit(hash_lock); 2419 return (0); 2420 } 2421 mutex_exit(hash_lock); 2422 return (0); 2423 } 2424 2425 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2426 2427 if (done) { 2428 add_reference(hdr, hash_lock, private); 2429 /* 2430 * If this block is already in use, create a new 2431 * copy of the data so that we will be guaranteed 2432 * that arc_release() will always succeed. 
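 *
 * Editorial illustration: if two arc_read() callers hit the same
 * cached block, the first may be handed the existing buf (clearing
 * ARC_BUF_AVAILABLE below) while the second receives an
 * arc_buf_clone() copy. Each caller then owns a private b_data, so
 * one caller releasing its buffer for rewrite cannot disturb the
 * other's view of the data.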
2433 */ 2434 buf = hdr->b_buf; 2435 ASSERT(buf); 2436 ASSERT(buf->b_data); 2437 if (HDR_BUF_AVAILABLE(hdr)) { 2438 ASSERT(buf->b_efunc == NULL); 2439 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2440 } else { 2441 buf = arc_buf_clone(buf); 2442 } 2443 } else if (*arc_flags & ARC_PREFETCH && 2444 refcount_count(&hdr->b_refcnt) == 0) { 2445 hdr->b_flags |= ARC_PREFETCH; 2446 } 2447 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2448 arc_access(hdr, hash_lock); 2449 mutex_exit(hash_lock); 2450 ARCSTAT_BUMP(arcstat_hits); 2451 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2452 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2453 data, metadata, hits); 2454 2455 if (done) 2456 done(NULL, buf, private); 2457 } else { 2458 uint64_t size = BP_GET_LSIZE(bp); 2459 arc_callback_t *acb; 2460 2461 if (hdr == NULL) { 2462 /* this block is not in the cache */ 2463 arc_buf_hdr_t *exists; 2464 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2465 buf = arc_buf_alloc(spa, size, private, type); 2466 hdr = buf->b_hdr; 2467 hdr->b_dva = *BP_IDENTITY(bp); 2468 hdr->b_birth = bp->blk_birth; 2469 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2470 exists = buf_hash_insert(hdr, &hash_lock); 2471 if (exists) { 2472 /* somebody beat us to the hash insert */ 2473 mutex_exit(hash_lock); 2474 bzero(&hdr->b_dva, sizeof (dva_t)); 2475 hdr->b_birth = 0; 2476 hdr->b_cksum0 = 0; 2477 (void) arc_buf_remove_ref(buf, private); 2478 goto top; /* restart the IO request */ 2479 } 2480 /* if this is a prefetch, we don't have a reference */ 2481 if (*arc_flags & ARC_PREFETCH) { 2482 (void) remove_reference(hdr, hash_lock, 2483 private); 2484 hdr->b_flags |= ARC_PREFETCH; 2485 } 2486 if (BP_GET_LEVEL(bp) > 0) 2487 hdr->b_flags |= ARC_INDIRECT; 2488 } else { 2489 /* this block is in the ghost cache */ 2490 ASSERT(GHOST_STATE(hdr->b_state)); 2491 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2492 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2493 ASSERT(hdr->b_buf == NULL); 2494 2495 /* if this is a prefetch, we don't have a reference */ 2496 if (*arc_flags & ARC_PREFETCH) 2497 hdr->b_flags |= ARC_PREFETCH; 2498 else 2499 add_reference(hdr, hash_lock, private); 2500 buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 2501 buf->b_hdr = hdr; 2502 buf->b_data = NULL; 2503 buf->b_efunc = NULL; 2504 buf->b_private = NULL; 2505 buf->b_next = NULL; 2506 hdr->b_buf = buf; 2507 arc_get_data_buf(buf); 2508 ASSERT(hdr->b_datacnt == 0); 2509 hdr->b_datacnt = 1; 2510 2511 } 2512 2513 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2514 acb->acb_done = done; 2515 acb->acb_private = private; 2516 acb->acb_byteswap = swap; 2517 2518 ASSERT(hdr->b_acb == NULL); 2519 hdr->b_acb = acb; 2520 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2521 2522 /* 2523 * If the buffer has been evicted, migrate it to a present state 2524 * before issuing the I/O. Once we drop the hash-table lock, 2525 * the header will be marked as I/O in progress and have an 2526 * attached buffer. At this point, anybody who finds this 2527 * buffer ought to notice that it's legit but has a pending I/O. 2528 */ 2529 2530 if (GHOST_STATE(hdr->b_state)) 2531 arc_access(hdr, hash_lock); 2532 2533 ASSERT3U(hdr->b_size, ==, size); 2534 DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 2535 zbookmark_t *, zb); 2536 ARCSTAT_BUMP(arcstat_misses); 2537 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2538 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2539 data, metadata, misses); 2540 2541 if (l2arc_ndev != 0) { 2542 /* 2543 * Read from the L2ARC if the following are true: 2544 * 1. 
This buffer has L2ARC metadata. 2545 * 2. This buffer isn't currently writing to the L2ARC. 2546 */ 2547 if (hdr->b_l2hdr != NULL && !HDR_L2_WRITING(hdr)) { 2548 vdev_t *vd = hdr->b_l2hdr->b_dev->l2ad_vdev; 2549 daddr_t addr = hdr->b_l2hdr->b_daddr; 2550 l2arc_read_callback_t *cb; 2551 2552 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 2553 ARCSTAT_BUMP(arcstat_l2_hits); 2554 2555 hdr->b_flags |= ARC_L2_READING; 2556 mutex_exit(hash_lock); 2557 2558 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 2559 KM_SLEEP); 2560 cb->l2rcb_buf = buf; 2561 cb->l2rcb_spa = spa; 2562 cb->l2rcb_bp = *bp; 2563 cb->l2rcb_zb = *zb; 2564 cb->l2rcb_flags = flags; 2565 2566 /* 2567 * l2arc read. 2568 */ 2569 rzio = zio_read_phys(pio, vd, addr, size, 2570 buf->b_data, ZIO_CHECKSUM_OFF, 2571 l2arc_read_done, cb, priority, 2572 flags | ZIO_FLAG_DONT_CACHE, B_FALSE); 2573 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 2574 zio_t *, rzio); 2575 2576 if (*arc_flags & ARC_WAIT) 2577 return (zio_wait(rzio)); 2578 2579 ASSERT(*arc_flags & ARC_NOWAIT); 2580 zio_nowait(rzio); 2581 return (0); 2582 } else { 2583 DTRACE_PROBE1(l2arc__miss, 2584 arc_buf_hdr_t *, hdr); 2585 ARCSTAT_BUMP(arcstat_l2_misses); 2586 if (HDR_L2_WRITING(hdr)) 2587 ARCSTAT_BUMP(arcstat_l2_rw_clash); 2588 } 2589 } 2590 mutex_exit(hash_lock); 2591 2592 rzio = zio_read(pio, spa, bp, buf->b_data, size, 2593 arc_read_done, buf, priority, flags, zb); 2594 2595 if (*arc_flags & ARC_WAIT) 2596 return (zio_wait(rzio)); 2597 2598 ASSERT(*arc_flags & ARC_NOWAIT); 2599 zio_nowait(rzio); 2600 } 2601 return (0); 2602 } 2603 2604 /* 2605 * arc_read() variant to support pool traversal. If the block is already 2606 * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2607 * The idea is that we don't want pool traversal filling up memory, but 2608 * if the ARC already has the data anyway, we shouldn't pay for the I/O. 2609 */ 2610 int 2611 arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2612 { 2613 arc_buf_hdr_t *hdr; 2614 kmutex_t *hash_mtx; 2615 int rc = 0; 2616 2617 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2618 2619 if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 2620 arc_buf_t *buf = hdr->b_buf; 2621 2622 ASSERT(buf); 2623 while (buf->b_data == NULL) { 2624 buf = buf->b_next; 2625 ASSERT(buf); 2626 } 2627 bcopy(buf->b_data, data, hdr->b_size); 2628 } else { 2629 rc = ENOENT; 2630 } 2631 2632 if (hash_mtx) 2633 mutex_exit(hash_mtx); 2634 2635 return (rc); 2636 } 2637 2638 void 2639 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 2640 { 2641 ASSERT(buf->b_hdr != NULL); 2642 ASSERT(buf->b_hdr->b_state != arc_anon); 2643 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 2644 buf->b_efunc = func; 2645 buf->b_private = private; 2646 } 2647 2648 /* 2649 * This is used by the DMU to let the ARC know that a buffer is 2650 * being evicted, so the ARC should clean up. If this arc buf 2651 * is not yet in the evicted state, it will be put there. 2652 */ 2653 int 2654 arc_buf_evict(arc_buf_t *buf) 2655 { 2656 arc_buf_hdr_t *hdr; 2657 kmutex_t *hash_lock; 2658 arc_buf_t **bufp; 2659 2660 mutex_enter(&arc_eviction_mtx); 2661 hdr = buf->b_hdr; 2662 if (hdr == NULL) { 2663 /* 2664 * We are in arc_do_user_evicts(). 
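 *
 * Editorial note: arc_do_user_evicts() clears buf->b_hdr under
 * arc_eviction_mtx before invoking b_efunc, so a NULL hdr here means
 * the reclaim thread already owns this buffer's callback; returning
 * 0 avoids invoking b_efunc a second time.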
2665 */ 2666 ASSERT(buf->b_data == NULL); 2667 mutex_exit(&arc_eviction_mtx); 2668 return (0); 2669 } 2670 hash_lock = HDR_LOCK(hdr); 2671 mutex_exit(&arc_eviction_mtx); 2672 2673 mutex_enter(hash_lock); 2674 2675 if (buf->b_data == NULL) { 2676 /* 2677 * We are on the eviction list. 2678 */ 2679 mutex_exit(hash_lock); 2680 mutex_enter(&arc_eviction_mtx); 2681 if (buf->b_hdr == NULL) { 2682 /* 2683 * We are already in arc_do_user_evicts(). 2684 */ 2685 mutex_exit(&arc_eviction_mtx); 2686 return (0); 2687 } else { 2688 arc_buf_t copy = *buf; /* structure assignment */ 2689 /* 2690 * Process this buffer now 2691 * but let arc_do_user_evicts() do the reaping. 2692 */ 2693 buf->b_efunc = NULL; 2694 mutex_exit(&arc_eviction_mtx); 2695 VERIFY(copy.b_efunc(&copy) == 0); 2696 return (1); 2697 } 2698 } 2699 2700 ASSERT(buf->b_hdr == hdr); 2701 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 2702 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2703 2704 /* 2705 * Pull this buffer off of the hdr 2706 */ 2707 bufp = &hdr->b_buf; 2708 while (*bufp != buf) 2709 bufp = &(*bufp)->b_next; 2710 *bufp = buf->b_next; 2711 2712 ASSERT(buf->b_data != NULL); 2713 arc_buf_destroy(buf, FALSE, FALSE); 2714 2715 if (hdr->b_datacnt == 0) { 2716 arc_state_t *old_state = hdr->b_state; 2717 arc_state_t *evicted_state; 2718 2719 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2720 2721 evicted_state = 2722 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 2723 2724 mutex_enter(&old_state->arcs_mtx); 2725 mutex_enter(&evicted_state->arcs_mtx); 2726 2727 arc_change_state(evicted_state, hdr, hash_lock); 2728 ASSERT(HDR_IN_HASH_TABLE(hdr)); 2729 hdr->b_flags |= ARC_IN_HASH_TABLE; 2730 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2731 2732 mutex_exit(&evicted_state->arcs_mtx); 2733 mutex_exit(&old_state->arcs_mtx); 2734 } 2735 mutex_exit(hash_lock); 2736 2737 VERIFY(buf->b_efunc(buf) == 0); 2738 buf->b_efunc = NULL; 2739 buf->b_private = NULL; 2740 buf->b_hdr = NULL; 2741 kmem_cache_free(buf_cache, buf); 2742 return (1); 2743 } 2744 2745 /* 2746 * Release this buffer from the cache. This must be done 2747 * after a read and prior to modifying the buffer contents. 2748 * If the buffer has more than one reference, we must make 2749 * a new hdr for the buffer. 2750 */ 2751 void 2752 arc_release(arc_buf_t *buf, void *tag) 2753 { 2754 arc_buf_hdr_t *hdr = buf->b_hdr; 2755 kmutex_t *hash_lock = HDR_LOCK(hdr); 2756 l2arc_buf_hdr_t *l2hdr = NULL; 2757 uint64_t buf_size; 2758 2759 /* this buffer is not on any list */ 2760 ASSERT(refcount_count(&hdr->b_refcnt) > 0); 2761 2762 if (hdr->b_state == arc_anon) { 2763 /* this buffer is already released */ 2764 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 2765 ASSERT(BUF_EMPTY(hdr)); 2766 ASSERT(buf->b_efunc == NULL); 2767 arc_buf_thaw(buf); 2768 return; 2769 } 2770 2771 mutex_enter(hash_lock); 2772 2773 /* 2774 * Do we have more than one buf? 2775 */ 2776 if (hdr->b_buf != buf || buf->b_next != NULL) { 2777 arc_buf_hdr_t *nhdr; 2778 arc_buf_t **bufp; 2779 uint64_t blksz = hdr->b_size; 2780 spa_t *spa = hdr->b_spa; 2781 arc_buf_contents_t type = hdr->b_type; 2782 uint32_t flags = hdr->b_flags; 2783 2784 ASSERT(hdr->b_datacnt > 1); 2785 /* 2786 * Pull the data off of this buf and attach it to 2787 * a new anonymous buf.
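 *
 * Editorial illustration: with b_datacnt == 3 and one buf being
 * released, that buf is unlinked from the hdr's b_buf chain below,
 * b_size is subtracted from the old state's arcs_size, and a fresh
 * hdr in the arc_anon state is allocated to carry it; the other two
 * bufs keep sharing the original, still-cached hdr.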
2788 */ 2789 (void) remove_reference(hdr, hash_lock, tag); 2790 bufp = &hdr->b_buf; 2791 while (*bufp != buf) 2792 bufp = &(*bufp)->b_next; 2793 *bufp = (*bufp)->b_next; 2794 buf->b_next = NULL; 2795 2796 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 2797 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 2798 if (refcount_is_zero(&hdr->b_refcnt)) { 2799 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 2800 ASSERT3U(*size, >=, hdr->b_size); 2801 atomic_add_64(size, -hdr->b_size); 2802 } 2803 hdr->b_datacnt -= 1; 2804 if (hdr->b_l2hdr != NULL) { 2805 mutex_enter(&l2arc_buflist_mtx); 2806 l2hdr = hdr->b_l2hdr; 2807 hdr->b_l2hdr = NULL; 2808 buf_size = hdr->b_size; 2809 } 2810 arc_cksum_verify(buf); 2811 2812 mutex_exit(hash_lock); 2813 2814 nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP); 2815 nhdr->b_size = blksz; 2816 nhdr->b_spa = spa; 2817 nhdr->b_type = type; 2818 nhdr->b_buf = buf; 2819 nhdr->b_state = arc_anon; 2820 nhdr->b_arc_access = 0; 2821 nhdr->b_flags = flags & ARC_L2_WRITING; 2822 nhdr->b_l2hdr = NULL; 2823 nhdr->b_datacnt = 1; 2824 nhdr->b_freeze_cksum = NULL; 2825 (void) refcount_add(&nhdr->b_refcnt, tag); 2826 buf->b_hdr = nhdr; 2827 atomic_add_64(&arc_anon->arcs_size, blksz); 2828 } else { 2829 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 2830 ASSERT(!list_link_active(&hdr->b_arc_node)); 2831 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2832 arc_change_state(arc_anon, hdr, hash_lock); 2833 hdr->b_arc_access = 0; 2834 if (hdr->b_l2hdr != NULL) { 2835 mutex_enter(&l2arc_buflist_mtx); 2836 l2hdr = hdr->b_l2hdr; 2837 hdr->b_l2hdr = NULL; 2838 buf_size = hdr->b_size; 2839 } 2840 mutex_exit(hash_lock); 2841 2842 bzero(&hdr->b_dva, sizeof (dva_t)); 2843 hdr->b_birth = 0; 2844 hdr->b_cksum0 = 0; 2845 arc_buf_thaw(buf); 2846 } 2847 buf->b_efunc = NULL; 2848 buf->b_private = NULL; 2849 2850 if (l2hdr) { 2851 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 2852 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 2853 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 2854 } 2855 if (MUTEX_HELD(&l2arc_buflist_mtx)) 2856 mutex_exit(&l2arc_buflist_mtx); 2857 } 2858 2859 int 2860 arc_released(arc_buf_t *buf) 2861 { 2862 return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 2863 } 2864 2865 int 2866 arc_has_callback(arc_buf_t *buf) 2867 { 2868 return (buf->b_efunc != NULL); 2869 } 2870 2871 #ifdef ZFS_DEBUG 2872 int 2873 arc_referenced(arc_buf_t *buf) 2874 { 2875 return (refcount_count(&buf->b_hdr->b_refcnt)); 2876 } 2877 #endif 2878 2879 static void 2880 arc_write_ready(zio_t *zio) 2881 { 2882 arc_write_callback_t *callback = zio->io_private; 2883 arc_buf_t *buf = callback->awcb_buf; 2884 arc_buf_hdr_t *hdr = buf->b_hdr; 2885 2886 if (zio->io_error == 0 && callback->awcb_ready) { 2887 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 2888 callback->awcb_ready(zio, buf, callback->awcb_private); 2889 } 2890 /* 2891 * If the IO is already in progress, then this is a re-write 2892 * attempt, so we need to thaw and re-compute the cksum. It is 2893 * the responsibility of the callback to handle the freeing 2894 * and accounting for any re-write attempt. If we don't have a 2895 * callback registered then simply free the block here. 
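 *
 * Editorial note: in the rewrite case below the stale freeze
 * checksum is discarded under b_freeze_lock and, when no ready
 * callback was registered to account for it (and io_bp_orig is not
 * a hole), the previously written block is freed here;
 * arc_cksum_compute() then re-freezes the new contents before
 * ARC_IO_IN_PROGRESS is set again.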
2896 */ 2897 if (HDR_IO_IN_PROGRESS(hdr)) { 2898 if (!BP_IS_HOLE(&zio->io_bp_orig) && 2899 callback->awcb_ready == NULL) { 2900 zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg, 2901 &zio->io_bp_orig, NULL, NULL)); 2902 } 2903 mutex_enter(&hdr->b_freeze_lock); 2904 if (hdr->b_freeze_cksum != NULL) { 2905 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 2906 hdr->b_freeze_cksum = NULL; 2907 } 2908 mutex_exit(&hdr->b_freeze_lock); 2909 } 2910 arc_cksum_compute(buf, B_FALSE); 2911 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2912 } 2913 2914 static void 2915 arc_write_done(zio_t *zio) 2916 { 2917 arc_write_callback_t *callback = zio->io_private; 2918 arc_buf_t *buf = callback->awcb_buf; 2919 arc_buf_hdr_t *hdr = buf->b_hdr; 2920 2921 hdr->b_acb = NULL; 2922 2923 /* this buffer is on no lists and is not in the hash table */ 2924 ASSERT3P(hdr->b_state, ==, arc_anon); 2925 2926 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 2927 hdr->b_birth = zio->io_bp->blk_birth; 2928 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 2929 /* 2930 * If the block to be written was all-zero, we may have 2931 * compressed it away. In this case no write was performed 2932 * so there will be no dva/birth-date/checksum. The buffer 2933 * must therefore remain anonymous (and uncached). 2934 */ 2935 if (!BUF_EMPTY(hdr)) { 2936 arc_buf_hdr_t *exists; 2937 kmutex_t *hash_lock; 2938 2939 arc_cksum_verify(buf); 2940 2941 exists = buf_hash_insert(hdr, &hash_lock); 2942 if (exists) { 2943 /* 2944 * This can only happen if we overwrite for 2945 * sync-to-convergence, because we remove 2946 * buffers from the hash table when we arc_free(). 2947 */ 2948 ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 2949 BP_IDENTITY(zio->io_bp))); 2950 ASSERT3U(zio->io_bp_orig.blk_birth, ==, 2951 zio->io_bp->blk_birth); 2952 2953 ASSERT(refcount_is_zero(&exists->b_refcnt)); 2954 arc_change_state(arc_anon, exists, hash_lock); 2955 mutex_exit(hash_lock); 2956 arc_hdr_destroy(exists); 2957 exists = buf_hash_insert(hdr, &hash_lock); 2958 ASSERT3P(exists, ==, NULL); 2959 } 2960 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2961 arc_access(hdr, hash_lock); 2962 mutex_exit(hash_lock); 2963 } else if (callback->awcb_done == NULL) { 2964 int destroy_hdr; 2965 /* 2966 * This is an anonymous buffer with no user callback, 2967 * destroy it if there are no active references.
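 *
 * Editorial illustration: this arises, for example, when an
 * all-zero block was compressed away, so BUF_EMPTY(hdr) holds per
 * the comment above, and the writer supplied no awcb_done callback.
 * destroy_hdr is evaluated under arc_eviction_mtx, and once the last
 * reference has been dropped the header is reclaimed here rather
 * than lingering uncached.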
2968 */ 2969 mutex_enter(&arc_eviction_mtx); 2970 destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 2971 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2972 mutex_exit(&arc_eviction_mtx); 2973 if (destroy_hdr) 2974 arc_hdr_destroy(hdr); 2975 } else { 2976 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2977 } 2978 2979 if (callback->awcb_done) { 2980 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 2981 callback->awcb_done(zio, buf, callback->awcb_private); 2982 } 2983 2984 kmem_free(callback, sizeof (arc_write_callback_t)); 2985 } 2986 2987 zio_t * 2988 arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, 2989 uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 2990 arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 2991 int flags, zbookmark_t *zb) 2992 { 2993 arc_buf_hdr_t *hdr = buf->b_hdr; 2994 arc_write_callback_t *callback; 2995 zio_t *zio; 2996 2997 /* this is a private buffer - no locking required */ 2998 ASSERT3P(hdr->b_state, ==, arc_anon); 2999 ASSERT(BUF_EMPTY(hdr)); 3000 ASSERT(!HDR_IO_ERROR(hdr)); 3001 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3002 ASSERT(hdr->b_acb == 0); 3003 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3004 callback->awcb_ready = ready; 3005 callback->awcb_done = done; 3006 callback->awcb_private = private; 3007 callback->awcb_buf = buf; 3008 zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, 3009 buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback, 3010 priority, flags, zb); 3011 3012 return (zio); 3013 } 3014 3015 int 3016 arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 3017 zio_done_func_t *done, void *private, uint32_t arc_flags) 3018 { 3019 arc_buf_hdr_t *ab; 3020 kmutex_t *hash_lock; 3021 zio_t *zio; 3022 3023 /* 3024 * If this buffer is in the cache, release it, so it 3025 * can be re-used. 3026 */ 3027 ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 3028 if (ab != NULL) { 3029 /* 3030 * The checksum of blocks to free is not always 3031 * preserved (eg. on the deadlist). However, if it is 3032 * nonzero, it should match what we have in the cache. 3033 */ 3034 ASSERT(bp->blk_cksum.zc_word[0] == 0 || 3035 ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 3036 if (ab->b_state != arc_anon) 3037 arc_change_state(arc_anon, ab, hash_lock); 3038 if (HDR_IO_IN_PROGRESS(ab)) { 3039 /* 3040 * This should only happen when we prefetch. 3041 */ 3042 ASSERT(ab->b_flags & ARC_PREFETCH); 3043 ASSERT3U(ab->b_datacnt, ==, 1); 3044 ab->b_flags |= ARC_FREED_IN_READ; 3045 if (HDR_IN_HASH_TABLE(ab)) 3046 buf_hash_remove(ab); 3047 ab->b_arc_access = 0; 3048 bzero(&ab->b_dva, sizeof (dva_t)); 3049 ab->b_birth = 0; 3050 ab->b_cksum0 = 0; 3051 ab->b_buf->b_efunc = NULL; 3052 ab->b_buf->b_private = NULL; 3053 mutex_exit(hash_lock); 3054 } else if (refcount_is_zero(&ab->b_refcnt)) { 3055 ab->b_flags |= ARC_FREE_IN_PROGRESS; 3056 mutex_exit(hash_lock); 3057 arc_hdr_destroy(ab); 3058 ARCSTAT_BUMP(arcstat_deleted); 3059 } else { 3060 /* 3061 * We still have an active reference on this 3062 * buffer. This can happen, e.g., from 3063 * dbuf_unoverride(). 
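 *
 * Editorial note: the header cannot be destroyed while referenced,
 * so the code below strips its identity instead: the DVA, birth txg
 * and cksum0 are zeroed and the eviction callback is detached,
 * leaving an effectively anonymous buffer that is cleaned up when
 * its last reference goes away.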
3064 */ 3065 ASSERT(!HDR_IN_HASH_TABLE(ab)); 3066 ab->b_arc_access = 0; 3067 bzero(&ab->b_dva, sizeof (dva_t)); 3068 ab->b_birth = 0; 3069 ab->b_cksum0 = 0; 3070 ab->b_buf->b_efunc = NULL; 3071 ab->b_buf->b_private = NULL; 3072 mutex_exit(hash_lock); 3073 } 3074 } 3075 3076 zio = zio_free(pio, spa, txg, bp, done, private); 3077 3078 if (arc_flags & ARC_WAIT) 3079 return (zio_wait(zio)); 3080 3081 ASSERT(arc_flags & ARC_NOWAIT); 3082 zio_nowait(zio); 3083 3084 return (0); 3085 } 3086 3087 void 3088 arc_tempreserve_clear(uint64_t tempreserve) 3089 { 3090 atomic_add_64(&arc_tempreserve, -tempreserve); 3091 ASSERT((int64_t)arc_tempreserve >= 0); 3092 } 3093 3094 int 3095 arc_tempreserve_space(uint64_t tempreserve) 3096 { 3097 #ifdef ZFS_DEBUG 3098 /* 3099 * Once in a while, fail for no reason. Everything should cope. 3100 */ 3101 if (spa_get_random(10000) == 0) { 3102 dprintf("forcing random failure\n"); 3103 return (ERESTART); 3104 } 3105 #endif 3106 if (tempreserve > arc_c/4 && !arc_no_grow) 3107 arc_c = MIN(arc_c_max, tempreserve * 4); 3108 if (tempreserve > arc_c) 3109 return (ENOMEM); 3110 3111 /* 3112 * Throttle writes when the amount of dirty data in the cache 3113 * gets too large. We try to keep the cache less than half full 3114 * of dirty blocks so that our sync times don't grow too large. 3115 * Note: if two requests come in concurrently, we might let them 3116 * both succeed, when one of them should fail. Not a huge deal. 3117 * 3118 * XXX The limit should be adjusted dynamically to keep the time 3119 * to sync a dataset fixed (around 1-5 seconds?). 3120 */ 3121 3122 if (tempreserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 3123 arc_tempreserve + arc_anon->arcs_size > arc_c / 4) { 3124 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3125 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3126 arc_tempreserve>>10, 3127 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3128 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3129 tempreserve>>10, arc_c>>10); 3130 return (ERESTART); 3131 } 3132 atomic_add_64(&arc_tempreserve, tempreserve); 3133 return (0); 3134 } 3135 3136 void 3137 arc_init(void) 3138 { 3139 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3140 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3141 3142 /* Convert seconds to clock ticks */ 3143 arc_min_prefetch_lifespan = 1 * hz; 3144 3145 /* Start out with 1/8 of all memory */ 3146 arc_c = physmem * PAGESIZE / 8; 3147 3148 #ifdef _KERNEL 3149 /* 3150 * On architectures where the physical memory can be larger 3151 * than the addressable space (intel in 32-bit mode), we may 3152 * need to limit the cache to 1/8 of VM size. 3153 */ 3154 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3155 #endif 3156 3157 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 3158 arc_c_min = MAX(arc_c / 4, 64<<20); 3159 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 3160 if (arc_c * 8 >= 1<<30) 3161 arc_c_max = (arc_c * 8) - (1<<30); 3162 else 3163 arc_c_max = arc_c_min; 3164 arc_c_max = MAX(arc_c * 6, arc_c_max); 3165 3166 /* 3167 * Allow the tunables to override our calculations if they are 3168 * reasonable (ie. 
over 64MB) 3169 */ 3170 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 3171 arc_c_max = zfs_arc_max; 3172 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 3173 arc_c_min = zfs_arc_min; 3174 3175 arc_c = arc_c_max; 3176 arc_p = (arc_c >> 1); 3177 3178 /* limit meta-data to 1/4 of the arc capacity */ 3179 arc_meta_limit = arc_c_max / 4; 3180 3181 /* Allow the tunable to override if it is reasonable */ 3182 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3183 arc_meta_limit = zfs_arc_meta_limit; 3184 3185 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3186 arc_c_min = arc_meta_limit / 2; 3187 3188 /* if kmem_flags are set, lets try to use less memory */ 3189 if (kmem_debugging()) 3190 arc_c = arc_c / 2; 3191 if (arc_c < arc_c_min) 3192 arc_c = arc_c_min; 3193 3194 arc_anon = &ARC_anon; 3195 arc_mru = &ARC_mru; 3196 arc_mru_ghost = &ARC_mru_ghost; 3197 arc_mfu = &ARC_mfu; 3198 arc_mfu_ghost = &ARC_mfu_ghost; 3199 arc_l2c_only = &ARC_l2c_only; 3200 arc_size = 0; 3201 3202 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3203 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3204 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3205 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3206 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3207 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3208 3209 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 3210 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3211 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 3212 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3213 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 3214 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3215 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 3216 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3217 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 3218 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3219 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 3220 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3221 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 3222 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3223 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 3224 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3225 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 3226 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3227 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 3228 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3229 3230 buf_init(); 3231 3232 arc_thread_exit = 0; 3233 arc_eviction_list = NULL; 3234 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3235 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3236 3237 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3238 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3239 3240 if (arc_ksp != NULL) { 3241 arc_ksp->ks_data = &arc_stats; 3242 kstat_install(arc_ksp); 3243 } 3244 3245 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3246 TS_RUN, minclsyspri); 3247 3248 arc_dead = FALSE; 3249 } 3250 3251 void 3252 arc_fini(void) 3253 { 3254 mutex_enter(&arc_reclaim_thr_lock); 3255 arc_thread_exit = 1; 3256 while (arc_thread_exit != 0) 3257 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3258 mutex_exit(&arc_reclaim_thr_lock); 3259 3260 arc_flush(NULL); 3261 3262 
arc_dead = TRUE; 3263 3264 if (arc_ksp != NULL) { 3265 kstat_delete(arc_ksp); 3266 arc_ksp = NULL; 3267 } 3268 3269 mutex_destroy(&arc_eviction_mtx); 3270 mutex_destroy(&arc_reclaim_thr_lock); 3271 cv_destroy(&arc_reclaim_thr_cv); 3272 3273 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 3274 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 3275 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 3276 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 3277 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 3278 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 3279 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 3280 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3281 3282 mutex_destroy(&arc_anon->arcs_mtx); 3283 mutex_destroy(&arc_mru->arcs_mtx); 3284 mutex_destroy(&arc_mru_ghost->arcs_mtx); 3285 mutex_destroy(&arc_mfu->arcs_mtx); 3286 mutex_destroy(&arc_mfu_ghost->arcs_mtx); 3287 3288 buf_fini(); 3289 } 3290 3291 /* 3292 * Level 2 ARC 3293 * 3294 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3295 * It uses dedicated storage devices to hold cached data, which are populated 3296 * using large infrequent writes. The main role of this cache is to boost 3297 * the performance of random read workloads. The intended L2ARC devices 3298 * include short-stroked disks, solid state disks, and other media with 3299 * substantially faster read latency than disk. 3300 * 3301 * +-----------------------+ 3302 * | ARC | 3303 * +-----------------------+ 3304 * | ^ ^ 3305 * | | | 3306 * l2arc_feed_thread() arc_read() 3307 * | | | 3308 * | l2arc read | 3309 * V | | 3310 * +---------------+ | 3311 * | L2ARC | | 3312 * +---------------+ | 3313 * | ^ | 3314 * l2arc_write() | | 3315 * | | | 3316 * V | | 3317 * +-------+ +-------+ 3318 * | vdev | | vdev | 3319 * | cache | | cache | 3320 * +-------+ +-------+ 3321 * +=========+ .-----. 3322 * : L2ARC : |-_____-| 3323 * : devices : | Disks | 3324 * +=========+ `-_____-' 3325 * 3326 * Read requests are satisfied from the following sources, in order: 3327 * 3328 * 1) ARC 3329 * 2) vdev cache of L2ARC devices 3330 * 3) L2ARC devices 3331 * 4) vdev cache of disks 3332 * 5) disks 3333 * 3334 * Some L2ARC device types exhibit extremely slow write performance. 3335 * To accommodate for this there are some significant differences between 3336 * the L2ARC and traditional cache design: 3337 * 3338 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 3339 * the ARC behave as usual, freeing buffers and placing headers on ghost 3340 * lists. The ARC does not send buffers to the L2ARC during eviction as 3341 * this would add inflated write latencies for all ARC memory pressure. 3342 * 3343 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 3344 * It does this by periodically scanning buffers from the eviction-end of 3345 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 3346 * not already there. It scans until a headroom of buffers is satisfied, 3347 * which itself is a buffer for ARC eviction. The thread that does this is 3348 * l2arc_feed_thread(), illustrated below; example sizes are included to 3349 * provide a better sense of ratio than this diagram: 3350 * 3351 * head --> tail 3352 * +---------------------+----------+ 3353 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. 
# already on L2ARC 3354 * +---------------------+----------+ | o L2ARC eligible 3355 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 3356 * +---------------------+----------+ | 3357 * 15.9 Gbytes ^ 32 Mbytes | 3358 * headroom | 3359 * l2arc_feed_thread() 3360 * | 3361 * l2arc write hand <--[oooo]--' 3362 * | 8 Mbyte 3363 * | write max 3364 * V 3365 * +==============================+ 3366 * L2ARC dev |####|#|###|###| |####| ... | 3367 * +==============================+ 3368 * 32 Gbytes 3369 * 3370 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 3371 * evicted, then the L2ARC has cached a buffer much sooner than it probably 3372 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 3373 * safe to say that this is an uncommon case, since buffers at the end of 3374 * the ARC lists have moved there due to inactivity. 3375 * 3376 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 3377 * then the L2ARC simply misses copying some buffers. This serves as a 3378 * pressure valve to prevent heavy read workloads from both stalling the ARC 3379 * with waits and clogging the L2ARC with writes. This also helps prevent 3380 * the potential for the L2ARC to churn if it attempts to cache content too 3381 * quickly, such as during backups of the entire pool. 3382 * 3383 * 5. Writes to the L2ARC devices are grouped and sent in-sequence, so that 3384 * the vdev queue can aggregate them into larger and fewer writes. Each 3385 * device is written to in a rotor fashion, sweeping writes through 3386 * available space then repeating. 3387 * 3388 * 6. The L2ARC does not store dirty content. It never needs to flush 3389 * write buffers back to disk based storage. 3390 * 3391 * 7. If an ARC buffer is written (and dirtied) which also exists in the 3392 * L2ARC, the now stale L2ARC buffer is immediately dropped. 3393 * 3394 * The performance of the L2ARC can be tweaked by a number of tunables, which 3395 * may be necessary for different workloads: 3396 * 3397 * l2arc_write_max max write bytes per interval 3398 * l2arc_noprefetch skip caching prefetched buffers 3399 * l2arc_headroom number of max device writes to precache 3400 * l2arc_feed_secs seconds between L2ARC writing 3401 * 3402 * Tunables may be removed or added as future performance improvements are 3403 * integrated, and also may become zpool properties. 3404 */ 3405 3406 static void 3407 l2arc_hdr_stat_add(void) 3408 { 3409 ARCSTAT_INCR(arcstat_l2_hdr_size, sizeof (arc_buf_hdr_t) + 3410 sizeof (l2arc_buf_hdr_t)); 3411 ARCSTAT_INCR(arcstat_hdr_size, -sizeof (arc_buf_hdr_t)); 3412 } 3413 3414 static void 3415 l2arc_hdr_stat_remove(void) 3416 { 3417 ARCSTAT_INCR(arcstat_l2_hdr_size, -sizeof (arc_buf_hdr_t) - 3418 sizeof (l2arc_buf_hdr_t)); 3419 ARCSTAT_INCR(arcstat_hdr_size, sizeof (arc_buf_hdr_t)); 3420 } 3421 3422 /* 3423 * Cycle through L2ARC devices. This is how L2ARC load balances. 3424 * This is called with l2arc_dev_mtx held, which also locks out spa removal. 3425 */ 3426 static l2arc_dev_t * 3427 l2arc_dev_get_next(void) 3428 { 3429 l2arc_dev_t *next; 3430 3431 if (l2arc_dev_last == NULL) { 3432 next = list_head(l2arc_dev_list); 3433 } else { 3434 next = list_next(l2arc_dev_list, l2arc_dev_last); 3435 if (next == NULL) 3436 next = list_head(l2arc_dev_list); 3437 } 3438 3439 l2arc_dev_last = next; 3440 3441 return (next); 3442 } 3443 3444 /* 3445 * A write to a cache device has completed. Update all headers to allow 3446 * reads from these buffers to begin. 
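 *
 * Editorial note: l2arc_write_buffers() pushed a dummy "write head"
 * header onto the buflist just before queueing this round's buffers,
 * so the list_prev() walk below visits exactly those buffers. Each
 * one whose hash lock we can take has ARC_L2_WRITING cleared, making
 * it eligible for L2ARC reads; one whose hash lock is contended
 * keeps the flag and simply stays read-ineligible, as noted below.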
3447 */ 3448 static void 3449 l2arc_write_done(zio_t *zio) 3450 { 3451 l2arc_write_callback_t *cb; 3452 l2arc_dev_t *dev; 3453 list_t *buflist; 3454 l2arc_data_free_t *df, *df_prev; 3455 arc_buf_hdr_t *head, *ab, *ab_prev; 3456 kmutex_t *hash_lock; 3457 3458 cb = zio->io_private; 3459 ASSERT(cb != NULL); 3460 dev = cb->l2wcb_dev; 3461 ASSERT(dev != NULL); 3462 head = cb->l2wcb_head; 3463 ASSERT(head != NULL); 3464 buflist = dev->l2ad_buflist; 3465 ASSERT(buflist != NULL); 3466 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 3467 l2arc_write_callback_t *, cb); 3468 3469 if (zio->io_error != 0) 3470 ARCSTAT_BUMP(arcstat_l2_writes_error); 3471 3472 mutex_enter(&l2arc_buflist_mtx); 3473 3474 /* 3475 * All writes completed, or an error was hit. 3476 */ 3477 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 3478 ab_prev = list_prev(buflist, ab); 3479 3480 hash_lock = HDR_LOCK(ab); 3481 if (!mutex_tryenter(hash_lock)) { 3482 /* 3483 * This buffer misses out. It may be in a stage 3484 * of eviction. Its ARC_L2_WRITING flag will be 3485 * left set, denying reads to this buffer. 3486 */ 3487 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 3488 continue; 3489 } 3490 3491 if (zio->io_error != 0) { 3492 /* 3493 * Error - invalidate L2ARC entry. 3494 */ 3495 ab->b_l2hdr = NULL; 3496 } 3497 3498 /* 3499 * Allow ARC to begin reads to this L2ARC entry. 3500 */ 3501 ab->b_flags &= ~ARC_L2_WRITING; 3502 3503 mutex_exit(hash_lock); 3504 } 3505 3506 atomic_inc_64(&l2arc_writes_done); 3507 list_remove(buflist, head); 3508 kmem_cache_free(hdr_cache, head); 3509 mutex_exit(&l2arc_buflist_mtx); 3510 3511 /* 3512 * Free buffers that were tagged for destruction. 3513 */ 3514 mutex_enter(&l2arc_free_on_write_mtx); 3515 buflist = l2arc_free_on_write; 3516 for (df = list_tail(buflist); df; df = df_prev) { 3517 df_prev = list_prev(buflist, df); 3518 ASSERT(df->l2df_data != NULL); 3519 ASSERT(df->l2df_func != NULL); 3520 df->l2df_func(df->l2df_data, df->l2df_size); 3521 list_remove(buflist, df); 3522 kmem_free(df, sizeof (l2arc_data_free_t)); 3523 } 3524 mutex_exit(&l2arc_free_on_write_mtx); 3525 3526 kmem_free(cb, sizeof (l2arc_write_callback_t)); 3527 } 3528 3529 /* 3530 * A read to a cache device completed. Validate buffer contents before 3531 * handing over to the regular ARC routines. 3532 */ 3533 static void 3534 l2arc_read_done(zio_t *zio) 3535 { 3536 l2arc_read_callback_t *cb; 3537 arc_buf_hdr_t *hdr; 3538 arc_buf_t *buf; 3539 zio_t *rzio; 3540 kmutex_t *hash_lock; 3541 int equal, err = 0; 3542 3543 cb = zio->io_private; 3544 ASSERT(cb != NULL); 3545 buf = cb->l2rcb_buf; 3546 ASSERT(buf != NULL); 3547 hdr = buf->b_hdr; 3548 ASSERT(hdr != NULL); 3549 3550 hash_lock = HDR_LOCK(hdr); 3551 mutex_enter(hash_lock); 3552 3553 /* 3554 * Check this survived the L2ARC journey. 3555 */ 3556 equal = arc_cksum_equal(buf); 3557 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 3558 mutex_exit(hash_lock); 3559 zio->io_private = buf; 3560 arc_read_done(zio); 3561 } else { 3562 mutex_exit(hash_lock); 3563 /* 3564 * Buffer didn't survive caching. Increment stats and 3565 * reissue to the original storage device. 
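 *
 * Editorial illustration: a failed arc_cksum_equal() bumps
 * arcstat_l2_cksum_bad and a device error bumps arcstat_l2_io_error;
 * either way the read is reissued with zio_read() against the
 * original block pointer (cb->l2rcb_bp), so an L2ARC device
 * returning stale or damaged data degrades to a normal pool read
 * instead of an application-visible error.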
3566 */ 3567 if (zio->io_error != 0) 3568 ARCSTAT_BUMP(arcstat_l2_io_error); 3569 if (!equal) 3570 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 3571 3572 zio->io_flags &= ~ZIO_FLAG_DONT_CACHE; 3573 rzio = zio_read(NULL, cb->l2rcb_spa, &cb->l2rcb_bp, 3574 buf->b_data, zio->io_size, arc_read_done, buf, 3575 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb); 3576 3577 /* 3578 * Since this is a separate thread, we can wait on this 3579 * I/O whether there is an io_waiter or not. 3580 */ 3581 err = zio_wait(rzio); 3582 3583 /* 3584 * Let the resent I/O call arc_read_done() instead. 3585 * io_error is set to the reissued I/O error status. 3586 */ 3587 zio->io_done = NULL; 3588 zio->io_waiter = NULL; 3589 zio->io_error = err; 3590 } 3591 3592 kmem_free(cb, sizeof (l2arc_read_callback_t)); 3593 } 3594 3595 /* 3596 * This is the list priority from which the L2ARC will search for pages to 3597 * cache. This is used within loops (0..3) to cycle through lists in the 3598 * desired order. This order can have a significant effect on cache 3599 * performance. 3600 * 3601 * Currently the metadata lists are hit first, MFU then MRU, followed by 3602 * the data lists. This function returns a locked list, and also returns 3603 * the lock pointer. 3604 */ 3605 static list_t * 3606 l2arc_list_locked(int list_num, kmutex_t **lock) 3607 { 3608 list_t *list; 3609 3610 ASSERT(list_num >= 0 && list_num <= 3); 3611 3612 switch (list_num) { 3613 case 0: 3614 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 3615 *lock = &arc_mfu->arcs_mtx; 3616 break; 3617 case 1: 3618 list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 3619 *lock = &arc_mru->arcs_mtx; 3620 break; 3621 case 2: 3622 list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 3623 *lock = &arc_mfu->arcs_mtx; 3624 break; 3625 case 3: 3626 list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 3627 *lock = &arc_mru->arcs_mtx; 3628 break; 3629 } 3630 3631 ASSERT(!(MUTEX_HELD(*lock))); 3632 mutex_enter(*lock); 3633 return (list); 3634 } 3635 3636 /* 3637 * Evict buffers from the device write hand to the distance specified in 3638 * bytes. This distance may span populated buffers, or it may span nothing. 3639 * This is clearing a region on the L2ARC device ready for writing. 3640 * If the 'all' boolean is set, every buffer is evicted. 3641 */ 3642 static void 3643 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 3644 { 3645 list_t *buflist; 3646 l2arc_buf_hdr_t *abl2; 3647 arc_buf_hdr_t *ab, *ab_prev; 3648 kmutex_t *hash_lock; 3649 uint64_t taddr; 3650 3651 ASSERT(MUTEX_HELD(&l2arc_dev_mtx)); 3652 3653 buflist = dev->l2ad_buflist; 3654 3655 if (buflist == NULL) 3656 return; 3657 3658 if (!all && dev->l2ad_first) { 3659 /* 3660 * This is the first sweep through the device. There is 3661 * nothing to evict. 3662 */ 3663 return; 3664 } 3665 3666 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * dev->l2ad_write))) { 3667 /* 3668 * When nearing the end of the device, evict to the end 3669 * before the device write hand jumps to the start. 3670 */ 3671 taddr = dev->l2ad_end; 3672 } else { 3673 taddr = dev->l2ad_hand + distance; 3674 } 3675 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 3676 uint64_t, taddr, boolean_t, all); 3677 3678 top: 3679 mutex_enter(&l2arc_buflist_mtx); 3680 for (ab = list_tail(buflist); ab; ab = ab_prev) { 3681 ab_prev = list_prev(buflist, ab); 3682 3683 hash_lock = HDR_LOCK(ab); 3684 if (!mutex_tryenter(hash_lock)) { 3685 /* 3686 * Missed the hash lock. Retry.
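 *
 * Editorial note: we cannot block on a hash lock while holding
 * l2arc_buflist_mtx without risking deadlock, so the code below
 * drops the buflist mutex, enters and exits the contended hash lock
 * to let its holder finish, and restarts the sweep from the list
 * tail.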
/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes. This distance may span populated buffers, or it may span nothing.
 * This clears a region on the L2ARC device, readying it for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
        list_t *buflist;
        l2arc_buf_hdr_t *abl2;
        arc_buf_hdr_t *ab, *ab_prev;
        kmutex_t *hash_lock;
        uint64_t taddr;

        ASSERT(MUTEX_HELD(&l2arc_dev_mtx));

        buflist = dev->l2ad_buflist;

        if (buflist == NULL)
                return;

        if (!all && dev->l2ad_first) {
                /*
                 * This is the first sweep through the device. There is
                 * nothing to evict.
                 */
                return;
        }

        if (dev->l2ad_hand >= (dev->l2ad_end - (2 * dev->l2ad_write))) {
                /*
                 * When nearing the end of the device, evict to the end
                 * before the device write hand jumps to the start.
                 */
                taddr = dev->l2ad_end;
        } else {
                taddr = dev->l2ad_hand + distance;
        }
        DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
            uint64_t, taddr, boolean_t, all);

top:
        mutex_enter(&l2arc_buflist_mtx);
        for (ab = list_tail(buflist); ab; ab = ab_prev) {
                ab_prev = list_prev(buflist, ab);

                hash_lock = HDR_LOCK(ab);
                if (!mutex_tryenter(hash_lock)) {
                        /*
                         * Missed the hash lock. Retry.
                         */
                        ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
                        mutex_exit(&l2arc_buflist_mtx);
                        mutex_enter(hash_lock);
                        mutex_exit(hash_lock);
                        goto top;
                }

                if (HDR_L2_WRITE_HEAD(ab)) {
                        /*
                         * We hit a write head node. Leave it for
                         * l2arc_write_done().
                         */
                        list_remove(buflist, ab);
                        mutex_exit(hash_lock);
                        continue;
                }

                if (!all && ab->b_l2hdr != NULL &&
                    (ab->b_l2hdr->b_daddr > taddr ||
                    ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
                        /*
                         * We've evicted to the target address,
                         * or the end of the device.
                         */
                        mutex_exit(hash_lock);
                        break;
                }

                if (HDR_FREE_IN_PROGRESS(ab)) {
                        /*
                         * Already on the path to destruction.
                         */
                        mutex_exit(hash_lock);
                        continue;
                }

                if (ab->b_state == arc_l2c_only) {
                        ASSERT(!HDR_L2_READING(ab));
                        /*
                         * This doesn't exist in the ARC. Destroy.
                         * arc_hdr_destroy() will call list_remove()
                         * and decrement arcstat_l2_size.
                         */
                        arc_change_state(arc_anon, ab, hash_lock);
                        arc_hdr_destroy(ab);
                } else {
                        /*
                         * Tell ARC this no longer exists in L2ARC.
                         */
                        if (ab->b_l2hdr != NULL) {
                                abl2 = ab->b_l2hdr;
                                ab->b_l2hdr = NULL;
                                kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
                                ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
                        }
                        list_remove(buflist, ab);

                        /*
                         * This may have been leftover after a
                         * failed write.
                         */
                        ab->b_flags &= ~ARC_L2_WRITING;

                        /*
                         * Invalidate issued or about to be issued
                         * reads, since we may be about to write
                         * over this location.
                         */
                        if (HDR_L2_READING(ab)) {
                                ARCSTAT_BUMP(arcstat_l2_evict_reading);
                                ab->b_flags |= ARC_L2_EVICTED;
                        }
                }
                mutex_exit(hash_lock);
        }
        mutex_exit(&l2arc_buflist_mtx);

        spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict));
        dev->l2ad_evict = taddr;
}
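/*
 * Illustrative sketch (hypothetical helper, not part of the build) of the
 * eviction-target arithmetic used above: evict ahead by 'distance' bytes,
 * but run to the end of the device once the write hand is within two
 * write sizes of it, since the hand wraps to l2ad_start afterwards.
 */
#ifdef  L2ARC_EXAMPLE
static uint64_t
l2arc_example_evict_target(const l2arc_dev_t *dev, uint64_t distance)
{
        if (dev->l2ad_hand >= (dev->l2ad_end - (2 * dev->l2ad_write)))
                return (dev->l2ad_end);         /* evict to the end */
        return (dev->l2ad_hand + distance);     /* evict ahead */
}
#endif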
/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 */
static void
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev)
{
        arc_buf_hdr_t *ab, *ab_prev, *head;
        l2arc_buf_hdr_t *hdrl2;
        list_t *list;
        uint64_t passed_sz, write_sz, buf_sz;
        uint64_t target_sz = dev->l2ad_write;
        uint64_t headroom = dev->l2ad_write * l2arc_headroom;
        void *buf_data;
        kmutex_t *hash_lock, *list_lock;
        boolean_t have_lock, full;
        l2arc_write_callback_t *cb;
        zio_t *pio, *wzio;

        ASSERT(MUTEX_HELD(&l2arc_dev_mtx));
        ASSERT(dev->l2ad_vdev != NULL);

        pio = NULL;
        write_sz = 0;
        full = B_FALSE;
        head = kmem_cache_alloc(hdr_cache, KM_SLEEP);
        head->b_flags |= ARC_L2_WRITE_HEAD;

        /*
         * Copy buffers for L2ARC writing.
         */
        mutex_enter(&l2arc_buflist_mtx);
        for (int try = 0; try <= 3; try++) {
                list = l2arc_list_locked(try, &list_lock);
                passed_sz = 0;

                for (ab = list_tail(list); ab; ab = ab_prev) {
                        ab_prev = list_prev(list, ab);

                        hash_lock = HDR_LOCK(ab);
                        have_lock = MUTEX_HELD(hash_lock);
                        if (!have_lock && !mutex_tryenter(hash_lock)) {
                                /*
                                 * Skip this buffer rather than waiting.
                                 */
                                continue;
                        }

                        passed_sz += ab->b_size;
                        if (passed_sz > headroom) {
                                /*
                                 * Searched too far.
                                 */
                                mutex_exit(hash_lock);
                                break;
                        }

                        if (ab->b_spa != spa) {
                                mutex_exit(hash_lock);
                                continue;
                        }

                        if (ab->b_l2hdr != NULL) {
                                /*
                                 * Already in L2ARC.
                                 */
                                mutex_exit(hash_lock);
                                continue;
                        }

                        if (HDR_IO_IN_PROGRESS(ab) || HDR_DONT_L2CACHE(ab)) {
                                mutex_exit(hash_lock);
                                continue;
                        }

                        if ((write_sz + ab->b_size) > target_sz) {
                                full = B_TRUE;
                                mutex_exit(hash_lock);
                                break;
                        }

                        if (ab->b_buf == NULL) {
                                DTRACE_PROBE1(l2arc__buf__null, void *, ab);
                                mutex_exit(hash_lock);
                                continue;
                        }

                        if (pio == NULL) {
                                /*
                                 * Insert a dummy header on the buflist so
                                 * l2arc_write_done() can find where the
                                 * write buffers begin without searching.
                                 */
                                list_insert_head(dev->l2ad_buflist, head);

                                cb = kmem_alloc(
                                    sizeof (l2arc_write_callback_t), KM_SLEEP);
                                cb->l2wcb_dev = dev;
                                cb->l2wcb_head = head;
                                pio = zio_root(spa, l2arc_write_done, cb,
                                    ZIO_FLAG_CANFAIL);
                        }

                        /*
                         * Create and add a new L2ARC header.
                         */
                        hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
                        hdrl2->b_dev = dev;
                        hdrl2->b_daddr = dev->l2ad_hand;

                        ab->b_flags |= ARC_L2_WRITING;
                        ab->b_l2hdr = hdrl2;
                        list_insert_head(dev->l2ad_buflist, ab);
                        buf_data = ab->b_buf->b_data;
                        buf_sz = ab->b_size;

                        /*
                         * Compute and store the buffer cksum before
                         * writing. In debug builds the cksum is verified
                         * first.
                         */
                        arc_cksum_verify(ab->b_buf);
                        arc_cksum_compute(ab->b_buf, B_TRUE);

                        mutex_exit(hash_lock);

                        wzio = zio_write_phys(pio, dev->l2ad_vdev,
                            dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
                            NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
                            ZIO_FLAG_CANFAIL, B_FALSE);

                        DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
                            zio_t *, wzio);
                        (void) zio_nowait(wzio);

                        write_sz += buf_sz;
                        dev->l2ad_hand += buf_sz;
                }

                mutex_exit(list_lock);

                if (full == B_TRUE)
                        break;
        }
        mutex_exit(&l2arc_buflist_mtx);

        if (pio == NULL) {
                ASSERT3U(write_sz, ==, 0);
                kmem_cache_free(hdr_cache, head);
                return;
        }

        ASSERT3U(write_sz, <=, target_sz);
        ARCSTAT_BUMP(arcstat_l2_writes_sent);
        ARCSTAT_INCR(arcstat_l2_size, write_sz);
        spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz);

        /*
         * Bump device hand to the device start if it is approaching the end.
         * l2arc_evict() will already have evicted ahead for this case.
         */
        if (dev->l2ad_hand >= (dev->l2ad_end - dev->l2ad_write)) {
                spa_l2cache_space_update(dev->l2ad_vdev, 0,
                    dev->l2ad_end - dev->l2ad_hand);
                dev->l2ad_hand = dev->l2ad_start;
                dev->l2ad_evict = dev->l2ad_start;
                dev->l2ad_first = B_FALSE;
        }

        (void) zio_wait(pio);
}
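/*
 * Condensed sketch (hypothetical predicate, not part of the build) of the
 * skip conditions applied to each candidate buffer in the loop above:
 * wrong pool, already cached in L2ARC, I/O in flight, flagged uncacheable,
 * or carrying no data.
 */
#ifdef  L2ARC_EXAMPLE
static boolean_t
l2arc_example_skip_buffer(arc_buf_hdr_t *ab, spa_t *spa)
{
        return (ab->b_spa != spa ||             /* belongs to another pool */
            ab->b_l2hdr != NULL ||              /* already in L2ARC */
            HDR_IO_IN_PROGRESS(ab) ||           /* being read or written */
            HDR_DONT_L2CACHE(ab) ||             /* marked uncacheable */
            ab->b_buf == NULL);                 /* no data to copy */
}
#endif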
/*
 * This thread feeds the L2ARC at regular intervals. This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void)
{
        callb_cpr_t cpr;
        l2arc_dev_t *dev;
        spa_t *spa;
        int interval;
        boolean_t startup = B_TRUE;

        CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

        mutex_enter(&l2arc_feed_thr_lock);

        while (l2arc_thread_exit == 0) {
                /*
                 * Initially pause for L2ARC_FEED_DELAY seconds as a grace
                 * interval during boot, followed by l2arc_feed_secs seconds
                 * thereafter.
                 */
                CALLB_CPR_SAFE_BEGIN(&cpr);
                if (startup) {
                        interval = L2ARC_FEED_DELAY;
                        startup = B_FALSE;
                } else {
                        interval = l2arc_feed_secs;
                }
                (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
                    lbolt + (hz * interval));
                CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);

                /*
                 * Do nothing until L2ARC devices exist.
                 */
                mutex_enter(&l2arc_dev_mtx);
                if (l2arc_ndev == 0) {
                        mutex_exit(&l2arc_dev_mtx);
                        continue;
                }

                /*
                 * Avoid contributing to memory pressure.
                 */
                if (arc_reclaim_needed()) {
                        ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
                        mutex_exit(&l2arc_dev_mtx);
                        continue;
                }

                /*
                 * This selects the next l2arc device to write to, and in
                 * doing so the next spa to feed from: dev->l2ad_spa.
                 */
                if ((dev = l2arc_dev_get_next()) == NULL) {
                        mutex_exit(&l2arc_dev_mtx);
                        continue;
                }
                spa = dev->l2ad_spa;
                ASSERT(spa != NULL);
                ARCSTAT_BUMP(arcstat_l2_feeds);

                /*
                 * Evict L2ARC buffers that will be overwritten.
                 */
                l2arc_evict(dev, dev->l2ad_write, B_FALSE);

                /*
                 * Write ARC buffers.
                 */
                l2arc_write_buffers(spa, dev);
                mutex_exit(&l2arc_dev_mtx);
        }

        l2arc_thread_exit = 0;
        cv_broadcast(&l2arc_feed_thr_cv);
        CALLB_CPR_EXIT(&cpr);           /* drops l2arc_feed_thr_lock */
        thread_exit();
}

/*
 * Add a vdev for use by the L2ARC. By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end)
{
        l2arc_dev_t *adddev;

        /*
         * Create a new l2arc device entry.
         */
        adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
        adddev->l2ad_spa = spa;
        adddev->l2ad_vdev = vd;
        adddev->l2ad_write = l2arc_write_max;
        adddev->l2ad_start = start;
        adddev->l2ad_end = end;
        adddev->l2ad_hand = adddev->l2ad_start;
        adddev->l2ad_evict = adddev->l2ad_start;
        adddev->l2ad_first = B_TRUE;
        ASSERT3U(adddev->l2ad_write, >, 0);

        /*
         * This is a list of all ARC buffers that are still valid on the
         * device.
         */
        adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
        list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
            offsetof(arc_buf_hdr_t, b_l2node));

        spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0);

        /*
         * Add the device to the global list.
         */
        mutex_enter(&l2arc_dev_mtx);
        list_insert_head(l2arc_dev_list, adddev);
        atomic_inc_64(&l2arc_ndev);
        mutex_exit(&l2arc_dev_mtx);
}
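/*
 * Illustrative caller (hypothetical; the real registration is done by the
 * spa when a cache vdev is opened). The label offsets and asize arithmetic
 * here are assumptions for the sketch only.
 */
#ifdef  L2ARC_EXAMPLE
static void
l2arc_example_register(spa_t *spa, vdev_t *vd)
{
        uint64_t start = VDEV_LABEL_START_SIZE;         /* skip front labels */
        uint64_t end = vd->vdev_asize - VDEV_LABEL_END_SIZE;

        l2arc_add_vdev(spa, vd, start, end);
}
#endif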
/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
        l2arc_dev_t *dev, *nextdev, *remdev = NULL;

        /*
         * We can only grab the spa config lock when cache device writes
         * complete.
         */
        ASSERT3U(l2arc_writes_sent, ==, l2arc_writes_done);

        /*
         * Find the device by vdev.
         */
        mutex_enter(&l2arc_dev_mtx);
        for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
                nextdev = list_next(l2arc_dev_list, dev);
                if (vd == dev->l2ad_vdev) {
                        remdev = dev;
                        break;
                }
        }
        ASSERT(remdev != NULL);

        /*
         * Remove the device from the global list.
         */
        list_remove(l2arc_dev_list, remdev);
        l2arc_dev_last = NULL;          /* may have been invalidated */

        /*
         * Clear all buflists and ARC references: flush the L2ARC device.
         */
        l2arc_evict(remdev, 0, B_TRUE);
        list_destroy(remdev->l2ad_buflist);
        kmem_free(remdev->l2ad_buflist, sizeof (list_t));
        kmem_free(remdev, sizeof (l2arc_dev_t));

        atomic_dec_64(&l2arc_ndev);
        mutex_exit(&l2arc_dev_mtx);
}

void
l2arc_init(void)
{
        l2arc_thread_exit = 0;
        l2arc_ndev = 0;
        l2arc_writes_sent = 0;
        l2arc_writes_done = 0;

        mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
        mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

        l2arc_dev_list = &L2ARC_dev_list;
        l2arc_free_on_write = &L2ARC_free_on_write;
        list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
            offsetof(l2arc_dev_t, l2ad_node));
        list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
            offsetof(l2arc_data_free_t, l2df_list_node));

        (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
            TS_RUN, minclsyspri);
}

void
l2arc_fini(void)
{
        mutex_enter(&l2arc_feed_thr_lock);
        cv_signal(&l2arc_feed_thr_cv);  /* kick thread out of startup */
        l2arc_thread_exit = 1;
        while (l2arc_thread_exit != 0)
                cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
        mutex_exit(&l2arc_feed_thr_lock);

        mutex_destroy(&l2arc_feed_thr_lock);
        cv_destroy(&l2arc_feed_thr_cv);
        mutex_destroy(&l2arc_dev_mtx);
        mutex_destroy(&l2arc_buflist_mtx);
        mutex_destroy(&l2arc_free_on_write_mtx);

        list_destroy(l2arc_dev_list);
        list_destroy(l2arc_free_on_write);
}
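/*
 * Lifecycle sketch (illustrative only; a hypothetical function under our
 * L2ARC_EXAMPLE guard) showing the expected call order from the rest of
 * ZFS: l2arc_init() runs at module load, vdevs come and go while the feed
 * thread runs, and l2arc_fini() retires the thread at module teardown.
 */
#ifdef  L2ARC_EXAMPLE
static void
l2arc_example_lifecycle(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end)
{
        l2arc_init();                   /* locks, lists, feed thread */
        l2arc_add_vdev(spa, vd, start, end);
        /* ... the feed thread periodically evicts ahead and writes ... */
        l2arc_remove_vdev(vd);          /* flushes and frees the device */
        l2arc_fini();                   /* stops the feed thread */
}
#endif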