/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
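/*
 * Illustrative sketch (not part of the original file) of the idea in
 * point 3 above: with variable sized blocks, room for a miss is made by
 * walking evictable blocks from the "lowest" end of a list and freeing
 * them until at least the requested number of bytes has been recovered.
 * The type and function names here are hypothetical stand-ins using only
 * built-in C types; the real logic lives in arc_evict() further down.
 */
typedef struct example_blk {
	struct example_blk *eb_prev;	/* toward the "highest" end of the list */
	unsigned long	eb_size;	/* variable: 512 bytes .. 128K bytes */
	int		eb_refcnt;	/* external references held */
} example_blk_t;

static unsigned long
example_make_space(example_blk_t *tail, unsigned long wanted)
{
	unsigned long freed = 0;
	example_blk_t *eb;

	/* best effort: stop once enough space has been approximated */
	for (eb = tail; eb && freed < wanted; eb = eb->eb_prev) {
		if (eb->eb_refcnt != 0)
			continue;	/* referenced blocks are un-evictable */
		freed += eb->eb_size;	/* evict this block (eviction not shown) */
	}
	return (freed);
}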
/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern uint64_t zfs_write_limit_inflated;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;
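/*
 * Illustrative sketch (not part of the original code) of the lock-ordering
 * rule described above: while an arc list (state) mutex is held, a hash
 * table lock may only be taken with mutex_tryenter().  Blocking with
 * mutex_enter() could deadlock against a thread that already holds the
 * hash lock and is waiting for the list lock.  The function name and the
 * "give up and let the caller skip this buffer" policy are hypothetical.
 */
static boolean_t
example_try_hash_lock_while_listing(kmutex_t *list_lock, kmutex_t *hash_lock)
{
	ASSERT(MUTEX_HELD(list_lock));

	if (!mutex_tryenter(hash_lock)) {
		/* contended: caller should skip this buffer and move on */
		return (B_FALSE);
	}
	/* ... examine or move the buffer while holding both locks ... */
	mutex_exit(hash_lock);
	return (B_TRUE);
}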
/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are the
 * only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;
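/*
 * Illustrative sketch (not in the original source): each state tracks its
 * evictable data per buffer type, so the total that could be evicted from
 * a state is simply the sum of its two arcs_lsize counters.  The helper
 * name is hypothetical; the real code reads arcs_lsize[type] directly
 * wherever it needs it (e.g. in arc_adjust()).
 */
static uint64_t
example_state_evictable_bytes(arc_state_t *state)
{
	/* data and metadata are kept on separate lists within a state */
	return (state->arcs_lsize[ARC_BUFC_DATA] +
	    state->arcs_lsize[ARC_BUFC_METADATA]);
}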
typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
339 */ 340 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 341 if (cond1) { \ 342 if (cond2) { \ 343 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 344 } else { \ 345 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 346 } \ 347 } else { \ 348 if (cond2) { \ 349 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 350 } else { \ 351 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 352 } \ 353 } 354 355 kstat_t *arc_ksp; 356 static arc_state_t *arc_anon; 357 static arc_state_t *arc_mru; 358 static arc_state_t *arc_mru_ghost; 359 static arc_state_t *arc_mfu; 360 static arc_state_t *arc_mfu_ghost; 361 static arc_state_t *arc_l2c_only; 362 363 /* 364 * There are several ARC variables that are critical to export as kstats -- 365 * but we don't want to have to grovel around in the kstat whenever we wish to 366 * manipulate them. For these variables, we therefore define them to be in 367 * terms of the statistic variable. This assures that we are not introducing 368 * the possibility of inconsistency by having shadow copies of the variables, 369 * while still allowing the code to be readable. 370 */ 371 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ 372 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 373 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 374 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 375 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 376 377 static int arc_no_grow; /* Don't try to grow cache size */ 378 static uint64_t arc_tempreserve; 379 static uint64_t arc_meta_used; 380 static uint64_t arc_meta_limit; 381 static uint64_t arc_meta_max = 0; 382 383 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t; 384 385 typedef struct arc_callback arc_callback_t; 386 387 struct arc_callback { 388 void *acb_private; 389 arc_done_func_t *acb_done; 390 arc_byteswap_func_t *acb_byteswap; 391 arc_buf_t *acb_buf; 392 zio_t *acb_zio_dummy; 393 arc_callback_t *acb_next; 394 }; 395 396 typedef struct arc_write_callback arc_write_callback_t; 397 398 struct arc_write_callback { 399 void *awcb_private; 400 arc_done_func_t *awcb_ready; 401 arc_done_func_t *awcb_done; 402 arc_buf_t *awcb_buf; 403 }; 404 405 struct arc_buf_hdr { 406 /* protected by hash lock */ 407 dva_t b_dva; 408 uint64_t b_birth; 409 uint64_t b_cksum0; 410 411 kmutex_t b_freeze_lock; 412 zio_cksum_t *b_freeze_cksum; 413 414 arc_buf_hdr_t *b_hash_next; 415 arc_buf_t *b_buf; 416 uint32_t b_flags; 417 uint32_t b_datacnt; 418 419 arc_callback_t *b_acb; 420 kcondvar_t b_cv; 421 422 /* immutable */ 423 arc_buf_contents_t b_type; 424 uint64_t b_size; 425 spa_t *b_spa; 426 427 /* protected by arc state mutex */ 428 arc_state_t *b_state; 429 list_node_t b_arc_node; 430 431 /* updated atomically */ 432 clock_t b_arc_access; 433 434 /* self protecting */ 435 refcount_t b_refcnt; 436 437 l2arc_buf_hdr_t *b_l2hdr; 438 list_node_t b_l2node; 439 }; 440 441 static arc_buf_t *arc_eviction_list; 442 static kmutex_t arc_eviction_mtx; 443 static arc_buf_hdr_t arc_eviction_hdr; 444 static void arc_get_data_buf(arc_buf_t *buf); 445 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock); 446 static int arc_evict_needed(arc_buf_contents_t type); 447 static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes); 448 449 #define GHOST_STATE(state) \ 450 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 451 (state) == arc_l2c_only) 452 453 /* 454 * Private ARC flags. 
These flags are private ARC only flags that will show up 455 * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can 456 * be passed in as arc_flags in things like arc_read. However, these flags 457 * should never be passed and should only be set by ARC code. When adding new 458 * public flags, make sure not to smash the private ones. 459 */ 460 461 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 462 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 463 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 464 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 465 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 466 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 467 #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ 468 #define ARC_DONT_L2CACHE (1 << 16) /* originated by prefetch */ 469 #define ARC_L2_READING (1 << 17) /* L2ARC read in progress */ 470 #define ARC_L2_WRITING (1 << 18) /* L2ARC write in progress */ 471 #define ARC_L2_EVICTED (1 << 19) /* evicted during I/O */ 472 #define ARC_L2_WRITE_HEAD (1 << 20) /* head of write list */ 473 474 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 475 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 476 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 477 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 478 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 479 #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS) 480 #define HDR_DONT_L2CACHE(hdr) ((hdr)->b_flags & ARC_DONT_L2CACHE) 481 #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_L2_READING) 482 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING) 483 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED) 484 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD) 485 486 /* 487 * Other sizes 488 */ 489 490 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 491 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) 492 493 /* 494 * Hash table routines 495 */ 496 497 #define HT_LOCK_PAD 64 498 499 struct ht_lock { 500 kmutex_t ht_lock; 501 #ifdef _KERNEL 502 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 503 #endif 504 }; 505 506 #define BUF_LOCKS 256 507 typedef struct buf_hash_table { 508 uint64_t ht_mask; 509 arc_buf_hdr_t **ht_table; 510 struct ht_lock ht_locks[BUF_LOCKS]; 511 } buf_hash_table_t; 512 513 static buf_hash_table_t buf_hash_table; 514 515 #define BUF_HASH_INDEX(spa, dva, birth) \ 516 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 517 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 518 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 519 #define HDR_LOCK(buf) \ 520 (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) 521 522 uint64_t zfs_crc64_table[256]; 523 524 /* 525 * Level 2 ARC 526 */ 527 528 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 529 #define L2ARC_HEADROOM 4 /* num of writes */ 530 #define L2ARC_FEED_DELAY 180 /* starting grace */ 531 #define L2ARC_FEED_SECS 1 /* caching interval */ 532 533 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 534 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 535 536 /* 537 * L2ARC Performance Tunables 538 */ 539 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 540 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 541 
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 542 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 543 544 /* 545 * L2ARC Internals 546 */ 547 typedef struct l2arc_dev { 548 vdev_t *l2ad_vdev; /* vdev */ 549 spa_t *l2ad_spa; /* spa */ 550 uint64_t l2ad_hand; /* next write location */ 551 uint64_t l2ad_write; /* desired write size, bytes */ 552 uint64_t l2ad_start; /* first addr on device */ 553 uint64_t l2ad_end; /* last addr on device */ 554 uint64_t l2ad_evict; /* last addr eviction reached */ 555 boolean_t l2ad_first; /* first sweep through */ 556 list_t *l2ad_buflist; /* buffer list */ 557 list_node_t l2ad_node; /* device list node */ 558 } l2arc_dev_t; 559 560 static list_t L2ARC_dev_list; /* device list */ 561 static list_t *l2arc_dev_list; /* device list pointer */ 562 static kmutex_t l2arc_dev_mtx; /* device list mutex */ 563 static l2arc_dev_t *l2arc_dev_last; /* last device used */ 564 static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 565 static list_t L2ARC_free_on_write; /* free after write buf list */ 566 static list_t *l2arc_free_on_write; /* free after write list ptr */ 567 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 568 static uint64_t l2arc_ndev; /* number of devices */ 569 570 typedef struct l2arc_read_callback { 571 arc_buf_t *l2rcb_buf; /* read buffer */ 572 spa_t *l2rcb_spa; /* spa */ 573 blkptr_t l2rcb_bp; /* original blkptr */ 574 zbookmark_t l2rcb_zb; /* original bookmark */ 575 int l2rcb_flags; /* original flags */ 576 } l2arc_read_callback_t; 577 578 typedef struct l2arc_write_callback { 579 l2arc_dev_t *l2wcb_dev; /* device info */ 580 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 581 } l2arc_write_callback_t; 582 583 struct l2arc_buf_hdr { 584 /* protected by arc_buf_hdr mutex */ 585 l2arc_dev_t *b_dev; /* L2ARC device */ 586 daddr_t b_daddr; /* disk address, offset byte */ 587 }; 588 589 typedef struct l2arc_data_free { 590 /* protected by l2arc_free_on_write_mtx */ 591 void *l2df_data; 592 size_t l2df_size; 593 void (*l2df_func)(void *, size_t); 594 list_node_t l2df_list_node; 595 } l2arc_data_free_t; 596 597 static kmutex_t l2arc_feed_thr_lock; 598 static kcondvar_t l2arc_feed_thr_cv; 599 static uint8_t l2arc_thread_exit; 600 601 static void l2arc_read_done(zio_t *zio); 602 static void l2arc_hdr_stat_add(void); 603 static void l2arc_hdr_stat_remove(void); 604 605 static uint64_t 606 buf_hash(spa_t *spa, dva_t *dva, uint64_t birth) 607 { 608 uintptr_t spav = (uintptr_t)spa; 609 uint8_t *vdva = (uint8_t *)dva; 610 uint64_t crc = -1ULL; 611 int i; 612 613 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 614 615 for (i = 0; i < sizeof (dva_t); i++) 616 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 617 618 crc ^= (spav>>8) ^ birth; 619 620 return (crc); 621 } 622 623 #define BUF_EMPTY(buf) \ 624 ((buf)->b_dva.dva_word[0] == 0 && \ 625 (buf)->b_dva.dva_word[1] == 0 && \ 626 (buf)->b_birth == 0) 627 628 #define BUF_EQUAL(spa, dva, birth, buf) \ 629 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 630 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 631 ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 632 633 static arc_buf_hdr_t * 634 buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp) 635 { 636 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 637 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 638 arc_buf_hdr_t *buf; 639 640 mutex_enter(hash_lock); 641 for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 642 buf = buf->b_hash_next) { 643 if 
(BUF_EQUAL(spa, dva, birth, buf)) { 644 *lockp = hash_lock; 645 return (buf); 646 } 647 } 648 mutex_exit(hash_lock); 649 *lockp = NULL; 650 return (NULL); 651 } 652 653 /* 654 * Insert an entry into the hash table. If there is already an element 655 * equal to elem in the hash table, then the already existing element 656 * will be returned and the new element will not be inserted. 657 * Otherwise returns NULL. 658 */ 659 static arc_buf_hdr_t * 660 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 661 { 662 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 663 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 664 arc_buf_hdr_t *fbuf; 665 uint32_t i; 666 667 ASSERT(!HDR_IN_HASH_TABLE(buf)); 668 *lockp = hash_lock; 669 mutex_enter(hash_lock); 670 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 671 fbuf = fbuf->b_hash_next, i++) { 672 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 673 return (fbuf); 674 } 675 676 buf->b_hash_next = buf_hash_table.ht_table[idx]; 677 buf_hash_table.ht_table[idx] = buf; 678 buf->b_flags |= ARC_IN_HASH_TABLE; 679 680 /* collect some hash table performance data */ 681 if (i > 0) { 682 ARCSTAT_BUMP(arcstat_hash_collisions); 683 if (i == 1) 684 ARCSTAT_BUMP(arcstat_hash_chains); 685 686 ARCSTAT_MAX(arcstat_hash_chain_max, i); 687 } 688 689 ARCSTAT_BUMP(arcstat_hash_elements); 690 ARCSTAT_MAXSTAT(arcstat_hash_elements); 691 692 return (NULL); 693 } 694 695 static void 696 buf_hash_remove(arc_buf_hdr_t *buf) 697 { 698 arc_buf_hdr_t *fbuf, **bufp; 699 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 700 701 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 702 ASSERT(HDR_IN_HASH_TABLE(buf)); 703 704 bufp = &buf_hash_table.ht_table[idx]; 705 while ((fbuf = *bufp) != buf) { 706 ASSERT(fbuf != NULL); 707 bufp = &fbuf->b_hash_next; 708 } 709 *bufp = buf->b_hash_next; 710 buf->b_hash_next = NULL; 711 buf->b_flags &= ~ARC_IN_HASH_TABLE; 712 713 /* collect some hash table performance data */ 714 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 715 716 if (buf_hash_table.ht_table[idx] && 717 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 718 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 719 } 720 721 /* 722 * Global data structures and functions for the buf kmem cache. 723 */ 724 static kmem_cache_t *hdr_cache; 725 static kmem_cache_t *buf_cache; 726 727 static void 728 buf_fini(void) 729 { 730 int i; 731 732 kmem_free(buf_hash_table.ht_table, 733 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 734 for (i = 0; i < BUF_LOCKS; i++) 735 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 736 kmem_cache_destroy(hdr_cache); 737 kmem_cache_destroy(buf_cache); 738 } 739 740 /* 741 * Constructor callback - called when the cache is empty 742 * and a new buf is requested. 743 */ 744 /* ARGSUSED */ 745 static int 746 hdr_cons(void *vbuf, void *unused, int kmflag) 747 { 748 arc_buf_hdr_t *buf = vbuf; 749 750 bzero(buf, sizeof (arc_buf_hdr_t)); 751 refcount_create(&buf->b_refcnt); 752 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 753 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 754 755 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 756 return (0); 757 } 758 759 /* 760 * Destructor callback - called when a cached buf is 761 * no longer required. 
762 */ 763 /* ARGSUSED */ 764 static void 765 hdr_dest(void *vbuf, void *unused) 766 { 767 arc_buf_hdr_t *buf = vbuf; 768 769 refcount_destroy(&buf->b_refcnt); 770 cv_destroy(&buf->b_cv); 771 mutex_destroy(&buf->b_freeze_lock); 772 773 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 774 } 775 776 /* 777 * Reclaim callback -- invoked when memory is low. 778 */ 779 /* ARGSUSED */ 780 static void 781 hdr_recl(void *unused) 782 { 783 dprintf("hdr_recl called\n"); 784 /* 785 * umem calls the reclaim func when we destroy the buf cache, 786 * which is after we do arc_fini(). 787 */ 788 if (!arc_dead) 789 cv_signal(&arc_reclaim_thr_cv); 790 } 791 792 static void 793 buf_init(void) 794 { 795 uint64_t *ct; 796 uint64_t hsize = 1ULL << 12; 797 int i, j; 798 799 /* 800 * The hash table is big enough to fill all of physical memory 801 * with an average 64K block size. The table will take up 802 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 803 */ 804 while (hsize * 65536 < physmem * PAGESIZE) 805 hsize <<= 1; 806 retry: 807 buf_hash_table.ht_mask = hsize - 1; 808 buf_hash_table.ht_table = 809 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 810 if (buf_hash_table.ht_table == NULL) { 811 ASSERT(hsize > (1ULL << 8)); 812 hsize >>= 1; 813 goto retry; 814 } 815 816 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 817 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 818 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 819 0, NULL, NULL, NULL, NULL, NULL, 0); 820 821 for (i = 0; i < 256; i++) 822 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 823 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 824 825 for (i = 0; i < BUF_LOCKS; i++) { 826 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 827 NULL, MUTEX_DEFAULT, NULL); 828 } 829 } 830 831 #define ARC_MINTIME (hz>>4) /* 62 ms */ 832 833 static void 834 arc_cksum_verify(arc_buf_t *buf) 835 { 836 zio_cksum_t zc; 837 838 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 839 return; 840 841 mutex_enter(&buf->b_hdr->b_freeze_lock); 842 if (buf->b_hdr->b_freeze_cksum == NULL || 843 (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 844 mutex_exit(&buf->b_hdr->b_freeze_lock); 845 return; 846 } 847 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 848 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 849 panic("buffer modified while frozen!"); 850 mutex_exit(&buf->b_hdr->b_freeze_lock); 851 } 852 853 static int 854 arc_cksum_equal(arc_buf_t *buf) 855 { 856 zio_cksum_t zc; 857 int equal; 858 859 mutex_enter(&buf->b_hdr->b_freeze_lock); 860 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 861 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 862 mutex_exit(&buf->b_hdr->b_freeze_lock); 863 864 return (equal); 865 } 866 867 static void 868 arc_cksum_compute(arc_buf_t *buf, boolean_t force) 869 { 870 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 871 return; 872 873 mutex_enter(&buf->b_hdr->b_freeze_lock); 874 if (buf->b_hdr->b_freeze_cksum != NULL) { 875 mutex_exit(&buf->b_hdr->b_freeze_lock); 876 return; 877 } 878 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 879 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 880 buf->b_hdr->b_freeze_cksum); 881 mutex_exit(&buf->b_hdr->b_freeze_lock); 882 } 883 884 void 885 arc_buf_thaw(arc_buf_t *buf) 886 { 887 if (zfs_flags & ZFS_DEBUG_MODIFY) { 888 if (buf->b_hdr->b_state != arc_anon) 889 panic("modifying non-anon buffer!"); 890 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) 891 panic("modifying buffer while i/o in progress!"); 892 
arc_cksum_verify(buf); 893 } 894 895 mutex_enter(&buf->b_hdr->b_freeze_lock); 896 if (buf->b_hdr->b_freeze_cksum != NULL) { 897 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 898 buf->b_hdr->b_freeze_cksum = NULL; 899 } 900 mutex_exit(&buf->b_hdr->b_freeze_lock); 901 } 902 903 void 904 arc_buf_freeze(arc_buf_t *buf) 905 { 906 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 907 return; 908 909 ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 910 buf->b_hdr->b_state == arc_anon); 911 arc_cksum_compute(buf, B_FALSE); 912 } 913 914 static void 915 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 916 { 917 ASSERT(MUTEX_HELD(hash_lock)); 918 919 if ((refcount_add(&ab->b_refcnt, tag) == 1) && 920 (ab->b_state != arc_anon)) { 921 uint64_t delta = ab->b_size * ab->b_datacnt; 922 list_t *list = &ab->b_state->arcs_list[ab->b_type]; 923 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; 924 925 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 926 mutex_enter(&ab->b_state->arcs_mtx); 927 ASSERT(list_link_active(&ab->b_arc_node)); 928 list_remove(list, ab); 929 if (GHOST_STATE(ab->b_state)) { 930 ASSERT3U(ab->b_datacnt, ==, 0); 931 ASSERT3P(ab->b_buf, ==, NULL); 932 delta = ab->b_size; 933 } 934 ASSERT(delta > 0); 935 ASSERT3U(*size, >=, delta); 936 atomic_add_64(size, -delta); 937 mutex_exit(&ab->b_state->arcs_mtx); 938 /* remove the prefetch flag is we get a reference */ 939 if (ab->b_flags & ARC_PREFETCH) 940 ab->b_flags &= ~ARC_PREFETCH; 941 } 942 } 943 944 static int 945 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 946 { 947 int cnt; 948 arc_state_t *state = ab->b_state; 949 950 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 951 ASSERT(!GHOST_STATE(state)); 952 953 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 954 (state != arc_anon)) { 955 uint64_t *size = &state->arcs_lsize[ab->b_type]; 956 957 ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 958 mutex_enter(&state->arcs_mtx); 959 ASSERT(!list_link_active(&ab->b_arc_node)); 960 list_insert_head(&state->arcs_list[ab->b_type], ab); 961 ASSERT(ab->b_datacnt > 0); 962 atomic_add_64(size, ab->b_size * ab->b_datacnt); 963 mutex_exit(&state->arcs_mtx); 964 } 965 return (cnt); 966 } 967 968 /* 969 * Move the supplied buffer to the indicated state. The mutex 970 * for the buffer must be held by the caller. 971 */ 972 static void 973 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 974 { 975 arc_state_t *old_state = ab->b_state; 976 int64_t refcnt = refcount_count(&ab->b_refcnt); 977 uint64_t from_delta, to_delta; 978 979 ASSERT(MUTEX_HELD(hash_lock)); 980 ASSERT(new_state != old_state); 981 ASSERT(refcnt == 0 || ab->b_datacnt > 0); 982 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 983 984 from_delta = to_delta = ab->b_datacnt * ab->b_size; 985 986 /* 987 * If this buffer is evictable, transfer it from the 988 * old state list to the new state list. 989 */ 990 if (refcnt == 0) { 991 if (old_state != arc_anon) { 992 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx); 993 uint64_t *size = &old_state->arcs_lsize[ab->b_type]; 994 995 if (use_mutex) 996 mutex_enter(&old_state->arcs_mtx); 997 998 ASSERT(list_link_active(&ab->b_arc_node)); 999 list_remove(&old_state->arcs_list[ab->b_type], ab); 1000 1001 /* 1002 * If prefetching out of the ghost cache, 1003 * we will have a non-null datacnt. 
1004 */ 1005 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 1006 /* ghost elements have a ghost size */ 1007 ASSERT(ab->b_buf == NULL); 1008 from_delta = ab->b_size; 1009 } 1010 ASSERT3U(*size, >=, from_delta); 1011 atomic_add_64(size, -from_delta); 1012 1013 if (use_mutex) 1014 mutex_exit(&old_state->arcs_mtx); 1015 } 1016 if (new_state != arc_anon) { 1017 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); 1018 uint64_t *size = &new_state->arcs_lsize[ab->b_type]; 1019 1020 if (use_mutex) 1021 mutex_enter(&new_state->arcs_mtx); 1022 1023 list_insert_head(&new_state->arcs_list[ab->b_type], ab); 1024 1025 /* ghost elements have a ghost size */ 1026 if (GHOST_STATE(new_state)) { 1027 ASSERT(ab->b_datacnt == 0); 1028 ASSERT(ab->b_buf == NULL); 1029 to_delta = ab->b_size; 1030 } 1031 atomic_add_64(size, to_delta); 1032 1033 if (use_mutex) 1034 mutex_exit(&new_state->arcs_mtx); 1035 } 1036 } 1037 1038 ASSERT(!BUF_EMPTY(ab)); 1039 if (new_state == arc_anon) { 1040 buf_hash_remove(ab); 1041 } 1042 1043 /* adjust state sizes */ 1044 if (to_delta) 1045 atomic_add_64(&new_state->arcs_size, to_delta); 1046 if (from_delta) { 1047 ASSERT3U(old_state->arcs_size, >=, from_delta); 1048 atomic_add_64(&old_state->arcs_size, -from_delta); 1049 } 1050 ab->b_state = new_state; 1051 1052 /* adjust l2arc hdr stats */ 1053 if (new_state == arc_l2c_only) 1054 l2arc_hdr_stat_add(); 1055 else if (old_state == arc_l2c_only) 1056 l2arc_hdr_stat_remove(); 1057 } 1058 1059 void 1060 arc_space_consume(uint64_t space) 1061 { 1062 atomic_add_64(&arc_meta_used, space); 1063 atomic_add_64(&arc_size, space); 1064 } 1065 1066 void 1067 arc_space_return(uint64_t space) 1068 { 1069 ASSERT(arc_meta_used >= space); 1070 if (arc_meta_max < arc_meta_used) 1071 arc_meta_max = arc_meta_used; 1072 atomic_add_64(&arc_meta_used, -space); 1073 ASSERT(arc_size >= space); 1074 atomic_add_64(&arc_size, -space); 1075 } 1076 1077 void * 1078 arc_data_buf_alloc(uint64_t size) 1079 { 1080 if (arc_evict_needed(ARC_BUFC_DATA)) 1081 cv_signal(&arc_reclaim_thr_cv); 1082 atomic_add_64(&arc_size, size); 1083 return (zio_data_buf_alloc(size)); 1084 } 1085 1086 void 1087 arc_data_buf_free(void *buf, uint64_t size) 1088 { 1089 zio_data_buf_free(buf, size); 1090 ASSERT(arc_size >= size); 1091 atomic_add_64(&arc_size, -size); 1092 } 1093 1094 arc_buf_t * 1095 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 1096 { 1097 arc_buf_hdr_t *hdr; 1098 arc_buf_t *buf; 1099 1100 ASSERT3U(size, >, 0); 1101 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 1102 ASSERT(BUF_EMPTY(hdr)); 1103 hdr->b_size = size; 1104 hdr->b_type = type; 1105 hdr->b_spa = spa; 1106 hdr->b_state = arc_anon; 1107 hdr->b_arc_access = 0; 1108 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1109 buf->b_hdr = hdr; 1110 buf->b_data = NULL; 1111 buf->b_efunc = NULL; 1112 buf->b_private = NULL; 1113 buf->b_next = NULL; 1114 hdr->b_buf = buf; 1115 arc_get_data_buf(buf); 1116 hdr->b_datacnt = 1; 1117 hdr->b_flags = 0; 1118 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1119 (void) refcount_add(&hdr->b_refcnt, tag); 1120 1121 return (buf); 1122 } 1123 1124 static arc_buf_t * 1125 arc_buf_clone(arc_buf_t *from) 1126 { 1127 arc_buf_t *buf; 1128 arc_buf_hdr_t *hdr = from->b_hdr; 1129 uint64_t size = hdr->b_size; 1130 1131 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1132 buf->b_hdr = hdr; 1133 buf->b_data = NULL; 1134 buf->b_efunc = NULL; 1135 buf->b_private = NULL; 1136 buf->b_next = hdr->b_buf; 1137 hdr->b_buf = buf; 1138 arc_get_data_buf(buf); 1139 bcopy(from->b_data, 
buf->b_data, size); 1140 hdr->b_datacnt += 1; 1141 return (buf); 1142 } 1143 1144 void 1145 arc_buf_add_ref(arc_buf_t *buf, void* tag) 1146 { 1147 arc_buf_hdr_t *hdr; 1148 kmutex_t *hash_lock; 1149 1150 /* 1151 * Check to see if this buffer is currently being evicted via 1152 * arc_do_user_evicts(). 1153 */ 1154 mutex_enter(&arc_eviction_mtx); 1155 hdr = buf->b_hdr; 1156 if (hdr == NULL) { 1157 mutex_exit(&arc_eviction_mtx); 1158 return; 1159 } 1160 hash_lock = HDR_LOCK(hdr); 1161 mutex_exit(&arc_eviction_mtx); 1162 1163 mutex_enter(hash_lock); 1164 if (buf->b_data == NULL) { 1165 /* 1166 * This buffer is evicted. 1167 */ 1168 mutex_exit(hash_lock); 1169 return; 1170 } 1171 1172 ASSERT(buf->b_hdr == hdr); 1173 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 1174 add_reference(hdr, hash_lock, tag); 1175 arc_access(hdr, hash_lock); 1176 mutex_exit(hash_lock); 1177 ARCSTAT_BUMP(arcstat_hits); 1178 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 1179 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 1180 data, metadata, hits); 1181 } 1182 1183 /* 1184 * Free the arc data buffer. If it is an l2arc write in progress, 1185 * the buffer is placed on l2arc_free_on_write to be freed later. 1186 */ 1187 static void 1188 arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t), 1189 void *data, size_t size) 1190 { 1191 if (HDR_L2_WRITING(hdr)) { 1192 l2arc_data_free_t *df; 1193 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); 1194 df->l2df_data = data; 1195 df->l2df_size = size; 1196 df->l2df_func = free_func; 1197 mutex_enter(&l2arc_free_on_write_mtx); 1198 list_insert_head(l2arc_free_on_write, df); 1199 mutex_exit(&l2arc_free_on_write_mtx); 1200 ARCSTAT_BUMP(arcstat_l2_free_on_write); 1201 } else { 1202 free_func(data, size); 1203 } 1204 } 1205 1206 static void 1207 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 1208 { 1209 arc_buf_t **bufp; 1210 1211 /* free up data associated with the buf */ 1212 if (buf->b_data) { 1213 arc_state_t *state = buf->b_hdr->b_state; 1214 uint64_t size = buf->b_hdr->b_size; 1215 arc_buf_contents_t type = buf->b_hdr->b_type; 1216 1217 arc_cksum_verify(buf); 1218 if (!recycle) { 1219 if (type == ARC_BUFC_METADATA) { 1220 arc_buf_data_free(buf->b_hdr, zio_buf_free, 1221 buf->b_data, size); 1222 arc_space_return(size); 1223 } else { 1224 ASSERT(type == ARC_BUFC_DATA); 1225 arc_buf_data_free(buf->b_hdr, 1226 zio_data_buf_free, buf->b_data, size); 1227 atomic_add_64(&arc_size, -size); 1228 } 1229 } 1230 if (list_link_active(&buf->b_hdr->b_arc_node)) { 1231 uint64_t *cnt = &state->arcs_lsize[type]; 1232 1233 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 1234 ASSERT(state != arc_anon); 1235 1236 ASSERT3U(*cnt, >=, size); 1237 atomic_add_64(cnt, -size); 1238 } 1239 ASSERT3U(state->arcs_size, >=, size); 1240 atomic_add_64(&state->arcs_size, -size); 1241 buf->b_data = NULL; 1242 ASSERT(buf->b_hdr->b_datacnt > 0); 1243 buf->b_hdr->b_datacnt -= 1; 1244 } 1245 1246 /* only remove the buf if requested */ 1247 if (!all) 1248 return; 1249 1250 /* remove the buf from the hdr list */ 1251 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1252 continue; 1253 *bufp = buf->b_next; 1254 1255 ASSERT(buf->b_efunc == NULL); 1256 1257 /* clean up the buf */ 1258 buf->b_hdr = NULL; 1259 kmem_cache_free(buf_cache, buf); 1260 } 1261 1262 static void 1263 arc_hdr_destroy(arc_buf_hdr_t *hdr) 1264 { 1265 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1266 ASSERT3P(hdr->b_state, ==, arc_anon); 1267 
ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1268 1269 if (hdr->b_l2hdr != NULL) { 1270 if (!MUTEX_HELD(&l2arc_buflist_mtx)) { 1271 /* 1272 * To prevent arc_free() and l2arc_evict() from 1273 * attempting to free the same buffer at the same time, 1274 * a FREE_IN_PROGRESS flag is given to arc_free() to 1275 * give it priority. l2arc_evict() can't destroy this 1276 * header while we are waiting on l2arc_buflist_mtx. 1277 */ 1278 mutex_enter(&l2arc_buflist_mtx); 1279 ASSERT(hdr->b_l2hdr != NULL); 1280 1281 list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); 1282 mutex_exit(&l2arc_buflist_mtx); 1283 } else { 1284 list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); 1285 } 1286 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1287 kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t)); 1288 if (hdr->b_state == arc_l2c_only) 1289 l2arc_hdr_stat_remove(); 1290 hdr->b_l2hdr = NULL; 1291 } 1292 1293 if (!BUF_EMPTY(hdr)) { 1294 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1295 bzero(&hdr->b_dva, sizeof (dva_t)); 1296 hdr->b_birth = 0; 1297 hdr->b_cksum0 = 0; 1298 } 1299 while (hdr->b_buf) { 1300 arc_buf_t *buf = hdr->b_buf; 1301 1302 if (buf->b_efunc) { 1303 mutex_enter(&arc_eviction_mtx); 1304 ASSERT(buf->b_hdr != NULL); 1305 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1306 hdr->b_buf = buf->b_next; 1307 buf->b_hdr = &arc_eviction_hdr; 1308 buf->b_next = arc_eviction_list; 1309 arc_eviction_list = buf; 1310 mutex_exit(&arc_eviction_mtx); 1311 } else { 1312 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1313 } 1314 } 1315 if (hdr->b_freeze_cksum != NULL) { 1316 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1317 hdr->b_freeze_cksum = NULL; 1318 } 1319 1320 ASSERT(!list_link_active(&hdr->b_arc_node)); 1321 ASSERT3P(hdr->b_hash_next, ==, NULL); 1322 ASSERT3P(hdr->b_acb, ==, NULL); 1323 kmem_cache_free(hdr_cache, hdr); 1324 } 1325 1326 void 1327 arc_buf_free(arc_buf_t *buf, void *tag) 1328 { 1329 arc_buf_hdr_t *hdr = buf->b_hdr; 1330 int hashed = hdr->b_state != arc_anon; 1331 1332 ASSERT(buf->b_efunc == NULL); 1333 ASSERT(buf->b_data != NULL); 1334 1335 if (hashed) { 1336 kmutex_t *hash_lock = HDR_LOCK(hdr); 1337 1338 mutex_enter(hash_lock); 1339 (void) remove_reference(hdr, hash_lock, tag); 1340 if (hdr->b_datacnt > 1) 1341 arc_buf_destroy(buf, FALSE, TRUE); 1342 else 1343 hdr->b_flags |= ARC_BUF_AVAILABLE; 1344 mutex_exit(hash_lock); 1345 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1346 int destroy_hdr; 1347 /* 1348 * We are in the middle of an async write. Don't destroy 1349 * this buffer unless the write completes before we finish 1350 * decrementing the reference count. 
1351 */ 1352 mutex_enter(&arc_eviction_mtx); 1353 (void) remove_reference(hdr, NULL, tag); 1354 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1355 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1356 mutex_exit(&arc_eviction_mtx); 1357 if (destroy_hdr) 1358 arc_hdr_destroy(hdr); 1359 } else { 1360 if (remove_reference(hdr, NULL, tag) > 0) { 1361 ASSERT(HDR_IO_ERROR(hdr)); 1362 arc_buf_destroy(buf, FALSE, TRUE); 1363 } else { 1364 arc_hdr_destroy(hdr); 1365 } 1366 } 1367 } 1368 1369 int 1370 arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1371 { 1372 arc_buf_hdr_t *hdr = buf->b_hdr; 1373 kmutex_t *hash_lock = HDR_LOCK(hdr); 1374 int no_callback = (buf->b_efunc == NULL); 1375 1376 if (hdr->b_state == arc_anon) { 1377 arc_buf_free(buf, tag); 1378 return (no_callback); 1379 } 1380 1381 mutex_enter(hash_lock); 1382 ASSERT(hdr->b_state != arc_anon); 1383 ASSERT(buf->b_data != NULL); 1384 1385 (void) remove_reference(hdr, hash_lock, tag); 1386 if (hdr->b_datacnt > 1) { 1387 if (no_callback) 1388 arc_buf_destroy(buf, FALSE, TRUE); 1389 } else if (no_callback) { 1390 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1391 hdr->b_flags |= ARC_BUF_AVAILABLE; 1392 } 1393 ASSERT(no_callback || hdr->b_datacnt > 1 || 1394 refcount_is_zero(&hdr->b_refcnt)); 1395 mutex_exit(hash_lock); 1396 return (no_callback); 1397 } 1398 1399 int 1400 arc_buf_size(arc_buf_t *buf) 1401 { 1402 return (buf->b_hdr->b_size); 1403 } 1404 1405 /* 1406 * Evict buffers from list until we've removed the specified number of 1407 * bytes. Move the removed buffers to the appropriate evict state. 1408 * If the recycle flag is set, then attempt to "recycle" a buffer: 1409 * - look for a buffer to evict that is `bytes' long. 1410 * - return the data block from this buffer rather than freeing it. 1411 * This flag is used by callers that are trying to make space for a 1412 * new buffer in a full arc cache. 1413 * 1414 * This function makes a "best effort". It skips over any buffers 1415 * it can't get a hash_lock on, and so may not catch all candidates. 1416 * It may also return without evicting as much space as requested. 1417 */ 1418 static void * 1419 arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle, 1420 arc_buf_contents_t type) 1421 { 1422 arc_state_t *evicted_state; 1423 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1424 arc_buf_hdr_t *ab, *ab_prev = NULL; 1425 list_t *list = &state->arcs_list[type]; 1426 kmutex_t *hash_lock; 1427 boolean_t have_lock; 1428 void *stolen = NULL; 1429 1430 ASSERT(state == arc_mru || state == arc_mfu); 1431 1432 evicted_state = (state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 1433 1434 mutex_enter(&state->arcs_mtx); 1435 mutex_enter(&evicted_state->arcs_mtx); 1436 1437 for (ab = list_tail(list); ab; ab = ab_prev) { 1438 ab_prev = list_prev(list, ab); 1439 /* prefetch buffers have a minimum lifespan */ 1440 if (HDR_IO_IN_PROGRESS(ab) || 1441 (spa && ab->b_spa != spa) || 1442 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1443 lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { 1444 skipped++; 1445 continue; 1446 } 1447 /* "lookahead" for better eviction candidate */ 1448 if (recycle && ab->b_size != bytes && 1449 ab_prev && ab_prev->b_size == bytes) 1450 continue; 1451 hash_lock = HDR_LOCK(ab); 1452 have_lock = MUTEX_HELD(hash_lock); 1453 if (have_lock || mutex_tryenter(hash_lock)) { 1454 ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 1455 ASSERT(ab->b_datacnt > 0); 1456 while (ab->b_buf) { 1457 arc_buf_t *buf = ab->b_buf; 1458 if (buf->b_data) { 1459 bytes_evicted += ab->b_size; 1460 if (recycle && ab->b_type == type && 1461 ab->b_size == bytes && 1462 !HDR_L2_WRITING(ab)) { 1463 stolen = buf->b_data; 1464 recycle = FALSE; 1465 } 1466 } 1467 if (buf->b_efunc) { 1468 mutex_enter(&arc_eviction_mtx); 1469 arc_buf_destroy(buf, 1470 buf->b_data == stolen, FALSE); 1471 ab->b_buf = buf->b_next; 1472 buf->b_hdr = &arc_eviction_hdr; 1473 buf->b_next = arc_eviction_list; 1474 arc_eviction_list = buf; 1475 mutex_exit(&arc_eviction_mtx); 1476 } else { 1477 arc_buf_destroy(buf, 1478 buf->b_data == stolen, TRUE); 1479 } 1480 } 1481 ASSERT(ab->b_datacnt == 0); 1482 arc_change_state(evicted_state, ab, hash_lock); 1483 ASSERT(HDR_IN_HASH_TABLE(ab)); 1484 ab->b_flags |= ARC_IN_HASH_TABLE; 1485 ab->b_flags &= ~ARC_BUF_AVAILABLE; 1486 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 1487 if (!have_lock) 1488 mutex_exit(hash_lock); 1489 if (bytes >= 0 && bytes_evicted >= bytes) 1490 break; 1491 } else { 1492 missed += 1; 1493 } 1494 } 1495 1496 mutex_exit(&evicted_state->arcs_mtx); 1497 mutex_exit(&state->arcs_mtx); 1498 1499 if (bytes_evicted < bytes) 1500 dprintf("only evicted %lld bytes from %x", 1501 (longlong_t)bytes_evicted, state); 1502 1503 if (skipped) 1504 ARCSTAT_INCR(arcstat_evict_skip, skipped); 1505 1506 if (missed) 1507 ARCSTAT_INCR(arcstat_mutex_miss, missed); 1508 1509 /* 1510 * We have just evicted some date into the ghost state, make 1511 * sure we also adjust the ghost state size if necessary. 1512 */ 1513 if (arc_no_grow && 1514 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 1515 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 1516 arc_mru_ghost->arcs_size - arc_c; 1517 1518 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 1519 int64_t todelete = 1520 MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 1521 arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1522 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 1523 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 1524 arc_mru_ghost->arcs_size + 1525 arc_mfu_ghost->arcs_size - arc_c); 1526 arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1527 } 1528 } 1529 1530 return (stolen); 1531 } 1532 1533 /* 1534 * Remove buffers from list until we've removed the specified number of 1535 * bytes. Destroy the buffers that are removed. 
1536 */ 1537 static void 1538 arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes) 1539 { 1540 arc_buf_hdr_t *ab, *ab_prev; 1541 list_t *list = &state->arcs_list[ARC_BUFC_DATA]; 1542 kmutex_t *hash_lock; 1543 uint64_t bytes_deleted = 0; 1544 uint64_t bufs_skipped = 0; 1545 1546 ASSERT(GHOST_STATE(state)); 1547 top: 1548 mutex_enter(&state->arcs_mtx); 1549 for (ab = list_tail(list); ab; ab = ab_prev) { 1550 ab_prev = list_prev(list, ab); 1551 if (spa && ab->b_spa != spa) 1552 continue; 1553 hash_lock = HDR_LOCK(ab); 1554 if (mutex_tryenter(hash_lock)) { 1555 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1556 ASSERT(ab->b_buf == NULL); 1557 ARCSTAT_BUMP(arcstat_deleted); 1558 bytes_deleted += ab->b_size; 1559 1560 if (ab->b_l2hdr != NULL) { 1561 /* 1562 * This buffer is cached on the 2nd Level ARC; 1563 * don't destroy the header. 1564 */ 1565 arc_change_state(arc_l2c_only, ab, hash_lock); 1566 mutex_exit(hash_lock); 1567 } else { 1568 arc_change_state(arc_anon, ab, hash_lock); 1569 mutex_exit(hash_lock); 1570 arc_hdr_destroy(ab); 1571 } 1572 1573 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1574 if (bytes >= 0 && bytes_deleted >= bytes) 1575 break; 1576 } else { 1577 if (bytes < 0) { 1578 mutex_exit(&state->arcs_mtx); 1579 mutex_enter(hash_lock); 1580 mutex_exit(hash_lock); 1581 goto top; 1582 } 1583 bufs_skipped += 1; 1584 } 1585 } 1586 mutex_exit(&state->arcs_mtx); 1587 1588 if (list == &state->arcs_list[ARC_BUFC_DATA] && 1589 (bytes < 0 || bytes_deleted < bytes)) { 1590 list = &state->arcs_list[ARC_BUFC_METADATA]; 1591 goto top; 1592 } 1593 1594 if (bufs_skipped) { 1595 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1596 ASSERT(bytes >= 0); 1597 } 1598 1599 if (bytes_deleted < bytes) 1600 dprintf("only deleted %lld bytes from %p", 1601 (longlong_t)bytes_deleted, state); 1602 } 1603 1604 static void 1605 arc_adjust(void) 1606 { 1607 int64_t top_sz, mru_over, arc_over, todelete; 1608 1609 top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used; 1610 1611 if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 1612 int64_t toevict = 1613 MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p); 1614 (void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA); 1615 top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1616 } 1617 1618 if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1619 int64_t toevict = 1620 MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p); 1621 (void) arc_evict(arc_mru, NULL, toevict, FALSE, 1622 ARC_BUFC_METADATA); 1623 top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1624 } 1625 1626 mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; 1627 1628 if (mru_over > 0) { 1629 if (arc_mru_ghost->arcs_size > 0) { 1630 todelete = MIN(arc_mru_ghost->arcs_size, mru_over); 1631 arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1632 } 1633 } 1634 1635 if ((arc_over = arc_size - arc_c) > 0) { 1636 int64_t tbl_over; 1637 1638 if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 1639 int64_t toevict = 1640 MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over); 1641 (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 1642 ARC_BUFC_DATA); 1643 arc_over = arc_size - arc_c; 1644 } 1645 1646 if (arc_over > 0 && 1647 arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1648 int64_t toevict = 1649 MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], 1650 arc_over); 1651 (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 1652 ARC_BUFC_METADATA); 1653 } 1654 1655 tbl_over = arc_size + arc_mru_ghost->arcs_size + 1656 arc_mfu_ghost->arcs_size - arc_c * 2; 1657 1658 if (tbl_over > 
0 && arc_mfu_ghost->arcs_size > 0) { 1659 todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over); 1660 arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1661 } 1662 } 1663 } 1664 1665 static void 1666 arc_do_user_evicts(void) 1667 { 1668 mutex_enter(&arc_eviction_mtx); 1669 while (arc_eviction_list != NULL) { 1670 arc_buf_t *buf = arc_eviction_list; 1671 arc_eviction_list = buf->b_next; 1672 buf->b_hdr = NULL; 1673 mutex_exit(&arc_eviction_mtx); 1674 1675 if (buf->b_efunc != NULL) 1676 VERIFY(buf->b_efunc(buf) == 0); 1677 1678 buf->b_efunc = NULL; 1679 buf->b_private = NULL; 1680 kmem_cache_free(buf_cache, buf); 1681 mutex_enter(&arc_eviction_mtx); 1682 } 1683 mutex_exit(&arc_eviction_mtx); 1684 } 1685 1686 /* 1687 * Flush all *evictable* data from the cache for the given spa. 1688 * NOTE: this will not touch "active" (i.e. referenced) data. 1689 */ 1690 void 1691 arc_flush(spa_t *spa) 1692 { 1693 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { 1694 (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA); 1695 if (spa) 1696 break; 1697 } 1698 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { 1699 (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA); 1700 if (spa) 1701 break; 1702 } 1703 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { 1704 (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA); 1705 if (spa) 1706 break; 1707 } 1708 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { 1709 (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA); 1710 if (spa) 1711 break; 1712 } 1713 1714 arc_evict_ghost(arc_mru_ghost, spa, -1); 1715 arc_evict_ghost(arc_mfu_ghost, spa, -1); 1716 1717 mutex_enter(&arc_reclaim_thr_lock); 1718 arc_do_user_evicts(); 1719 mutex_exit(&arc_reclaim_thr_lock); 1720 ASSERT(spa || arc_eviction_list == NULL); 1721 } 1722 1723 int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ 1724 1725 void 1726 arc_shrink(void) 1727 { 1728 if (arc_c > arc_c_min) { 1729 uint64_t to_free; 1730 1731 #ifdef _KERNEL 1732 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 1733 #else 1734 to_free = arc_c >> arc_shrink_shift; 1735 #endif 1736 if (arc_c > arc_c_min + to_free) 1737 atomic_add_64(&arc_c, -to_free); 1738 else 1739 arc_c = arc_c_min; 1740 1741 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 1742 if (arc_c > arc_size) 1743 arc_c = MAX(arc_size, arc_c_min); 1744 if (arc_p > arc_c) 1745 arc_p = (arc_c >> 1); 1746 ASSERT(arc_c >= arc_c_min); 1747 ASSERT((int64_t)arc_p >= 0); 1748 } 1749 1750 if (arc_size > arc_c) 1751 arc_adjust(); 1752 } 1753 1754 static int 1755 arc_reclaim_needed(void) 1756 { 1757 uint64_t extra; 1758 1759 #ifdef _KERNEL 1760 1761 if (needfree) 1762 return (1); 1763 1764 /* 1765 * take 'desfree' extra pages, so we reclaim sooner, rather than later 1766 */ 1767 extra = desfree; 1768 1769 /* 1770 * check that we're out of range of the pageout scanner. It starts to 1771 * schedule paging if freemem is less than lotsfree and needfree. 1772 * lotsfree is the high-water mark for pageout, and needfree is the 1773 * number of needed free pages. We add extra pages here to make sure 1774 * the scanner doesn't start up while we're freeing memory. 1775 */ 1776 if (freemem < lotsfree + needfree + extra) 1777 return (1); 1778 1779 /* 1780 * check to make sure that swapfs has enough space so that anon 1781 * reservations can still succeed. anon_resvmem() checks that the 1782 * availrmem is greater than swapfs_minfree, and the number of reserved 1783 * swap pages. 
We also add a bit of extra here just to prevent 1784 * circumstances from getting really dire. 1785 */ 1786 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 1787 return (1); 1788 1789 #if defined(__i386) 1790 /* 1791 * If we're on an i386 platform, it's possible that we'll exhaust the 1792 * kernel heap space before we ever run out of available physical 1793 * memory. Most checks of the size of the heap_area compare against 1794 * tune.t_minarmem, which is the minimum available real memory that we 1795 * can have in the system. However, this is generally fixed at 25 pages 1796 * which is so low that it's useless. In this comparison, we seek to 1797 * calculate the total heap-size, and reclaim if more than 3/4ths of the 1798 * heap is allocated. (Or, in the calculation, if less than 1/4th is 1799 * free) 1800 */ 1801 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 1802 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 1803 return (1); 1804 #endif 1805 1806 #else 1807 if (spa_get_random(100) == 0) 1808 return (1); 1809 #endif 1810 return (0); 1811 } 1812 1813 static void 1814 arc_kmem_reap_now(arc_reclaim_strategy_t strat) 1815 { 1816 size_t i; 1817 kmem_cache_t *prev_cache = NULL; 1818 kmem_cache_t *prev_data_cache = NULL; 1819 extern kmem_cache_t *zio_buf_cache[]; 1820 extern kmem_cache_t *zio_data_buf_cache[]; 1821 1822 #ifdef _KERNEL 1823 if (arc_meta_used >= arc_meta_limit) { 1824 /* 1825 * We are exceeding our meta-data cache limit. 1826 * Purge some DNLC entries to release holds on meta-data. 1827 */ 1828 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 1829 } 1830 #if defined(__i386) 1831 /* 1832 * Reclaim unused memory from all kmem caches. 1833 */ 1834 kmem_reap(); 1835 #endif 1836 #endif 1837 1838 /* 1839 * An aggressive reclamation will shrink the cache size as well as 1840 * reap free buffers from the arc kmem caches. 
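	 * A conservative pass (ARC_RECLAIM_CONS) leaves the cache target
	 * alone and only reaps unused buffers from the kmem caches below.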
1841 */ 1842 if (strat == ARC_RECLAIM_AGGR) 1843 arc_shrink(); 1844 1845 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1846 if (zio_buf_cache[i] != prev_cache) { 1847 prev_cache = zio_buf_cache[i]; 1848 kmem_cache_reap_now(zio_buf_cache[i]); 1849 } 1850 if (zio_data_buf_cache[i] != prev_data_cache) { 1851 prev_data_cache = zio_data_buf_cache[i]; 1852 kmem_cache_reap_now(zio_data_buf_cache[i]); 1853 } 1854 } 1855 kmem_cache_reap_now(buf_cache); 1856 kmem_cache_reap_now(hdr_cache); 1857 } 1858 1859 static void 1860 arc_reclaim_thread(void) 1861 { 1862 clock_t growtime = 0; 1863 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1864 callb_cpr_t cpr; 1865 1866 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1867 1868 mutex_enter(&arc_reclaim_thr_lock); 1869 while (arc_thread_exit == 0) { 1870 if (arc_reclaim_needed()) { 1871 1872 if (arc_no_grow) { 1873 if (last_reclaim == ARC_RECLAIM_CONS) { 1874 last_reclaim = ARC_RECLAIM_AGGR; 1875 } else { 1876 last_reclaim = ARC_RECLAIM_CONS; 1877 } 1878 } else { 1879 arc_no_grow = TRUE; 1880 last_reclaim = ARC_RECLAIM_AGGR; 1881 membar_producer(); 1882 } 1883 1884 /* reset the growth delay for every reclaim */ 1885 growtime = lbolt + (arc_grow_retry * hz); 1886 1887 arc_kmem_reap_now(last_reclaim); 1888 1889 } else if (arc_no_grow && lbolt >= growtime) { 1890 arc_no_grow = FALSE; 1891 } 1892 1893 if (2 * arc_c < arc_size + 1894 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) 1895 arc_adjust(); 1896 1897 if (arc_eviction_list != NULL) 1898 arc_do_user_evicts(); 1899 1900 /* block until needed, or one second, whichever is shorter */ 1901 CALLB_CPR_SAFE_BEGIN(&cpr); 1902 (void) cv_timedwait(&arc_reclaim_thr_cv, 1903 &arc_reclaim_thr_lock, (lbolt + hz)); 1904 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 1905 } 1906 1907 arc_thread_exit = 0; 1908 cv_broadcast(&arc_reclaim_thr_cv); 1909 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 1910 thread_exit(); 1911 } 1912 1913 /* 1914 * Adapt arc info given the number of bytes we are trying to add and 1915 * the state that we are comming from. This function is only called 1916 * when we are adding new content to the cache. 1917 */ 1918 static void 1919 arc_adapt(int bytes, arc_state_t *state) 1920 { 1921 int mult; 1922 1923 if (state == arc_l2c_only) 1924 return; 1925 1926 ASSERT(bytes > 0); 1927 /* 1928 * Adapt the target size of the MRU list: 1929 * - if we just hit in the MRU ghost list, then increase 1930 * the target size of the MRU list. 1931 * - if we just hit in the MFU ghost list, then increase 1932 * the target size of the MFU list by decreasing the 1933 * target size of the MRU list. 1934 */ 1935 if (state == arc_mru_ghost) { 1936 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 1937 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 1938 1939 arc_p = MIN(arc_c, arc_p + bytes * mult); 1940 } else if (state == arc_mfu_ghost) { 1941 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 
1942 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 1943 1944 arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 1945 } 1946 ASSERT((int64_t)arc_p >= 0); 1947 1948 if (arc_reclaim_needed()) { 1949 cv_signal(&arc_reclaim_thr_cv); 1950 return; 1951 } 1952 1953 if (arc_no_grow) 1954 return; 1955 1956 if (arc_c >= arc_c_max) 1957 return; 1958 1959 /* 1960 * If we're within (2 * maxblocksize) bytes of the target 1961 * cache size, increment the target cache size 1962 */ 1963 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 1964 atomic_add_64(&arc_c, (int64_t)bytes); 1965 if (arc_c > arc_c_max) 1966 arc_c = arc_c_max; 1967 else if (state == arc_anon) 1968 atomic_add_64(&arc_p, (int64_t)bytes); 1969 if (arc_p > arc_c) 1970 arc_p = arc_c; 1971 } 1972 ASSERT((int64_t)arc_p >= 0); 1973 } 1974 1975 /* 1976 * Check if the cache has reached its limits and eviction is required 1977 * prior to insert. 1978 */ 1979 static int 1980 arc_evict_needed(arc_buf_contents_t type) 1981 { 1982 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 1983 return (1); 1984 1985 #ifdef _KERNEL 1986 /* 1987 * If zio data pages are being allocated out of a separate heap segment, 1988 * then enforce that the size of available vmem for this area remains 1989 * above about 1/32nd free. 1990 */ 1991 if (type == ARC_BUFC_DATA && zio_arena != NULL && 1992 vmem_size(zio_arena, VMEM_FREE) < 1993 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 1994 return (1); 1995 #endif 1996 1997 if (arc_reclaim_needed()) 1998 return (1); 1999 2000 return (arc_size > arc_c); 2001 } 2002 2003 /* 2004 * The buffer, supplied as the first argument, needs a data block. 2005 * So, if we are at cache max, determine which cache should be victimized. 2006 * We have the following cases: 2007 * 2008 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2009 * In this situation if we're out of space, but the resident size of the MFU is 2010 * under the limit, victimize the MFU cache to satisfy this insertion request. 2011 * 2012 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2013 * Here, we've used up all of the available space for the MRU, so we need to 2014 * evict from our own cache instead. Evict from the set of resident MRU 2015 * entries. 2016 * 2017 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2018 * c minus p represents the MFU space in the cache, since p is the size of the 2019 * cache that is dedicated to the MRU. In this situation there's still space on 2020 * the MFU side, so the MRU side needs to be victimized. 2021 * 2022 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2023 * MFU's resident set is consuming more space than it has been allotted. In 2024 * this situation, we must victimize our own cache, the MFU, for this insertion. 2025 */ 2026 static void 2027 arc_get_data_buf(arc_buf_t *buf) 2028 { 2029 arc_state_t *state = buf->b_hdr->b_state; 2030 uint64_t size = buf->b_hdr->b_size; 2031 arc_buf_contents_t type = buf->b_hdr->b_type; 2032 2033 arc_adapt(size, state); 2034 2035 /* 2036 * We have not yet reached cache maximum size, 2037 * just allocate a new buffer. 2038 */ 2039 if (!arc_evict_needed(type)) { 2040 if (type == ARC_BUFC_METADATA) { 2041 buf->b_data = zio_buf_alloc(size); 2042 arc_space_consume(size); 2043 } else { 2044 ASSERT(type == ARC_BUFC_DATA); 2045 buf->b_data = zio_data_buf_alloc(size); 2046 atomic_add_64(&arc_size, size); 2047 } 2048 goto out; 2049 } 2050 2051 /* 2052 * If we are prefetching from the mfu ghost list, this buffer 2053 * will end up on the mru list; so steal space from there. 
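	 *
	 * To make the case analysis above concrete (illustrative numbers
	 * only): with arc_p at 4GB, an MRU-bound insert while anon + mru
	 * hold 3GB recycles a buffer from the MFU list, assuming the MFU
	 * has evictable buffers of this type (case 1); once anon + mru
	 * exceed arc_p, the insert recycles from the MRU itself (case 2).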
2054 */ 2055 if (state == arc_mfu_ghost) 2056 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2057 else if (state == arc_mru_ghost) 2058 state = arc_mru; 2059 2060 if (state == arc_mru || state == arc_anon) { 2061 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2062 state = (arc_mfu->arcs_lsize[type] > 0 && 2063 arc_p > mru_used) ? arc_mfu : arc_mru; 2064 } else { 2065 /* MFU cases */ 2066 uint64_t mfu_space = arc_c - arc_p; 2067 state = (arc_mru->arcs_lsize[type] > 0 && 2068 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2069 } 2070 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 2071 if (type == ARC_BUFC_METADATA) { 2072 buf->b_data = zio_buf_alloc(size); 2073 arc_space_consume(size); 2074 } else { 2075 ASSERT(type == ARC_BUFC_DATA); 2076 buf->b_data = zio_data_buf_alloc(size); 2077 atomic_add_64(&arc_size, size); 2078 } 2079 ARCSTAT_BUMP(arcstat_recycle_miss); 2080 } 2081 ASSERT(buf->b_data != NULL); 2082 out: 2083 /* 2084 * Update the state size. Note that ghost states have a 2085 * "ghost size" and so don't need to be updated. 2086 */ 2087 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2088 arc_buf_hdr_t *hdr = buf->b_hdr; 2089 2090 atomic_add_64(&hdr->b_state->arcs_size, size); 2091 if (list_link_active(&hdr->b_arc_node)) { 2092 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2093 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2094 } 2095 /* 2096 * If we are growing the cache, and we are adding anonymous 2097 * data, and we have outgrown arc_p, update arc_p 2098 */ 2099 if (arc_size < arc_c && hdr->b_state == arc_anon && 2100 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2101 arc_p = MIN(arc_c, arc_p + size); 2102 } 2103 } 2104 2105 /* 2106 * This routine is called whenever a buffer is accessed. 2107 * NOTE: the hash lock is dropped in this function. 2108 */ 2109 static void 2110 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2111 { 2112 ASSERT(MUTEX_HELD(hash_lock)); 2113 2114 if (buf->b_state == arc_anon) { 2115 /* 2116 * This buffer is not in the cache, and does not 2117 * appear in our "ghost" list. Add the new buffer 2118 * to the MRU state. 2119 */ 2120 2121 ASSERT(buf->b_arc_access == 0); 2122 buf->b_arc_access = lbolt; 2123 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2124 arc_change_state(arc_mru, buf, hash_lock); 2125 2126 } else if (buf->b_state == arc_mru) { 2127 /* 2128 * If this buffer is here because of a prefetch, then either: 2129 * - clear the flag if this is a "referencing" read 2130 * (any subsequent access will bump this into the MFU state). 2131 * or 2132 * - move the buffer to the head of the list if this is 2133 * another prefetch (to make it less likely to be evicted). 2134 */ 2135 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2136 if (refcount_count(&buf->b_refcnt) == 0) { 2137 ASSERT(list_link_active(&buf->b_arc_node)); 2138 } else { 2139 buf->b_flags &= ~ARC_PREFETCH; 2140 ARCSTAT_BUMP(arcstat_mru_hits); 2141 } 2142 buf->b_arc_access = lbolt; 2143 return; 2144 } 2145 2146 /* 2147 * This buffer has been "accessed" only once so far, 2148 * but it is still in the cache. Move it to the MFU 2149 * state. 2150 */ 2151 if (lbolt > buf->b_arc_access + ARC_MINTIME) { 2152 /* 2153 * More than 125ms have passed since we 2154 * instantiated this buffer. Move it to the 2155 * most frequently used state. 
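			 * (The ARC_MINTIME window keeps a quick burst of
			 * hits right after insertion from promoting the
			 * buffer prematurely.)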
2156 */ 2157 buf->b_arc_access = lbolt; 2158 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2159 arc_change_state(arc_mfu, buf, hash_lock); 2160 } 2161 ARCSTAT_BUMP(arcstat_mru_hits); 2162 } else if (buf->b_state == arc_mru_ghost) { 2163 arc_state_t *new_state; 2164 /* 2165 * This buffer has been "accessed" recently, but 2166 * was evicted from the cache. Move it to the 2167 * MFU state. 2168 */ 2169 2170 if (buf->b_flags & ARC_PREFETCH) { 2171 new_state = arc_mru; 2172 if (refcount_count(&buf->b_refcnt) > 0) 2173 buf->b_flags &= ~ARC_PREFETCH; 2174 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2175 } else { 2176 new_state = arc_mfu; 2177 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2178 } 2179 2180 buf->b_arc_access = lbolt; 2181 arc_change_state(new_state, buf, hash_lock); 2182 2183 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2184 } else if (buf->b_state == arc_mfu) { 2185 /* 2186 * This buffer has been accessed more than once and is 2187 * still in the cache. Keep it in the MFU state. 2188 * 2189 * NOTE: an add_reference() that occurred when we did 2190 * the arc_read() will have kicked this off the list. 2191 * If it was a prefetch, we will explicitly move it to 2192 * the head of the list now. 2193 */ 2194 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2195 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2196 ASSERT(list_link_active(&buf->b_arc_node)); 2197 } 2198 ARCSTAT_BUMP(arcstat_mfu_hits); 2199 buf->b_arc_access = lbolt; 2200 } else if (buf->b_state == arc_mfu_ghost) { 2201 arc_state_t *new_state = arc_mfu; 2202 /* 2203 * This buffer has been accessed more than once but has 2204 * been evicted from the cache. Move it back to the 2205 * MFU state. 2206 */ 2207 2208 if (buf->b_flags & ARC_PREFETCH) { 2209 /* 2210 * This is a prefetch access... 2211 * move this block back to the MRU state. 2212 */ 2213 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 2214 new_state = arc_mru; 2215 } 2216 2217 buf->b_arc_access = lbolt; 2218 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2219 arc_change_state(new_state, buf, hash_lock); 2220 2221 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2222 } else if (buf->b_state == arc_l2c_only) { 2223 /* 2224 * This buffer is on the 2nd Level ARC. 2225 */ 2226 2227 buf->b_arc_access = lbolt; 2228 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2229 arc_change_state(arc_mfu, buf, hash_lock); 2230 } else { 2231 ASSERT(!"invalid arc state"); 2232 } 2233 } 2234 2235 /* a generic arc_done_func_t which you can use */ 2236 /* ARGSUSED */ 2237 void 2238 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2239 { 2240 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2241 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2242 } 2243 2244 /* a generic arc_done_func_t */ 2245 void 2246 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2247 { 2248 arc_buf_t **bufp = arg; 2249 if (zio && zio->io_error) { 2250 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2251 *bufp = NULL; 2252 } else { 2253 *bufp = buf; 2254 } 2255 } 2256 2257 static void 2258 arc_read_done(zio_t *zio) 2259 { 2260 arc_buf_hdr_t *hdr, *found; 2261 arc_buf_t *buf; 2262 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2263 kmutex_t *hash_lock; 2264 arc_callback_t *callback_list, *acb; 2265 int freeable = FALSE; 2266 2267 buf = zio->io_private; 2268 hdr = buf->b_hdr; 2269 2270 /* 2271 * The hdr was inserted into hash-table and removed from lists 2272 * prior to starting I/O. 
We should find this header, since 2273 * it's in the hash table, and it should be legit since it's 2274 * not possible to evict it during the I/O. The only possible 2275 * reason for it not to be found is if we were freed during the 2276 * read. 2277 */ 2278 found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 2279 &hash_lock); 2280 2281 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2282 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2283 (found == hdr && HDR_L2_READING(hdr))); 2284 2285 hdr->b_flags &= ~(ARC_L2_READING|ARC_L2_EVICTED); 2286 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2287 hdr->b_flags |= ARC_DONT_L2CACHE; 2288 2289 /* byteswap if necessary */ 2290 callback_list = hdr->b_acb; 2291 ASSERT(callback_list != NULL); 2292 if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 2293 callback_list->acb_byteswap(buf->b_data, hdr->b_size); 2294 2295 arc_cksum_compute(buf, B_FALSE); 2296 2297 /* create copies of the data buffer for the callers */ 2298 abuf = buf; 2299 for (acb = callback_list; acb; acb = acb->acb_next) { 2300 if (acb->acb_done) { 2301 if (abuf == NULL) 2302 abuf = arc_buf_clone(buf); 2303 acb->acb_buf = abuf; 2304 abuf = NULL; 2305 } 2306 } 2307 hdr->b_acb = NULL; 2308 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2309 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2310 if (abuf == buf) 2311 hdr->b_flags |= ARC_BUF_AVAILABLE; 2312 2313 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2314 2315 if (zio->io_error != 0) { 2316 hdr->b_flags |= ARC_IO_ERROR; 2317 if (hdr->b_state != arc_anon) 2318 arc_change_state(arc_anon, hdr, hash_lock); 2319 if (HDR_IN_HASH_TABLE(hdr)) 2320 buf_hash_remove(hdr); 2321 freeable = refcount_is_zero(&hdr->b_refcnt); 2322 /* convert checksum errors into IO errors */ 2323 if (zio->io_error == ECKSUM) 2324 zio->io_error = EIO; 2325 } 2326 2327 /* 2328 * Broadcast before we drop the hash_lock to avoid the possibility 2329 * that the hdr (and hence the cv) might be freed before we get to 2330 * the cv_broadcast(). 2331 */ 2332 cv_broadcast(&hdr->b_cv); 2333 2334 if (hash_lock) { 2335 /* 2336 * Only call arc_access on anonymous buffers. This is because 2337 * if we've issued an I/O for an evicted buffer, we've already 2338 * called arc_access (to prevent any simultaneous readers from 2339 * getting confused). 2340 */ 2341 if (zio->io_error == 0 && hdr->b_state == arc_anon) 2342 arc_access(hdr, hash_lock); 2343 mutex_exit(hash_lock); 2344 } else { 2345 /* 2346 * This block was freed while we waited for the read to 2347 * complete. It has been removed from the hash table and 2348 * moved to the anonymous state (so that it won't show up 2349 * in the cache). 2350 */ 2351 ASSERT3P(hdr->b_state, ==, arc_anon); 2352 freeable = refcount_is_zero(&hdr->b_refcnt); 2353 } 2354 2355 /* execute each callback and free its structure */ 2356 while ((acb = callback_list) != NULL) { 2357 if (acb->acb_done) 2358 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2359 2360 if (acb->acb_zio_dummy != NULL) { 2361 acb->acb_zio_dummy->io_error = zio->io_error; 2362 zio_nowait(acb->acb_zio_dummy); 2363 } 2364 2365 callback_list = acb->acb_next; 2366 kmem_free(acb, sizeof (arc_callback_t)); 2367 } 2368 2369 if (freeable) 2370 arc_hdr_destroy(hdr); 2371 } 2372 2373 /* 2374 * "Read" the block block at the specified DVA (in bp) via the 2375 * cache. If the block is found in the cache, invoke the provided 2376 * callback immediately and return. 
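 *
 * As an illustrative sketch only (not a caller in this file; byteswap,
 * zb and error belong to the caller's context), a synchronous consumer
 * might issue:
 *
 *	uint32_t aflags = ARC_WAIT;
 *	arc_buf_t *abuf = NULL;
 *
 *	error = arc_read(NULL, spa, bp, byteswap, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
 *
 * On a cache hit the done callback runs before arc_read() returns.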
Note that the `zio' parameter 2377 * in the callback will be NULL in this case, since no IO was 2378 * required. If the block is not in the cache pass the read request 2379 * on to the spa with a substitute callback function, so that the 2380 * requested block will be added to the cache. 2381 * 2382 * If a read request arrives for a block that has a read in-progress, 2383 * either wait for the in-progress read to complete (and return the 2384 * results); or, if this is a read with a "done" func, add a record 2385 * to the read to invoke the "done" func when the read completes, 2386 * and return; or just return. 2387 * 2388 * arc_read_done() will invoke all the requested "done" functions 2389 * for readers of this block. 2390 */ 2391 int 2392 arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 2393 arc_done_func_t *done, void *private, int priority, int flags, 2394 uint32_t *arc_flags, zbookmark_t *zb) 2395 { 2396 arc_buf_hdr_t *hdr; 2397 arc_buf_t *buf; 2398 kmutex_t *hash_lock; 2399 zio_t *rzio; 2400 2401 top: 2402 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2403 if (hdr && hdr->b_datacnt > 0) { 2404 2405 *arc_flags |= ARC_CACHED; 2406 2407 if (HDR_IO_IN_PROGRESS(hdr)) { 2408 2409 if (*arc_flags & ARC_WAIT) { 2410 cv_wait(&hdr->b_cv, hash_lock); 2411 mutex_exit(hash_lock); 2412 goto top; 2413 } 2414 ASSERT(*arc_flags & ARC_NOWAIT); 2415 2416 if (done) { 2417 arc_callback_t *acb = NULL; 2418 2419 acb = kmem_zalloc(sizeof (arc_callback_t), 2420 KM_SLEEP); 2421 acb->acb_done = done; 2422 acb->acb_private = private; 2423 acb->acb_byteswap = swap; 2424 if (pio != NULL) 2425 acb->acb_zio_dummy = zio_null(pio, 2426 spa, NULL, NULL, flags); 2427 2428 ASSERT(acb->acb_done != NULL); 2429 acb->acb_next = hdr->b_acb; 2430 hdr->b_acb = acb; 2431 add_reference(hdr, hash_lock, private); 2432 mutex_exit(hash_lock); 2433 return (0); 2434 } 2435 mutex_exit(hash_lock); 2436 return (0); 2437 } 2438 2439 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2440 2441 if (done) { 2442 add_reference(hdr, hash_lock, private); 2443 /* 2444 * If this block is already in use, create a new 2445 * copy of the data so that we will be guaranteed 2446 * that arc_release() will always succeed. 
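			 * (If ARC_BUF_AVAILABLE is still set, the existing
			 * buf has not been handed to any caller and can be
			 * used directly; otherwise we clone it below.)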
2447 */ 2448 buf = hdr->b_buf; 2449 ASSERT(buf); 2450 ASSERT(buf->b_data); 2451 if (HDR_BUF_AVAILABLE(hdr)) { 2452 ASSERT(buf->b_efunc == NULL); 2453 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2454 } else { 2455 buf = arc_buf_clone(buf); 2456 } 2457 } else if (*arc_flags & ARC_PREFETCH && 2458 refcount_count(&hdr->b_refcnt) == 0) { 2459 hdr->b_flags |= ARC_PREFETCH; 2460 } 2461 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2462 arc_access(hdr, hash_lock); 2463 mutex_exit(hash_lock); 2464 ARCSTAT_BUMP(arcstat_hits); 2465 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2466 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2467 data, metadata, hits); 2468 2469 if (done) 2470 done(NULL, buf, private); 2471 } else { 2472 uint64_t size = BP_GET_LSIZE(bp); 2473 arc_callback_t *acb; 2474 2475 if (hdr == NULL) { 2476 /* this block is not in the cache */ 2477 arc_buf_hdr_t *exists; 2478 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2479 buf = arc_buf_alloc(spa, size, private, type); 2480 hdr = buf->b_hdr; 2481 hdr->b_dva = *BP_IDENTITY(bp); 2482 hdr->b_birth = bp->blk_birth; 2483 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2484 exists = buf_hash_insert(hdr, &hash_lock); 2485 if (exists) { 2486 /* somebody beat us to the hash insert */ 2487 mutex_exit(hash_lock); 2488 bzero(&hdr->b_dva, sizeof (dva_t)); 2489 hdr->b_birth = 0; 2490 hdr->b_cksum0 = 0; 2491 (void) arc_buf_remove_ref(buf, private); 2492 goto top; /* restart the IO request */ 2493 } 2494 /* if this is a prefetch, we don't have a reference */ 2495 if (*arc_flags & ARC_PREFETCH) { 2496 (void) remove_reference(hdr, hash_lock, 2497 private); 2498 hdr->b_flags |= ARC_PREFETCH; 2499 } 2500 if (BP_GET_LEVEL(bp) > 0) 2501 hdr->b_flags |= ARC_INDIRECT; 2502 } else { 2503 /* this block is in the ghost cache */ 2504 ASSERT(GHOST_STATE(hdr->b_state)); 2505 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2506 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2507 ASSERT(hdr->b_buf == NULL); 2508 2509 /* if this is a prefetch, we don't have a reference */ 2510 if (*arc_flags & ARC_PREFETCH) 2511 hdr->b_flags |= ARC_PREFETCH; 2512 else 2513 add_reference(hdr, hash_lock, private); 2514 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2515 buf->b_hdr = hdr; 2516 buf->b_data = NULL; 2517 buf->b_efunc = NULL; 2518 buf->b_private = NULL; 2519 buf->b_next = NULL; 2520 hdr->b_buf = buf; 2521 arc_get_data_buf(buf); 2522 ASSERT(hdr->b_datacnt == 0); 2523 hdr->b_datacnt = 1; 2524 2525 } 2526 2527 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2528 acb->acb_done = done; 2529 acb->acb_private = private; 2530 acb->acb_byteswap = swap; 2531 2532 ASSERT(hdr->b_acb == NULL); 2533 hdr->b_acb = acb; 2534 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2535 2536 /* 2537 * If the buffer has been evicted, migrate it to a present state 2538 * before issuing the I/O. Once we drop the hash-table lock, 2539 * the header will be marked as I/O in progress and have an 2540 * attached buffer. At this point, anybody who finds this 2541 * buffer ought to notice that it's legit but has a pending I/O. 2542 */ 2543 2544 if (GHOST_STATE(hdr->b_state)) 2545 arc_access(hdr, hash_lock); 2546 2547 ASSERT3U(hdr->b_size, ==, size); 2548 DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 2549 zbookmark_t *, zb); 2550 ARCSTAT_BUMP(arcstat_misses); 2551 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2552 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2553 data, metadata, misses); 2554 2555 if (l2arc_ndev != 0) { 2556 /* 2557 * Read from the L2ARC if the following are true: 2558 * 1. 
This buffer has L2ARC metadata. 2559 * 2. This buffer isn't currently writing to the L2ARC. 2560 */ 2561 if (hdr->b_l2hdr != NULL && !HDR_L2_WRITING(hdr)) { 2562 vdev_t *vd = hdr->b_l2hdr->b_dev->l2ad_vdev; 2563 daddr_t addr = hdr->b_l2hdr->b_daddr; 2564 l2arc_read_callback_t *cb; 2565 2566 if (vdev_is_dead(vd)) 2567 goto skip_l2arc; 2568 2569 hdr->b_flags |= ARC_L2_READING; 2570 mutex_exit(hash_lock); 2571 2572 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 2573 ARCSTAT_BUMP(arcstat_l2_hits); 2574 2575 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 2576 KM_SLEEP); 2577 cb->l2rcb_buf = buf; 2578 cb->l2rcb_spa = spa; 2579 cb->l2rcb_bp = *bp; 2580 cb->l2rcb_zb = *zb; 2581 cb->l2rcb_flags = flags; 2582 2583 /* 2584 * l2arc read. 2585 */ 2586 rzio = zio_read_phys(pio, vd, addr, size, 2587 buf->b_data, ZIO_CHECKSUM_OFF, 2588 l2arc_read_done, cb, priority, 2589 flags | ZIO_FLAG_DONT_CACHE, B_FALSE); 2590 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 2591 zio_t *, rzio); 2592 2593 if (*arc_flags & ARC_WAIT) 2594 return (zio_wait(rzio)); 2595 2596 ASSERT(*arc_flags & ARC_NOWAIT); 2597 zio_nowait(rzio); 2598 return (0); 2599 } else { 2600 DTRACE_PROBE1(l2arc__miss, 2601 arc_buf_hdr_t *, hdr); 2602 ARCSTAT_BUMP(arcstat_l2_misses); 2603 if (HDR_L2_WRITING(hdr)) 2604 ARCSTAT_BUMP(arcstat_l2_rw_clash); 2605 } 2606 } 2607 2608 skip_l2arc: 2609 mutex_exit(hash_lock); 2610 2611 rzio = zio_read(pio, spa, bp, buf->b_data, size, 2612 arc_read_done, buf, priority, flags, zb); 2613 2614 if (*arc_flags & ARC_WAIT) 2615 return (zio_wait(rzio)); 2616 2617 ASSERT(*arc_flags & ARC_NOWAIT); 2618 zio_nowait(rzio); 2619 } 2620 return (0); 2621 } 2622 2623 /* 2624 * arc_read() variant to support pool traversal. If the block is already 2625 * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2626 * The idea is that we don't want pool traversal filling up memory, but 2627 * if the ARC already has the data anyway, we shouldn't pay for the I/O. 2628 */ 2629 int 2630 arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2631 { 2632 arc_buf_hdr_t *hdr; 2633 kmutex_t *hash_mtx; 2634 int rc = 0; 2635 2636 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2637 2638 if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 2639 arc_buf_t *buf = hdr->b_buf; 2640 2641 ASSERT(buf); 2642 while (buf->b_data == NULL) { 2643 buf = buf->b_next; 2644 ASSERT(buf); 2645 } 2646 bcopy(buf->b_data, data, hdr->b_size); 2647 } else { 2648 rc = ENOENT; 2649 } 2650 2651 if (hash_mtx) 2652 mutex_exit(hash_mtx); 2653 2654 return (rc); 2655 } 2656 2657 void 2658 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 2659 { 2660 ASSERT(buf->b_hdr != NULL); 2661 ASSERT(buf->b_hdr->b_state != arc_anon); 2662 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 2663 buf->b_efunc = func; 2664 buf->b_private = private; 2665 } 2666 2667 /* 2668 * This is used by the DMU to let the ARC know that a buffer is 2669 * being evicted, so the ARC should clean up. If this arc buf 2670 * is not yet in the evicted state, it will be put there. 2671 */ 2672 int 2673 arc_buf_evict(arc_buf_t *buf) 2674 { 2675 arc_buf_hdr_t *hdr; 2676 kmutex_t *hash_lock; 2677 arc_buf_t **bufp; 2678 2679 mutex_enter(&arc_eviction_mtx); 2680 hdr = buf->b_hdr; 2681 if (hdr == NULL) { 2682 /* 2683 * We are in arc_do_user_evicts(). 
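		 * It has already cleared buf->b_hdr, so there is
		 * nothing left for us to tear down here.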
		 */
		ASSERT(buf->b_data == NULL);
		mutex_exit(&arc_eviction_mtx);
		return (0);
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);

	if (buf->b_data == NULL) {
		/*
		 * We are on the eviction list.
		 */
		mutex_exit(hash_lock);
		mutex_enter(&arc_eviction_mtx);
		if (buf->b_hdr == NULL) {
			/*
			 * We are already in arc_do_user_evicts().
			 */
			mutex_exit(&arc_eviction_mtx);
			return (0);
		} else {
			arc_buf_t copy = *buf; /* structure assignment */
			/*
			 * Process this buffer now
			 * but let arc_do_user_evicts() do the reaping.
			 */
			buf->b_efunc = NULL;
			mutex_exit(&arc_eviction_mtx);
			VERIFY(copy.b_efunc(&copy) == 0);
			return (1);
		}
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

	/*
	 * Pull this buffer off of the hdr
	 */
	bufp = &hdr->b_buf;
	while (*bufp != buf)
		bufp = &(*bufp)->b_next;
	*bufp = buf->b_next;

	ASSERT(buf->b_data != NULL);
	arc_buf_destroy(buf, FALSE, FALSE);

	if (hdr->b_datacnt == 0) {
		arc_state_t *old_state = hdr->b_state;
		arc_state_t *evicted_state;

		ASSERT(refcount_is_zero(&hdr->b_refcnt));

		evicted_state =
		    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

		mutex_enter(&old_state->arcs_mtx);
		mutex_enter(&evicted_state->arcs_mtx);

		arc_change_state(evicted_state, hdr, hash_lock);
		ASSERT(HDR_IN_HASH_TABLE(hdr));
		hdr->b_flags |= ARC_IN_HASH_TABLE;
		hdr->b_flags &= ~ARC_BUF_AVAILABLE;

		mutex_exit(&evicted_state->arcs_mtx);
		mutex_exit(&old_state->arcs_mtx);
	}
	mutex_exit(hash_lock);

	VERIFY(buf->b_efunc(buf) == 0);
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
	return (1);
}

/*
 * Release this buffer from the cache. This must be done
 * after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	l2arc_buf_hdr_t *l2hdr = NULL;
	uint64_t buf_size;

	/* this buffer is not on any list */
	ASSERT(refcount_count(&hdr->b_refcnt) > 0);

	if (hdr->b_state == arc_anon) {
		/* this buffer is already released */
		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
		ASSERT(BUF_EMPTY(hdr));
		ASSERT(buf->b_efunc == NULL);
		arc_buf_thaw(buf);
		return;
	}

	mutex_enter(hash_lock);

	/*
	 * Do we have more than one buf?
	 */
	if (hdr->b_buf != buf || buf->b_next != NULL) {
		arc_buf_hdr_t *nhdr;
		arc_buf_t **bufp;
		uint64_t blksz = hdr->b_size;
		spa_t *spa = hdr->b_spa;
		arc_buf_contents_t type = hdr->b_type;
		uint32_t flags = hdr->b_flags;

		ASSERT(hdr->b_datacnt > 1);
		/*
		 * Pull the data off of this buf and attach it to
		 * a new anonymous buf.
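		 * The remaining bufs keep the existing hdr; only this
		 * buf moves to the new anonymous hdr created below.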
2807 */ 2808 (void) remove_reference(hdr, hash_lock, tag); 2809 bufp = &hdr->b_buf; 2810 while (*bufp != buf) 2811 bufp = &(*bufp)->b_next; 2812 *bufp = (*bufp)->b_next; 2813 buf->b_next = NULL; 2814 2815 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 2816 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 2817 if (refcount_is_zero(&hdr->b_refcnt)) { 2818 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 2819 ASSERT3U(*size, >=, hdr->b_size); 2820 atomic_add_64(size, -hdr->b_size); 2821 } 2822 hdr->b_datacnt -= 1; 2823 if (hdr->b_l2hdr != NULL) { 2824 mutex_enter(&l2arc_buflist_mtx); 2825 l2hdr = hdr->b_l2hdr; 2826 hdr->b_l2hdr = NULL; 2827 buf_size = hdr->b_size; 2828 } 2829 arc_cksum_verify(buf); 2830 2831 mutex_exit(hash_lock); 2832 2833 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 2834 nhdr->b_size = blksz; 2835 nhdr->b_spa = spa; 2836 nhdr->b_type = type; 2837 nhdr->b_buf = buf; 2838 nhdr->b_state = arc_anon; 2839 nhdr->b_arc_access = 0; 2840 nhdr->b_flags = flags & ARC_L2_WRITING; 2841 nhdr->b_l2hdr = NULL; 2842 nhdr->b_datacnt = 1; 2843 nhdr->b_freeze_cksum = NULL; 2844 (void) refcount_add(&nhdr->b_refcnt, tag); 2845 buf->b_hdr = nhdr; 2846 atomic_add_64(&arc_anon->arcs_size, blksz); 2847 } else { 2848 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 2849 ASSERT(!list_link_active(&hdr->b_arc_node)); 2850 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2851 arc_change_state(arc_anon, hdr, hash_lock); 2852 hdr->b_arc_access = 0; 2853 if (hdr->b_l2hdr != NULL) { 2854 mutex_enter(&l2arc_buflist_mtx); 2855 l2hdr = hdr->b_l2hdr; 2856 hdr->b_l2hdr = NULL; 2857 buf_size = hdr->b_size; 2858 } 2859 mutex_exit(hash_lock); 2860 2861 bzero(&hdr->b_dva, sizeof (dva_t)); 2862 hdr->b_birth = 0; 2863 hdr->b_cksum0 = 0; 2864 arc_buf_thaw(buf); 2865 } 2866 buf->b_efunc = NULL; 2867 buf->b_private = NULL; 2868 2869 if (l2hdr) { 2870 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 2871 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 2872 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 2873 } 2874 if (MUTEX_HELD(&l2arc_buflist_mtx)) 2875 mutex_exit(&l2arc_buflist_mtx); 2876 } 2877 2878 int 2879 arc_released(arc_buf_t *buf) 2880 { 2881 return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 2882 } 2883 2884 int 2885 arc_has_callback(arc_buf_t *buf) 2886 { 2887 return (buf->b_efunc != NULL); 2888 } 2889 2890 #ifdef ZFS_DEBUG 2891 int 2892 arc_referenced(arc_buf_t *buf) 2893 { 2894 return (refcount_count(&buf->b_hdr->b_refcnt)); 2895 } 2896 #endif 2897 2898 static void 2899 arc_write_ready(zio_t *zio) 2900 { 2901 arc_write_callback_t *callback = zio->io_private; 2902 arc_buf_t *buf = callback->awcb_buf; 2903 arc_buf_hdr_t *hdr = buf->b_hdr; 2904 2905 if (zio->io_error == 0 && callback->awcb_ready) { 2906 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 2907 callback->awcb_ready(zio, buf, callback->awcb_private); 2908 } 2909 /* 2910 * If the IO is already in progress, then this is a re-write 2911 * attempt, so we need to thaw and re-compute the cksum. It is 2912 * the responsibility of the callback to handle the freeing 2913 * and accounting for any re-write attempt. If we don't have a 2914 * callback registered then simply free the block here. 
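	 * (The block freed below is the original bp from the earlier
	 * attempt, zio->io_bp_orig.)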
2915 */ 2916 if (HDR_IO_IN_PROGRESS(hdr)) { 2917 if (!BP_IS_HOLE(&zio->io_bp_orig) && 2918 callback->awcb_ready == NULL) { 2919 zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg, 2920 &zio->io_bp_orig, NULL, NULL)); 2921 } 2922 mutex_enter(&hdr->b_freeze_lock); 2923 if (hdr->b_freeze_cksum != NULL) { 2924 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 2925 hdr->b_freeze_cksum = NULL; 2926 } 2927 mutex_exit(&hdr->b_freeze_lock); 2928 } 2929 arc_cksum_compute(buf, B_FALSE); 2930 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2931 } 2932 2933 static void 2934 arc_write_done(zio_t *zio) 2935 { 2936 arc_write_callback_t *callback = zio->io_private; 2937 arc_buf_t *buf = callback->awcb_buf; 2938 arc_buf_hdr_t *hdr = buf->b_hdr; 2939 2940 hdr->b_acb = NULL; 2941 2942 /* this buffer is on no lists and is not in the hash table */ 2943 ASSERT3P(hdr->b_state, ==, arc_anon); 2944 2945 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 2946 hdr->b_birth = zio->io_bp->blk_birth; 2947 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 2948 /* 2949 * If the block to be written was all-zero, we may have 2950 * compressed it away. In this case no write was performed 2951 * so there will be no dva/birth-date/checksum. The buffer 2952 * must therefor remain anonymous (and uncached). 2953 */ 2954 if (!BUF_EMPTY(hdr)) { 2955 arc_buf_hdr_t *exists; 2956 kmutex_t *hash_lock; 2957 2958 arc_cksum_verify(buf); 2959 2960 exists = buf_hash_insert(hdr, &hash_lock); 2961 if (exists) { 2962 /* 2963 * This can only happen if we overwrite for 2964 * sync-to-convergence, because we remove 2965 * buffers from the hash table when we arc_free(). 2966 */ 2967 ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 2968 BP_IDENTITY(zio->io_bp))); 2969 ASSERT3U(zio->io_bp_orig.blk_birth, ==, 2970 zio->io_bp->blk_birth); 2971 2972 ASSERT(refcount_is_zero(&exists->b_refcnt)); 2973 arc_change_state(arc_anon, exists, hash_lock); 2974 mutex_exit(hash_lock); 2975 arc_hdr_destroy(exists); 2976 exists = buf_hash_insert(hdr, &hash_lock); 2977 ASSERT3P(exists, ==, NULL); 2978 } 2979 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2980 arc_access(hdr, hash_lock); 2981 mutex_exit(hash_lock); 2982 } else if (callback->awcb_done == NULL) { 2983 int destroy_hdr; 2984 /* 2985 * This is an anonymous buffer with no user callback, 2986 * destroy it if there are no active references. 
2987 */ 2988 mutex_enter(&arc_eviction_mtx); 2989 destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 2990 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2991 mutex_exit(&arc_eviction_mtx); 2992 if (destroy_hdr) 2993 arc_hdr_destroy(hdr); 2994 } else { 2995 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2996 } 2997 2998 if (callback->awcb_done) { 2999 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3000 callback->awcb_done(zio, buf, callback->awcb_private); 3001 } 3002 3003 kmem_free(callback, sizeof (arc_write_callback_t)); 3004 } 3005 3006 zio_t * 3007 arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, 3008 uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 3009 arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 3010 int flags, zbookmark_t *zb) 3011 { 3012 arc_buf_hdr_t *hdr = buf->b_hdr; 3013 arc_write_callback_t *callback; 3014 zio_t *zio; 3015 3016 /* this is a private buffer - no locking required */ 3017 ASSERT3P(hdr->b_state, ==, arc_anon); 3018 ASSERT(BUF_EMPTY(hdr)); 3019 ASSERT(!HDR_IO_ERROR(hdr)); 3020 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3021 ASSERT(hdr->b_acb == 0); 3022 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3023 callback->awcb_ready = ready; 3024 callback->awcb_done = done; 3025 callback->awcb_private = private; 3026 callback->awcb_buf = buf; 3027 zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, 3028 buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback, 3029 priority, flags, zb); 3030 3031 return (zio); 3032 } 3033 3034 int 3035 arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 3036 zio_done_func_t *done, void *private, uint32_t arc_flags) 3037 { 3038 arc_buf_hdr_t *ab; 3039 kmutex_t *hash_lock; 3040 zio_t *zio; 3041 3042 /* 3043 * If this buffer is in the cache, release it, so it 3044 * can be re-used. 3045 */ 3046 ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 3047 if (ab != NULL) { 3048 /* 3049 * The checksum of blocks to free is not always 3050 * preserved (eg. on the deadlist). However, if it is 3051 * nonzero, it should match what we have in the cache. 3052 */ 3053 ASSERT(bp->blk_cksum.zc_word[0] == 0 || 3054 ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 3055 if (ab->b_state != arc_anon) 3056 arc_change_state(arc_anon, ab, hash_lock); 3057 if (HDR_IO_IN_PROGRESS(ab)) { 3058 /* 3059 * This should only happen when we prefetch. 3060 */ 3061 ASSERT(ab->b_flags & ARC_PREFETCH); 3062 ASSERT3U(ab->b_datacnt, ==, 1); 3063 ab->b_flags |= ARC_FREED_IN_READ; 3064 if (HDR_IN_HASH_TABLE(ab)) 3065 buf_hash_remove(ab); 3066 ab->b_arc_access = 0; 3067 bzero(&ab->b_dva, sizeof (dva_t)); 3068 ab->b_birth = 0; 3069 ab->b_cksum0 = 0; 3070 ab->b_buf->b_efunc = NULL; 3071 ab->b_buf->b_private = NULL; 3072 mutex_exit(hash_lock); 3073 } else if (refcount_is_zero(&ab->b_refcnt)) { 3074 ab->b_flags |= ARC_FREE_IN_PROGRESS; 3075 mutex_exit(hash_lock); 3076 arc_hdr_destroy(ab); 3077 ARCSTAT_BUMP(arcstat_deleted); 3078 } else { 3079 /* 3080 * We still have an active reference on this 3081 * buffer. This can happen, e.g., from 3082 * dbuf_unoverride(). 
3083 */ 3084 ASSERT(!HDR_IN_HASH_TABLE(ab)); 3085 ab->b_arc_access = 0; 3086 bzero(&ab->b_dva, sizeof (dva_t)); 3087 ab->b_birth = 0; 3088 ab->b_cksum0 = 0; 3089 ab->b_buf->b_efunc = NULL; 3090 ab->b_buf->b_private = NULL; 3091 mutex_exit(hash_lock); 3092 } 3093 } 3094 3095 zio = zio_free(pio, spa, txg, bp, done, private); 3096 3097 if (arc_flags & ARC_WAIT) 3098 return (zio_wait(zio)); 3099 3100 ASSERT(arc_flags & ARC_NOWAIT); 3101 zio_nowait(zio); 3102 3103 return (0); 3104 } 3105 3106 static int 3107 arc_memory_throttle(uint64_t reserve, uint64_t txg) 3108 { 3109 #ifdef _KERNEL 3110 uint64_t inflight_data = arc_anon->arcs_size; 3111 uint64_t available_memory = ptob(freemem); 3112 static uint64_t page_load = 0; 3113 static uint64_t last_txg = 0; 3114 3115 #if defined(__i386) 3116 available_memory = 3117 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3118 #endif 3119 if (available_memory >= zfs_write_limit_max) 3120 return (0); 3121 3122 if (txg > last_txg) { 3123 last_txg = txg; 3124 page_load = 0; 3125 } 3126 /* 3127 * If we are in pageout, we know that memory is already tight, 3128 * the arc is already going to be evicting, so we just want to 3129 * continue to let page writes occur as quickly as possible. 3130 */ 3131 if (curproc == proc_pageout) { 3132 if (page_load > MAX(ptob(minfree), available_memory) / 4) 3133 return (ERESTART); 3134 /* Note: reserve is inflated, so we deflate */ 3135 page_load += reserve / 8; 3136 return (0); 3137 } else if (page_load > 0 && arc_reclaim_needed()) { 3138 /* memory is low, delay before restarting */ 3139 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3140 return (EAGAIN); 3141 } 3142 page_load = 0; 3143 3144 if (arc_size > arc_c_min) { 3145 uint64_t evictable_memory = 3146 arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3147 arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3148 arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3149 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3150 available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3151 } 3152 3153 if (inflight_data > available_memory / 4) { 3154 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3155 return (ERESTART); 3156 } 3157 #endif 3158 return (0); 3159 } 3160 3161 void 3162 arc_tempreserve_clear(uint64_t reserve) 3163 { 3164 atomic_add_64(&arc_tempreserve, -reserve); 3165 ASSERT((int64_t)arc_tempreserve >= 0); 3166 } 3167 3168 int 3169 arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3170 { 3171 int error; 3172 3173 #ifdef ZFS_DEBUG 3174 /* 3175 * Once in a while, fail for no reason. Everything should cope. 3176 */ 3177 if (spa_get_random(10000) == 0) { 3178 dprintf("forcing random failure\n"); 3179 return (ERESTART); 3180 } 3181 #endif 3182 if (reserve > arc_c/4 && !arc_no_grow) 3183 arc_c = MIN(arc_c_max, reserve * 4); 3184 if (reserve > arc_c) 3185 return (ENOMEM); 3186 3187 /* 3188 * Writes will, almost always, require additional memory allocations 3189 * in order to compress/encrypt/etc the data. We therefor need to 3190 * make sure that there is sufficient available memory for this. 3191 */ 3192 if (error = arc_memory_throttle(reserve, txg)) 3193 return (error); 3194 3195 /* 3196 * Throttle writes when the amount of dirty data in the cache 3197 * gets too large. We try to keep the cache less than half full 3198 * of dirty blocks so that our sync times don't grow too large. 3199 * Note: if two requests come in concurrently, we might let them 3200 * both succeed, when one of them should fail. Not a huge deal. 
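	 *
	 * Illustrative numbers only: with arc_c = 1GB, the check below
	 * throttles once outstanding reservations plus anonymous (dirty)
	 * data exceed 512MB and the anonymous data alone exceeds 256MB.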
3201 */ 3202 if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 3203 arc_anon->arcs_size > arc_c / 4) { 3204 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3205 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3206 arc_tempreserve>>10, 3207 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3208 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3209 reserve>>10, arc_c>>10); 3210 return (ERESTART); 3211 } 3212 atomic_add_64(&arc_tempreserve, reserve); 3213 return (0); 3214 } 3215 3216 void 3217 arc_init(void) 3218 { 3219 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3220 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3221 3222 /* Convert seconds to clock ticks */ 3223 arc_min_prefetch_lifespan = 1 * hz; 3224 3225 /* Start out with 1/8 of all memory */ 3226 arc_c = physmem * PAGESIZE / 8; 3227 3228 #ifdef _KERNEL 3229 /* 3230 * On architectures where the physical memory can be larger 3231 * than the addressable space (intel in 32-bit mode), we may 3232 * need to limit the cache to 1/8 of VM size. 3233 */ 3234 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3235 #endif 3236 3237 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 3238 arc_c_min = MAX(arc_c / 4, 64<<20); 3239 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 3240 if (arc_c * 8 >= 1<<30) 3241 arc_c_max = (arc_c * 8) - (1<<30); 3242 else 3243 arc_c_max = arc_c_min; 3244 arc_c_max = MAX(arc_c * 6, arc_c_max); 3245 3246 /* 3247 * Allow the tunables to override our calculations if they are 3248 * reasonable (ie. over 64MB) 3249 */ 3250 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 3251 arc_c_max = zfs_arc_max; 3252 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 3253 arc_c_min = zfs_arc_min; 3254 3255 arc_c = arc_c_max; 3256 arc_p = (arc_c >> 1); 3257 3258 /* limit meta-data to 1/4 of the arc capacity */ 3259 arc_meta_limit = arc_c_max / 4; 3260 3261 /* Allow the tunable to override if it is reasonable */ 3262 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3263 arc_meta_limit = zfs_arc_meta_limit; 3264 3265 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3266 arc_c_min = arc_meta_limit / 2; 3267 3268 /* if kmem_flags are set, lets try to use less memory */ 3269 if (kmem_debugging()) 3270 arc_c = arc_c / 2; 3271 if (arc_c < arc_c_min) 3272 arc_c = arc_c_min; 3273 3274 arc_anon = &ARC_anon; 3275 arc_mru = &ARC_mru; 3276 arc_mru_ghost = &ARC_mru_ghost; 3277 arc_mfu = &ARC_mfu; 3278 arc_mfu_ghost = &ARC_mfu_ghost; 3279 arc_l2c_only = &ARC_l2c_only; 3280 arc_size = 0; 3281 3282 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3283 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3284 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3285 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3286 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3287 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3288 3289 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 3290 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3291 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 3292 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3293 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 3294 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3295 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 3296 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3297 
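	/* The MFU, MFU-ghost and L2-only states use the same list layout. */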
list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 3298 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3299 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 3300 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3301 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 3302 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3303 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 3304 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3305 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 3306 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3307 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 3308 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3309 3310 buf_init(); 3311 3312 arc_thread_exit = 0; 3313 arc_eviction_list = NULL; 3314 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3315 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3316 3317 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3318 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3319 3320 if (arc_ksp != NULL) { 3321 arc_ksp->ks_data = &arc_stats; 3322 kstat_install(arc_ksp); 3323 } 3324 3325 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3326 TS_RUN, minclsyspri); 3327 3328 arc_dead = FALSE; 3329 3330 if (zfs_write_limit_max == 0) 3331 zfs_write_limit_max = physmem * PAGESIZE >> 3332 zfs_write_limit_shift; 3333 else 3334 zfs_write_limit_shift = 0; 3335 } 3336 3337 void 3338 arc_fini(void) 3339 { 3340 mutex_enter(&arc_reclaim_thr_lock); 3341 arc_thread_exit = 1; 3342 while (arc_thread_exit != 0) 3343 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3344 mutex_exit(&arc_reclaim_thr_lock); 3345 3346 arc_flush(NULL); 3347 3348 arc_dead = TRUE; 3349 3350 if (arc_ksp != NULL) { 3351 kstat_delete(arc_ksp); 3352 arc_ksp = NULL; 3353 } 3354 3355 mutex_destroy(&arc_eviction_mtx); 3356 mutex_destroy(&arc_reclaim_thr_lock); 3357 cv_destroy(&arc_reclaim_thr_cv); 3358 3359 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 3360 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 3361 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 3362 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 3363 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 3364 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 3365 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 3366 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3367 3368 mutex_destroy(&arc_anon->arcs_mtx); 3369 mutex_destroy(&arc_mru->arcs_mtx); 3370 mutex_destroy(&arc_mru_ghost->arcs_mtx); 3371 mutex_destroy(&arc_mfu->arcs_mtx); 3372 mutex_destroy(&arc_mfu_ghost->arcs_mtx); 3373 3374 buf_fini(); 3375 } 3376 3377 /* 3378 * Level 2 ARC 3379 * 3380 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3381 * It uses dedicated storage devices to hold cached data, which are populated 3382 * using large infrequent writes. The main role of this cache is to boost 3383 * the performance of random read workloads. The intended L2ARC devices 3384 * include short-stroked disks, solid state disks, and other media with 3385 * substantially faster read latency than disk. 
3386 * 3387 * +-----------------------+ 3388 * | ARC | 3389 * +-----------------------+ 3390 * | ^ ^ 3391 * | | | 3392 * l2arc_feed_thread() arc_read() 3393 * | | | 3394 * | l2arc read | 3395 * V | | 3396 * +---------------+ | 3397 * | L2ARC | | 3398 * +---------------+ | 3399 * | ^ | 3400 * l2arc_write() | | 3401 * | | | 3402 * V | | 3403 * +-------+ +-------+ 3404 * | vdev | | vdev | 3405 * | cache | | cache | 3406 * +-------+ +-------+ 3407 * +=========+ .-----. 3408 * : L2ARC : |-_____-| 3409 * : devices : | Disks | 3410 * +=========+ `-_____-' 3411 * 3412 * Read requests are satisfied from the following sources, in order: 3413 * 3414 * 1) ARC 3415 * 2) vdev cache of L2ARC devices 3416 * 3) L2ARC devices 3417 * 4) vdev cache of disks 3418 * 5) disks 3419 * 3420 * Some L2ARC device types exhibit extremely slow write performance. 3421 * To accommodate for this there are some significant differences between 3422 * the L2ARC and traditional cache design: 3423 * 3424 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 3425 * the ARC behave as usual, freeing buffers and placing headers on ghost 3426 * lists. The ARC does not send buffers to the L2ARC during eviction as 3427 * this would add inflated write latencies for all ARC memory pressure. 3428 * 3429 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 3430 * It does this by periodically scanning buffers from the eviction-end of 3431 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 3432 * not already there. It scans until a headroom of buffers is satisfied, 3433 * which itself is a buffer for ARC eviction. The thread that does this is 3434 * l2arc_feed_thread(), illustrated below; example sizes are included to 3435 * provide a better sense of ratio than this diagram: 3436 * 3437 * head --> tail 3438 * +---------------------+----------+ 3439 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 3440 * +---------------------+----------+ | o L2ARC eligible 3441 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 3442 * +---------------------+----------+ | 3443 * 15.9 Gbytes ^ 32 Mbytes | 3444 * headroom | 3445 * l2arc_feed_thread() 3446 * | 3447 * l2arc write hand <--[oooo]--' 3448 * | 8 Mbyte 3449 * | write max 3450 * V 3451 * +==============================+ 3452 * L2ARC dev |####|#|###|###| |####| ... | 3453 * +==============================+ 3454 * 32 Gbytes 3455 * 3456 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 3457 * evicted, then the L2ARC has cached a buffer much sooner than it probably 3458 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 3459 * safe to say that this is an uncommon case, since buffers at the end of 3460 * the ARC lists have moved there due to inactivity. 3461 * 3462 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 3463 * then the L2ARC simply misses copying some buffers. This serves as a 3464 * pressure valve to prevent heavy read workloads from both stalling the ARC 3465 * with waits and clogging the L2ARC with writes. This also helps prevent 3466 * the potential for the L2ARC to churn if it attempts to cache content too 3467 * quickly, such as during backups of the entire pool. 3468 * 3469 * 5. Writes to the L2ARC devices are grouped and sent in-sequence, so that 3470 * the vdev queue can aggregate them into larger and fewer writes. Each 3471 * device is written to in a rotor fashion, sweeping writes through 3472 * available space then repeating. 
3473 * 3474 * 6. The L2ARC does not store dirty content. It never needs to flush 3475 * write buffers back to disk based storage. 3476 * 3477 * 7. If an ARC buffer is written (and dirtied) which also exists in the 3478 * L2ARC, the now stale L2ARC buffer is immediately dropped. 3479 * 3480 * The performance of the L2ARC can be tweaked by a number of tunables, which 3481 * may be necessary for different workloads: 3482 * 3483 * l2arc_write_max max write bytes per interval 3484 * l2arc_noprefetch skip caching prefetched buffers 3485 * l2arc_headroom number of max device writes to precache 3486 * l2arc_feed_secs seconds between L2ARC writing 3487 * 3488 * Tunables may be removed or added as future performance improvements are 3489 * integrated, and also may become zpool properties. 3490 */ 3491 3492 static void 3493 l2arc_hdr_stat_add(void) 3494 { 3495 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 3496 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 3497 } 3498 3499 static void 3500 l2arc_hdr_stat_remove(void) 3501 { 3502 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 3503 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 3504 } 3505 3506 /* 3507 * Cycle through L2ARC devices. This is how L2ARC load balances. 3508 * This is called with l2arc_dev_mtx held, which also locks out spa removal. 3509 */ 3510 static l2arc_dev_t * 3511 l2arc_dev_get_next(void) 3512 { 3513 l2arc_dev_t *next, *first; 3514 3515 /* if there are no vdevs, there is nothing to do */ 3516 if (l2arc_ndev == 0) 3517 return (NULL); 3518 3519 first = NULL; 3520 next = l2arc_dev_last; 3521 do { 3522 /* loop around the list looking for a non-faulted vdev */ 3523 if (next == NULL) { 3524 next = list_head(l2arc_dev_list); 3525 } else { 3526 next = list_next(l2arc_dev_list, next); 3527 if (next == NULL) 3528 next = list_head(l2arc_dev_list); 3529 } 3530 3531 /* if we have come back to the start, bail out */ 3532 if (first == NULL) 3533 first = next; 3534 else if (next == first) 3535 break; 3536 3537 } while (vdev_is_dead(next->l2ad_vdev)); 3538 3539 /* if we were unable to find any usable vdevs, return NULL */ 3540 if (vdev_is_dead(next->l2ad_vdev)) 3541 return (NULL); 3542 3543 l2arc_dev_last = next; 3544 3545 return (next); 3546 } 3547 3548 /* 3549 * A write to a cache device has completed. Update all headers to allow 3550 * reads from these buffers to begin. 3551 */ 3552 static void 3553 l2arc_write_done(zio_t *zio) 3554 { 3555 l2arc_write_callback_t *cb; 3556 l2arc_dev_t *dev; 3557 list_t *buflist; 3558 l2arc_data_free_t *df, *df_prev; 3559 arc_buf_hdr_t *head, *ab, *ab_prev; 3560 kmutex_t *hash_lock; 3561 3562 cb = zio->io_private; 3563 ASSERT(cb != NULL); 3564 dev = cb->l2wcb_dev; 3565 ASSERT(dev != NULL); 3566 head = cb->l2wcb_head; 3567 ASSERT(head != NULL); 3568 buflist = dev->l2ad_buflist; 3569 ASSERT(buflist != NULL); 3570 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 3571 l2arc_write_callback_t *, cb); 3572 3573 if (zio->io_error != 0) 3574 ARCSTAT_BUMP(arcstat_l2_writes_error); 3575 3576 mutex_enter(&l2arc_buflist_mtx); 3577 3578 /* 3579 * All writes completed, or an error was hit. 3580 */ 3581 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 3582 ab_prev = list_prev(buflist, ab); 3583 3584 hash_lock = HDR_LOCK(ab); 3585 if (!mutex_tryenter(hash_lock)) { 3586 /* 3587 * This buffer misses out. It may be in a stage 3588 * of eviction. Its ARC_L2_WRITING flag will be 3589 * left set, denying reads to this buffer. 
3590 */ 3591 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 3592 continue; 3593 } 3594 3595 if (zio->io_error != 0) { 3596 /* 3597 * Error - invalidate L2ARC entry. 3598 */ 3599 ab->b_l2hdr = NULL; 3600 } 3601 3602 /* 3603 * Allow ARC to begin reads to this L2ARC entry. 3604 */ 3605 ab->b_flags &= ~ARC_L2_WRITING; 3606 3607 mutex_exit(hash_lock); 3608 } 3609 3610 atomic_inc_64(&l2arc_writes_done); 3611 list_remove(buflist, head); 3612 kmem_cache_free(hdr_cache, head); 3613 mutex_exit(&l2arc_buflist_mtx); 3614 3615 /* 3616 * Free buffers that were tagged for destruction. 3617 */ 3618 mutex_enter(&l2arc_free_on_write_mtx); 3619 buflist = l2arc_free_on_write; 3620 for (df = list_tail(buflist); df; df = df_prev) { 3621 df_prev = list_prev(buflist, df); 3622 ASSERT(df->l2df_data != NULL); 3623 ASSERT(df->l2df_func != NULL); 3624 df->l2df_func(df->l2df_data, df->l2df_size); 3625 list_remove(buflist, df); 3626 kmem_free(df, sizeof (l2arc_data_free_t)); 3627 } 3628 mutex_exit(&l2arc_free_on_write_mtx); 3629 3630 kmem_free(cb, sizeof (l2arc_write_callback_t)); 3631 } 3632 3633 /* 3634 * A read to a cache device completed. Validate buffer contents before 3635 * handing over to the regular ARC routines. 3636 */ 3637 static void 3638 l2arc_read_done(zio_t *zio) 3639 { 3640 l2arc_read_callback_t *cb; 3641 arc_buf_hdr_t *hdr; 3642 arc_buf_t *buf; 3643 zio_t *rzio; 3644 kmutex_t *hash_lock; 3645 int equal, err = 0; 3646 3647 cb = zio->io_private; 3648 ASSERT(cb != NULL); 3649 buf = cb->l2rcb_buf; 3650 ASSERT(buf != NULL); 3651 hdr = buf->b_hdr; 3652 ASSERT(hdr != NULL); 3653 3654 hash_lock = HDR_LOCK(hdr); 3655 mutex_enter(hash_lock); 3656 3657 /* 3658 * Check this survived the L2ARC journey. 3659 */ 3660 equal = arc_cksum_equal(buf); 3661 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 3662 mutex_exit(hash_lock); 3663 zio->io_private = buf; 3664 arc_read_done(zio); 3665 } else { 3666 mutex_exit(hash_lock); 3667 /* 3668 * Buffer didn't survive caching. Increment stats and 3669 * reissue to the original storage device. 3670 */ 3671 if (zio->io_error != 0) 3672 ARCSTAT_BUMP(arcstat_l2_io_error); 3673 if (!equal) 3674 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 3675 3676 zio->io_flags &= ~ZIO_FLAG_DONT_CACHE; 3677 rzio = zio_read(NULL, cb->l2rcb_spa, &cb->l2rcb_bp, 3678 buf->b_data, zio->io_size, arc_read_done, buf, 3679 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb); 3680 3681 /* 3682 * Since this is a separate thread, we can wait on this 3683 * I/O whether there is an io_waiter or not. 3684 */ 3685 err = zio_wait(rzio); 3686 3687 /* 3688 * Let the resent I/O call arc_read_done() instead. 3689 * io_error is set to the reissued I/O error status. 3690 */ 3691 zio->io_done = NULL; 3692 zio->io_waiter = NULL; 3693 zio->io_error = err; 3694 } 3695 3696 kmem_free(cb, sizeof (l2arc_read_callback_t)); 3697 } 3698 3699 /* 3700 * This is the list priority from which the L2ARC will search for pages to 3701 * cache. This is used within loops (0..3) to cycle through lists in the 3702 * desired order. This order can have a significant effect on cache 3703 * performance. 3704 * 3705 * Currently the metadata lists are hit first, MFU then MRU, followed by 3706 * the data lists. This function returns a locked list, and also returns 3707 * the lock pointer.
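 *
 * A minimal usage sketch (this mirrors what l2arc_write_buffers()
 * does below):
 *
 *	list = l2arc_list_locked(try, &list_lock);
 *	(walk the list from list_tail() towards the head)
 *	mutex_exit(list_lock);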
3708 */ 3709 static list_t * 3710 l2arc_list_locked(int list_num, kmutex_t **lock) 3711 { 3712 list_t *list; 3713 3714 ASSERT(list_num >= 0 && list_num <= 3); 3715 3716 switch (list_num) { 3717 case 0: 3718 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 3719 *lock = &arc_mfu->arcs_mtx; 3720 break; 3721 case 1: 3722 list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 3723 *lock = &arc_mru->arcs_mtx; 3724 break; 3725 case 2: 3726 list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 3727 *lock = &arc_mfu->arcs_mtx; 3728 break; 3729 case 3: 3730 list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 3731 *lock = &arc_mru->arcs_mtx; 3732 break; 3733 } 3734 3735 ASSERT(!(MUTEX_HELD(*lock))); 3736 mutex_enter(*lock); 3737 return (list); 3738 } 3739 3740 /* 3741 * Evict buffers from the device write hand to the distance specified in 3742 * bytes. This distance may span populated buffers, it may span nothing. 3743 * This is clearing a region on the L2ARC device ready for writing. 3744 * If the 'all' boolean is set, every buffer is evicted. 3745 */ 3746 static void 3747 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 3748 { 3749 list_t *buflist; 3750 l2arc_buf_hdr_t *abl2; 3751 arc_buf_hdr_t *ab, *ab_prev; 3752 kmutex_t *hash_lock; 3753 uint64_t taddr; 3754 3755 ASSERT(MUTEX_HELD(&l2arc_dev_mtx)); 3756 3757 buflist = dev->l2ad_buflist; 3758 3759 if (buflist == NULL) 3760 return; 3761 3762 if (!all && dev->l2ad_first) { 3763 /* 3764 * This is the first sweep through the device. There is 3765 * nothing to evict. 3766 */ 3767 return; 3768 } 3769 3770 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * dev->l2ad_write))) { 3771 /* 3772 * When nearing the end of the device, evict to the end 3773 * before the device write hand jumps to the start. 3774 */ 3775 taddr = dev->l2ad_end; 3776 } else { 3777 taddr = dev->l2ad_hand + distance; 3778 } 3779 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 3780 uint64_t, taddr, boolean_t, all); 3781 3782 top: 3783 mutex_enter(&l2arc_buflist_mtx); 3784 for (ab = list_tail(buflist); ab; ab = ab_prev) { 3785 ab_prev = list_prev(buflist, ab); 3786 3787 hash_lock = HDR_LOCK(ab); 3788 if (!mutex_tryenter(hash_lock)) { 3789 /* 3790 * Missed the hash lock. Retry. 3791 */ 3792 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 3793 mutex_exit(&l2arc_buflist_mtx); 3794 mutex_enter(hash_lock); 3795 mutex_exit(hash_lock); 3796 goto top; 3797 } 3798 3799 if (HDR_L2_WRITE_HEAD(ab)) { 3800 /* 3801 * We hit a write head node. Leave it for 3802 * l2arc_write_done(). 3803 */ 3804 list_remove(buflist, ab); 3805 mutex_exit(hash_lock); 3806 continue; 3807 } 3808 3809 if (!all && ab->b_l2hdr != NULL && 3810 (ab->b_l2hdr->b_daddr > taddr || 3811 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 3812 /* 3813 * We've evicted to the target address, 3814 * or the end of the device. 3815 */ 3816 mutex_exit(hash_lock); 3817 break; 3818 } 3819 3820 if (HDR_FREE_IN_PROGRESS(ab)) { 3821 /* 3822 * Already on the path to destruction. 3823 */ 3824 mutex_exit(hash_lock); 3825 continue; 3826 } 3827 3828 if (ab->b_state == arc_l2c_only) { 3829 ASSERT(!HDR_L2_READING(ab)); 3830 /* 3831 * This doesn't exist in the ARC. Destroy. 3832 * arc_hdr_destroy() will call list_remove() 3833 * and decrement arcstat_l2_size. 3834 */ 3835 arc_change_state(arc_anon, ab, hash_lock); 3836 arc_hdr_destroy(ab); 3837 } else { 3838 /* 3839 * Tell ARC this no longer exists in L2ARC. 
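			 * Free its l2arc_buf_hdr_t, decrement arcstat_l2_size
			 * and remove the header from the device buflist. Any
			 * leftover ARC_L2_WRITING flag from a failed write is
			 * cleared, and if a read is in flight the header is
			 * flagged ARC_L2_EVICTED so l2arc_read_done() will
			 * reissue the read to the original storage device.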
3840 */ 3841 if (ab->b_l2hdr != NULL) { 3842 abl2 = ab->b_l2hdr; 3843 ab->b_l2hdr = NULL; 3844 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 3845 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 3846 } 3847 list_remove(buflist, ab); 3848 3849 /* 3850 * This may have been leftover after a 3851 * failed write. 3852 */ 3853 ab->b_flags &= ~ARC_L2_WRITING; 3854 3855 /* 3856 * Invalidate issued or about to be issued 3857 * reads, since we may be about to write 3858 * over this location. 3859 */ 3860 if (HDR_L2_READING(ab)) { 3861 ARCSTAT_BUMP(arcstat_l2_evict_reading); 3862 ab->b_flags |= ARC_L2_EVICTED; 3863 } 3864 } 3865 mutex_exit(hash_lock); 3866 } 3867 mutex_exit(&l2arc_buflist_mtx); 3868 3869 spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); 3870 dev->l2ad_evict = taddr; 3871 } 3872 3873 /* 3874 * Find and write ARC buffers to the L2ARC device. 3875 * 3876 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 3877 * for reading until they have completed writing. 3878 */ 3879 static void 3880 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev) 3881 { 3882 arc_buf_hdr_t *ab, *ab_prev, *head; 3883 l2arc_buf_hdr_t *hdrl2; 3884 list_t *list; 3885 uint64_t passed_sz, write_sz, buf_sz; 3886 uint64_t target_sz = dev->l2ad_write; 3887 uint64_t headroom = dev->l2ad_write * l2arc_headroom; 3888 void *buf_data; 3889 kmutex_t *hash_lock, *list_lock; 3890 boolean_t have_lock, full; 3891 l2arc_write_callback_t *cb; 3892 zio_t *pio, *wzio; 3893 3894 ASSERT(MUTEX_HELD(&l2arc_dev_mtx)); 3895 ASSERT(dev->l2ad_vdev != NULL); 3896 3897 pio = NULL; 3898 write_sz = 0; 3899 full = B_FALSE; 3900 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3901 head->b_flags |= ARC_L2_WRITE_HEAD; 3902 3903 /* 3904 * Copy buffers for L2ARC writing. 3905 */ 3906 mutex_enter(&l2arc_buflist_mtx); 3907 for (int try = 0; try <= 3; try++) { 3908 list = l2arc_list_locked(try, &list_lock); 3909 passed_sz = 0; 3910 3911 for (ab = list_tail(list); ab; ab = ab_prev) { 3912 ab_prev = list_prev(list, ab); 3913 3914 hash_lock = HDR_LOCK(ab); 3915 have_lock = MUTEX_HELD(hash_lock); 3916 if (!have_lock && !mutex_tryenter(hash_lock)) { 3917 /* 3918 * Skip this buffer rather than waiting. 3919 */ 3920 continue; 3921 } 3922 3923 passed_sz += ab->b_size; 3924 if (passed_sz > headroom) { 3925 /* 3926 * Searched too far. 3927 */ 3928 mutex_exit(hash_lock); 3929 break; 3930 } 3931 3932 if (ab->b_spa != spa) { 3933 mutex_exit(hash_lock); 3934 continue; 3935 } 3936 3937 if (ab->b_l2hdr != NULL) { 3938 /* 3939 * Already in L2ARC. 3940 */ 3941 mutex_exit(hash_lock); 3942 continue; 3943 } 3944 3945 if (HDR_IO_IN_PROGRESS(ab) || HDR_DONT_L2CACHE(ab)) { 3946 mutex_exit(hash_lock); 3947 continue; 3948 } 3949 3950 if ((write_sz + ab->b_size) > target_sz) { 3951 full = B_TRUE; 3952 mutex_exit(hash_lock); 3953 break; 3954 } 3955 3956 if (ab->b_buf == NULL) { 3957 DTRACE_PROBE1(l2arc__buf__null, void *, ab); 3958 mutex_exit(hash_lock); 3959 continue; 3960 } 3961 3962 if (pio == NULL) { 3963 /* 3964 * Insert a dummy header on the buflist so 3965 * l2arc_write_done() can find where the 3966 * write buffers begin without searching. 3967 */ 3968 list_insert_head(dev->l2ad_buflist, head); 3969 3970 cb = kmem_alloc( 3971 sizeof (l2arc_write_callback_t), KM_SLEEP); 3972 cb->l2wcb_dev = dev; 3973 cb->l2wcb_head = head; 3974 pio = zio_root(spa, l2arc_write_done, cb, 3975 ZIO_FLAG_CANFAIL); 3976 } 3977 3978 /* 3979 * Create and add a new L2ARC header. 
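			 * The new header records the destination device and
			 * the device address at the current write hand. The
			 * buffer is flagged ARC_L2_WRITING so that reads are
			 * not satisfied from the L2ARC until l2arc_write_done()
			 * clears the flag, and the physical write is issued as
			 * a child of the pio root zio created above.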
3980 */ 3981 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 3982 hdrl2->b_dev = dev; 3983 hdrl2->b_daddr = dev->l2ad_hand; 3984 3985 ab->b_flags |= ARC_L2_WRITING; 3986 ab->b_l2hdr = hdrl2; 3987 list_insert_head(dev->l2ad_buflist, ab); 3988 buf_data = ab->b_buf->b_data; 3989 buf_sz = ab->b_size; 3990 3991 /* 3992 * Compute and store the buffer cksum before 3993 * writing. On debug the cksum is verified first. 3994 */ 3995 arc_cksum_verify(ab->b_buf); 3996 arc_cksum_compute(ab->b_buf, B_TRUE); 3997 3998 mutex_exit(hash_lock); 3999 4000 wzio = zio_write_phys(pio, dev->l2ad_vdev, 4001 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4002 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4003 ZIO_FLAG_CANFAIL, B_FALSE); 4004 4005 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4006 zio_t *, wzio); 4007 (void) zio_nowait(wzio); 4008 4009 write_sz += buf_sz; 4010 dev->l2ad_hand += buf_sz; 4011 } 4012 4013 mutex_exit(list_lock); 4014 4015 if (full == B_TRUE) 4016 break; 4017 } 4018 mutex_exit(&l2arc_buflist_mtx); 4019 4020 if (pio == NULL) { 4021 ASSERT3U(write_sz, ==, 0); 4022 kmem_cache_free(hdr_cache, head); 4023 return; 4024 } 4025 4026 ASSERT3U(write_sz, <=, target_sz); 4027 ARCSTAT_BUMP(arcstat_l2_writes_sent); 4028 ARCSTAT_INCR(arcstat_l2_size, write_sz); 4029 spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); 4030 4031 /* 4032 * Bump device hand to the device start if it is approaching the end. 4033 * l2arc_evict() will already have evicted ahead for this case. 4034 */ 4035 if (dev->l2ad_hand >= (dev->l2ad_end - dev->l2ad_write)) { 4036 spa_l2cache_space_update(dev->l2ad_vdev, 0, 4037 dev->l2ad_end - dev->l2ad_hand); 4038 dev->l2ad_hand = dev->l2ad_start; 4039 dev->l2ad_evict = dev->l2ad_start; 4040 dev->l2ad_first = B_FALSE; 4041 } 4042 4043 (void) zio_wait(pio); 4044 } 4045 4046 /* 4047 * This thread feeds the L2ARC at regular intervals. This is the beating 4048 * heart of the L2ARC. 4049 */ 4050 static void 4051 l2arc_feed_thread(void) 4052 { 4053 callb_cpr_t cpr; 4054 l2arc_dev_t *dev; 4055 spa_t *spa; 4056 int interval; 4057 boolean_t startup = B_TRUE; 4058 4059 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 4060 4061 mutex_enter(&l2arc_feed_thr_lock); 4062 4063 while (l2arc_thread_exit == 0) { 4064 /* 4065 * Initially pause for L2ARC_FEED_DELAY seconds as a grace 4066 * interval during boot, followed by l2arc_feed_secs seconds 4067 * thereafter. 4068 */ 4069 CALLB_CPR_SAFE_BEGIN(&cpr); 4070 if (startup) { 4071 interval = L2ARC_FEED_DELAY; 4072 startup = B_FALSE; 4073 } else { 4074 interval = l2arc_feed_secs; 4075 } 4076 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 4077 lbolt + (hz * interval)); 4078 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 4079 4080 mutex_enter(&l2arc_dev_mtx); 4081 4082 /* 4083 * This selects the next l2arc device to write to, and in 4084 * doing so the next spa to feed from: dev->l2ad_spa. This 4085 * will return NULL if there are no l2arc devices or if they 4086 * are all faulted. 4087 */ 4088 if ((dev = l2arc_dev_get_next()) == NULL) { 4089 mutex_exit(&l2arc_dev_mtx); 4090 continue; 4091 } 4092 4093 /* 4094 * Avoid contributing to memory pressure. 4095 */ 4096 if (arc_reclaim_needed()) { 4097 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 4098 mutex_exit(&l2arc_dev_mtx); 4099 continue; 4100 } 4101 4102 spa = dev->l2ad_spa; 4103 ASSERT(spa != NULL); 4104 ARCSTAT_BUMP(arcstat_l2_feeds); 4105 4106 /* 4107 * Evict L2ARC buffers that will be overwritten. 
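		 * One write max (dev->l2ad_write bytes) of space is cleared
		 * ahead of the write hand before l2arc_write_buffers() fills
		 * it below.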
4108 */ 4109 l2arc_evict(dev, dev->l2ad_write, B_FALSE); 4110 4111 /* 4112 * Write ARC buffers. 4113 */ 4114 l2arc_write_buffers(spa, dev); 4115 mutex_exit(&l2arc_dev_mtx); 4116 } 4117 4118 l2arc_thread_exit = 0; 4119 cv_broadcast(&l2arc_feed_thr_cv); 4120 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 4121 thread_exit(); 4122 } 4123 4124 boolean_t 4125 l2arc_vdev_present(vdev_t *vd) 4126 { 4127 l2arc_dev_t *dev; 4128 4129 mutex_enter(&l2arc_dev_mtx); 4130 for (dev = list_head(l2arc_dev_list); dev != NULL; 4131 dev = list_next(l2arc_dev_list, dev)) { 4132 if (dev->l2ad_vdev == vd) 4133 break; 4134 } 4135 mutex_exit(&l2arc_dev_mtx); 4136 4137 return (dev != NULL); 4138 } 4139 4140 /* 4141 * Add a vdev for use by the L2ARC. By this point the spa has already 4142 * validated the vdev and opened it. 4143 */ 4144 void 4145 l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) 4146 { 4147 l2arc_dev_t *adddev; 4148 4149 ASSERT(!l2arc_vdev_present(vd)); 4150 4151 /* 4152 * Create a new l2arc device entry. 4153 */ 4154 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 4155 adddev->l2ad_spa = spa; 4156 adddev->l2ad_vdev = vd; 4157 adddev->l2ad_write = l2arc_write_max; 4158 adddev->l2ad_start = start; 4159 adddev->l2ad_end = end; 4160 adddev->l2ad_hand = adddev->l2ad_start; 4161 adddev->l2ad_evict = adddev->l2ad_start; 4162 adddev->l2ad_first = B_TRUE; 4163 ASSERT3U(adddev->l2ad_write, >, 0); 4164 4165 /* 4166 * This is a list of all ARC buffers that are still valid on the 4167 * device. 4168 */ 4169 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 4170 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 4171 offsetof(arc_buf_hdr_t, b_l2node)); 4172 4173 spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0); 4174 4175 /* 4176 * Add device to global list 4177 */ 4178 mutex_enter(&l2arc_dev_mtx); 4179 list_insert_head(l2arc_dev_list, adddev); 4180 atomic_inc_64(&l2arc_ndev); 4181 mutex_exit(&l2arc_dev_mtx); 4182 } 4183 4184 /* 4185 * Remove a vdev from the L2ARC. 4186 */ 4187 void 4188 l2arc_remove_vdev(vdev_t *vd) 4189 { 4190 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 4191 4192 /* 4193 * We can only grab the spa config lock when cache device writes 4194 * complete. 4195 */ 4196 ASSERT3U(l2arc_writes_sent, ==, l2arc_writes_done); 4197 4198 /* 4199 * Find the device by vdev 4200 */ 4201 mutex_enter(&l2arc_dev_mtx); 4202 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 4203 nextdev = list_next(l2arc_dev_list, dev); 4204 if (vd == dev->l2ad_vdev) { 4205 remdev = dev; 4206 break; 4207 } 4208 } 4209 ASSERT(remdev != NULL); 4210 4211 /* 4212 * Remove device from global list 4213 */ 4214 list_remove(l2arc_dev_list, remdev); 4215 l2arc_dev_last = NULL; /* may have been invalidated */ 4216 4217 /* 4218 * Clear all buflists and ARC references. L2ARC device flush. 
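	 * Passing 'all' as B_TRUE to l2arc_evict() drops every buffer
	 * still on the device, leaving the buflist empty before it is
	 * destroyed and freed.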
4219 */ 4220 l2arc_evict(remdev, 0, B_TRUE); 4221 list_destroy(remdev->l2ad_buflist); 4222 kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 4223 kmem_free(remdev, sizeof (l2arc_dev_t)); 4224 4225 atomic_dec_64(&l2arc_ndev); 4226 mutex_exit(&l2arc_dev_mtx); 4227 } 4228 4229 void 4230 l2arc_init() 4231 { 4232 l2arc_thread_exit = 0; 4233 l2arc_ndev = 0; 4234 l2arc_writes_sent = 0; 4235 l2arc_writes_done = 0; 4236 4237 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 4238 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 4239 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 4240 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 4241 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 4242 4243 l2arc_dev_list = &L2ARC_dev_list; 4244 l2arc_free_on_write = &L2ARC_free_on_write; 4245 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 4246 offsetof(l2arc_dev_t, l2ad_node)); 4247 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 4248 offsetof(l2arc_data_free_t, l2df_list_node)); 4249 4250 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 4251 TS_RUN, minclsyspri); 4252 } 4253 4254 void 4255 l2arc_fini() 4256 { 4257 mutex_enter(&l2arc_feed_thr_lock); 4258 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 4259 l2arc_thread_exit = 1; 4260 while (l2arc_thread_exit != 0) 4261 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 4262 mutex_exit(&l2arc_feed_thr_lock); 4263 4264 mutex_destroy(&l2arc_feed_thr_lock); 4265 cv_destroy(&l2arc_feed_thr_cv); 4266 mutex_destroy(&l2arc_dev_mtx); 4267 mutex_destroy(&l2arc_buflist_mtx); 4268 mutex_destroy(&l2arc_free_on_write_mtx); 4269 4270 list_destroy(l2arc_dev_list); 4271 list_destroy(l2arc_free_on_write); 4272 } 4273
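
/*
 * Administration sketch (illustrative only; the device name and tunable
 * values below are examples, not defaults):
 *
 * Cache devices reach l2arc_add_vdev() after being added to a pool as
 * "cache" vdevs, e.g.:
 *
 *	# zpool add tank cache c0t5d0
 *
 * The tunables listed in the L2ARC comment above may, on Solaris, be
 * adjusted via /etc/system, subject to change as noted there:
 *
 *	set zfs:l2arc_write_max = 0x1000000
 *	set zfs:l2arc_noprefetch = 0
 */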