/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache from growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use: mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
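
/*
 * Illustrative sketch only (modelled on arc_evict() later in this file,
 * not a separate function): the lock-ordering rule above means code that
 * walks an ARC list while holding that list's lock must never block on a
 * hash lock.  A typical list walker therefore looks roughly like this:
 *
 *	mutex_enter(&state->arcs_mtx);			// list lock first
 *	for (ab = list_tail(list); ab != NULL; ab = list_prev(list, ab)) {
 *		kmutex_t *hash_lock = HDR_LOCK(ab);
 *		if (!mutex_tryenter(hash_lock))
 *			continue;	// would invert the order; skip it
 *		// ... hdr fields may be examined or changed here ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->arcs_mtx);
 *
 * Lookups by DVA (method 1) take only the hash lock, via buf_hash_find(),
 * and never need to hold an arc list lock at the same time.
 */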

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <zfs_fletcher.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
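
/*
 * Illustrative sketch only (one possible lifecycle, inferred from the
 * description above; the code does not maintain such a table): a buffer
 * created for a write and then read back under cache pressure might move
 * through the states roughly as follows.
 *
 *	arc_anon	dirty, DVA-less copy referenced by the DMU
 *	arc_mru		written out; DVA assigned, now cached and evictable
 *	arc_mru_ghost	data evicted for space; only the header remains
 *	arc_mfu		a subsequent hit promotes it to the frequent list
 *	arc_mfu_ghost	evicted again; header only
 *	arc_l2c_only	data still resident on an L2ARC device
 */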

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t	arcstat_hits;
	kstat_named_t	arcstat_misses;
	kstat_named_t	arcstat_demand_data_hits;
	kstat_named_t	arcstat_demand_data_misses;
	kstat_named_t	arcstat_demand_metadata_hits;
	kstat_named_t	arcstat_demand_metadata_misses;
	kstat_named_t	arcstat_prefetch_data_hits;
	kstat_named_t	arcstat_prefetch_data_misses;
	kstat_named_t	arcstat_prefetch_metadata_hits;
	kstat_named_t	arcstat_prefetch_metadata_misses;
	kstat_named_t	arcstat_mru_hits;
	kstat_named_t	arcstat_mru_ghost_hits;
	kstat_named_t	arcstat_mfu_hits;
	kstat_named_t	arcstat_mfu_ghost_hits;
	kstat_named_t	arcstat_deleted;
	kstat_named_t	arcstat_recycle_miss;
	kstat_named_t	arcstat_mutex_miss;
	kstat_named_t	arcstat_evict_skip;
	kstat_named_t	arcstat_evict_l2_cached;
	kstat_named_t	arcstat_evict_l2_eligible;
	kstat_named_t	arcstat_evict_l2_ineligible;
	kstat_named_t	arcstat_hash_elements;
	kstat_named_t	arcstat_hash_elements_max;
	kstat_named_t	arcstat_hash_collisions;
	kstat_named_t	arcstat_hash_chains;
	kstat_named_t	arcstat_hash_chain_max;
	kstat_named_t	arcstat_p;
	kstat_named_t	arcstat_c;
	kstat_named_t	arcstat_c_min;
	kstat_named_t	arcstat_c_max;
	kstat_named_t	arcstat_size;
	kstat_named_t	arcstat_hdr_size;
	kstat_named_t	arcstat_data_size;
	kstat_named_t	arcstat_other_size;
	kstat_named_t	arcstat_l2_hits;
	kstat_named_t	arcstat_l2_misses;
	kstat_named_t	arcstat_l2_feeds;
	kstat_named_t	arcstat_l2_rw_clash;
	kstat_named_t	arcstat_l2_read_bytes;
	kstat_named_t	arcstat_l2_write_bytes;
	kstat_named_t	arcstat_l2_writes_sent;
	kstat_named_t	arcstat_l2_writes_done;
	kstat_named_t	arcstat_l2_writes_error;
	kstat_named_t	arcstat_l2_writes_hdr_miss;
	kstat_named_t	arcstat_l2_evict_lock_retry;
	kstat_named_t	arcstat_l2_evict_reading;
	kstat_named_t	arcstat_l2_free_on_write;
	kstat_named_t	arcstat_l2_abort_lowmem;
	kstat_named_t	arcstat_l2_cksum_bad;
	kstat_named_t	arcstat_l2_io_error;
	kstat_named_t	arcstat_l2_size;
	kstat_named_t	arcstat_l2_hdr_size;
	kstat_named_t	arcstat_memory_throttle_count;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
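
/*
 * Illustrative example only (it mirrors the real call site in
 * arc_buf_add_ref() later in this file): counting a hit broken down by
 * demand/prefetch and data/metadata looks like
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * For a non-prefetch metadata buffer, cond1 is true and cond2 is false,
 * so this bumps arcstat_demand_metadata_hits; the other three
 * combinations select the remaining demand/prefetch x data/metadata
 * counters.
 */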

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_loaned_bytes;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	uint64_t		b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);

static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2			/* num of writes */
#define	L2ARC_FEED_SECS		1			/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200			/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t		*l2ad_vdev;	/* vdev */
	spa_t		*l2ad_spa;	/* spa */
	uint64_t	l2ad_hand;	/* next write location */
	uint64_t	l2ad_write;	/* desired write size, bytes */
	uint64_t	l2ad_boost;	/* warmup write boost, bytes */
	uint64_t	l2ad_start;	/* first addr on device */
	uint64_t	l2ad_end;	/* last addr on device */
	uint64_t	l2ad_evict;	/* last addr eviction reached */
	boolean_t	l2ad_first;	/* first sweep through */
	boolean_t	l2ad_writing;	/* currently writing */
	list_t		*l2ad_buflist;	/* buffer list */
	list_node_t	l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	uint64_t	b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}
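
/*
 * Illustrative sketch only (modelled on the arc_read() lookup path, which
 * is outside this excerpt; "guid" and "bp" are assumed caller-provided):
 * a lookup by DVA returns with the covering hash lock held on a hit, and
 * the caller is responsible for dropping it.
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, BP_IDENTITY(bp),
 *	    bp->blk_birth, &hash_lock);
 *	if (hdr != NULL) {
 *		// hdr fields are stable while hash_lock is held
 *		mutex_exit(hash_lock);
 *	}
 *
 * On a miss, *lockp is set to NULL and no lock is held on return.
 */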

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);

	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	ASSERT(BUF_EMPTY(buf));
	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);
	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	rw_destroy(&buf->b_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

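/*
 * Illustrative sketch only (an assumed caller, not code in this file):
 * with ZFS_DEBUG_MODIFY set, a consumer that legitimately modifies an
 * anonymous buffer is expected to bracket the modification so the debug
 * checksum stays consistent:
 *
 *	arc_buf_thaw(buf);		// discard the frozen checksum
 *	// ... modify buf->b_data ...
 *	arc_buf_freeze(buf);		// record a checksum of the new contents
 *
 * A later arc_cksum_verify() then panics only if the buffer was changed
 * outside such a window.
 */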
void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
}

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		list_t *list = &ab->b_state->arcs_list[ab->b_type];
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list[ab->b_type], ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
	ASSERT(ab->b_datacnt <= 1 || new_state != arc_anon);
	ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
		break;
	}

	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
		break;
	}

	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa_guid(spa);
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static char *arc_onloan_tag = "onloan";

/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned". Loaned
 * buffers must be returned to the arc before they can be used by the DMU or
 * freed.
 */
arc_buf_t *
arc_loan_buf(spa_t *spa, int size)
{
	arc_buf_t *buf;

	buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);

	atomic_add_64(&arc_loaned_bytes, size);
	return (buf);
}

/*
 * Return a loaned arc buffer to the arc.
 */
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(buf->b_data != NULL);
	(void) refcount_add(&hdr->b_refcnt, tag);
	(void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);

	atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
}

/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr;

	rw_enter(&buf->b_lock, RW_WRITER);
	ASSERT(buf->b_data != NULL);
	hdr = buf->b_hdr;
	(void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
	(void) refcount_remove(&hdr->b_refcnt, tag);
	buf->b_efunc = NULL;
	buf->b_private = NULL;

	atomic_add_64(&arc_loaned_bytes, hdr->b_size);
	rw_exit(&buf->b_lock);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	ASSERT(hdr->b_state != arc_anon);

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted.  Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	rw_enter(&buf->b_lock, RW_READER);
	if (buf->b_data == NULL) {
		rw_exit(&buf->b_lock);
		return;
	}
	hdr = buf->b_hdr;
	ASSERT(hdr != NULL);
	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	rw_exit(&buf->b_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);

		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size, ARC_SPACE_DATA);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				ARCSTAT_INCR(arcstat_data_size, -size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;

	if (l2hdr != NULL) {
		boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
		/*
		 * To prevent arc_free() and l2arc_evict() from
		 * attempting to free the same buffer at the same time,
		 * a FREE_IN_PROGRESS flag is given to arc_free() to
		 * give it priority.  l2arc_evict() can't destroy this
		 * header while we are waiting on l2arc_buflist_mtx.
		 *
		 * The hdr may be removed from l2ad_buflist before we
		 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
		 */
		if (!buflist_held) {
			mutex_enter(&l2arc_buflist_mtx);
			l2hdr = hdr->b_l2hdr;
		}

		if (l2hdr != NULL) {
			list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
			ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
			kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
			if (hdr->b_state == arc_l2c_only)
				l2arc_hdr_stat_remove();
			hdr->b_l2hdr = NULL;
		}

		if (!buflist_held)
			mutex_exit(&l2arc_buflist_mtx);
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			rw_enter(&buf->b_lock, RW_WRITER);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			rw_exit(&buf->b_lock);
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1) {
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			ASSERT(buf == hdr->b_buf);
			ASSERT(buf->b_efunc == NULL);
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		}
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		ASSERT(hdr->b_datacnt == 1);
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		ASSERT(buf->b_efunc == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort".  It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 */
static void *
arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	list_t *list = &state->arcs_list[type];
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ?
	    arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (spa && ab->b_spa != spa) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    ddi_get_lbolt() - ab->b_arc_access <
		    arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (!rw_tryenter(&buf->b_lock, RW_WRITER)) {
					missed += 1;
					break;
				}
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes &&
					    !HDR_L2_WRITING(ab)) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
					rw_exit(&buf->b_lock);
				} else {
					rw_exit(&buf->b_lock);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}

			if (ab->b_l2hdr) {
				ARCSTAT_INCR(arcstat_evict_l2_cached,
				    ab->b_size);
			} else {
				if (l2arc_write_eligible(ab->b_spa, ab)) {
					ARCSTAT_INCR(arcstat_evict_l2_eligible,
					    ab->b_size);
				} else {
					ARCSTAT_INCR(
					    arcstat_evict_l2_ineligible,
					    ab->b_size);
				}
			}

			if (ab->b_datacnt == 0) {
				arc_change_state(evicted_state, ab, hash_lock);
				ASSERT(HDR_IN_HASH_TABLE(ab));
				ab->b_flags |= ARC_IN_HASH_TABLE;
				ab->b_flags &= ~ARC_BUF_AVAILABLE;
				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			}
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	/*
	 * We have just evicted some data into the ghost state, make
	 * sure we also adjust the ghost state size if necessary.
	 */
	if (arc_no_grow &&
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
		int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
		    arc_mru_ghost->arcs_size - arc_c;

		if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
			int64_t todelete =
			    MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
			arc_evict_ghost(arc_mru_ghost, NULL, todelete);
		} else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
			int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
			    arc_mru_ghost->arcs_size +
			    arc_mfu_ghost->arcs_size - arc_c);
			arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
		}
	}

	return (stolen);
}

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	list_t *list = &state->arcs_list[ARC_BUFC_DATA];
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint64_t bufs_skipped = 0;
	boolean_t have_lock;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->arcs_mtx);
	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		if (spa && ab->b_spa != spa)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += ab->b_size;

			if (ab->b_l2hdr != NULL) {
				/*
				 * This buffer is cached on the 2nd Level ARC;
				 * don't destroy the header.
				 */
				arc_change_state(arc_l2c_only, ab, hash_lock);
				if (!have_lock)
					mutex_exit(hash_lock);
			} else {
				arc_change_state(arc_anon, ab, hash_lock);
				if (!have_lock)
					mutex_exit(hash_lock);
				arc_hdr_destroy(ab);
			}

			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->arcs_mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (list == &state->arcs_list[ARC_BUFC_DATA] &&
	    (bytes < 0 || bytes_deleted < bytes)) {
		list = &state->arcs_list[ARC_BUFC_METADATA];
		goto top;
	}

	if (bufs_skipped) {
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

static void
arc_adjust(void)
{
	int64_t adjustment, delta;

	/*
	 * Adjust MRU size
	 */

	adjustment = MIN(arc_size - arc_c,
	    arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - arc_p);

	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
		(void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA);
		adjustment -= delta;
	}

	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
		(void) arc_evict(arc_mru, NULL, delta, FALSE,
		    ARC_BUFC_METADATA);
	}

	/*
	 * Adjust MFU size
	 */

arc_size - arc_c; 1817 1818 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 1819 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); 1820 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA); 1821 adjustment -= delta; 1822 } 1823 1824 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1825 int64_t delta = MIN(adjustment, 1826 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); 1827 (void) arc_evict(arc_mfu, NULL, delta, FALSE, 1828 ARC_BUFC_METADATA); 1829 } 1830 1831 /* 1832 * Adjust ghost lists 1833 */ 1834 1835 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; 1836 1837 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { 1838 delta = MIN(arc_mru_ghost->arcs_size, adjustment); 1839 arc_evict_ghost(arc_mru_ghost, NULL, delta); 1840 } 1841 1842 adjustment = 1843 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; 1844 1845 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { 1846 delta = MIN(arc_mfu_ghost->arcs_size, adjustment); 1847 arc_evict_ghost(arc_mfu_ghost, NULL, delta); 1848 } 1849 } 1850 1851 static void 1852 arc_do_user_evicts(void) 1853 { 1854 mutex_enter(&arc_eviction_mtx); 1855 while (arc_eviction_list != NULL) { 1856 arc_buf_t *buf = arc_eviction_list; 1857 arc_eviction_list = buf->b_next; 1858 rw_enter(&buf->b_lock, RW_WRITER); 1859 buf->b_hdr = NULL; 1860 rw_exit(&buf->b_lock); 1861 mutex_exit(&arc_eviction_mtx); 1862 1863 if (buf->b_efunc != NULL) 1864 VERIFY(buf->b_efunc(buf) == 0); 1865 1866 buf->b_efunc = NULL; 1867 buf->b_private = NULL; 1868 kmem_cache_free(buf_cache, buf); 1869 mutex_enter(&arc_eviction_mtx); 1870 } 1871 mutex_exit(&arc_eviction_mtx); 1872 } 1873 1874 /* 1875 * Flush all *evictable* data from the cache for the given spa. 1876 * NOTE: this will not touch "active" (i.e. referenced) data. 
1877 */ 1878 void 1879 arc_flush(spa_t *spa) 1880 { 1881 uint64_t guid = 0; 1882 1883 if (spa) 1884 guid = spa_guid(spa); 1885 1886 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { 1887 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA); 1888 if (spa) 1889 break; 1890 } 1891 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { 1892 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA); 1893 if (spa) 1894 break; 1895 } 1896 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { 1897 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA); 1898 if (spa) 1899 break; 1900 } 1901 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { 1902 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA); 1903 if (spa) 1904 break; 1905 } 1906 1907 arc_evict_ghost(arc_mru_ghost, guid, -1); 1908 arc_evict_ghost(arc_mfu_ghost, guid, -1); 1909 1910 mutex_enter(&arc_reclaim_thr_lock); 1911 arc_do_user_evicts(); 1912 mutex_exit(&arc_reclaim_thr_lock); 1913 ASSERT(spa || arc_eviction_list == NULL); 1914 } 1915 1916 void 1917 arc_shrink(void) 1918 { 1919 if (arc_c > arc_c_min) { 1920 uint64_t to_free; 1921 1922 #ifdef _KERNEL 1923 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 1924 #else 1925 to_free = arc_c >> arc_shrink_shift; 1926 #endif 1927 if (arc_c > arc_c_min + to_free) 1928 atomic_add_64(&arc_c, -to_free); 1929 else 1930 arc_c = arc_c_min; 1931 1932 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 1933 if (arc_c > arc_size) 1934 arc_c = MAX(arc_size, arc_c_min); 1935 if (arc_p > arc_c) 1936 arc_p = (arc_c >> 1); 1937 ASSERT(arc_c >= arc_c_min); 1938 ASSERT((int64_t)arc_p >= 0); 1939 } 1940 1941 if (arc_size > arc_c) 1942 arc_adjust(); 1943 } 1944 1945 static int 1946 arc_reclaim_needed(void) 1947 { 1948 uint64_t extra; 1949 1950 #ifdef _KERNEL 1951 1952 if (needfree) 1953 return (1); 1954 1955 /* 1956 * take 'desfree' extra pages, so we reclaim sooner, rather than later 1957 */ 1958 extra = desfree; 1959 1960 /* 1961 * check that we're out of range of the pageout scanner. It starts to 1962 * schedule paging if freemem is less than lotsfree and needfree. 1963 * lotsfree is the high-water mark for pageout, and needfree is the 1964 * number of needed free pages. We add extra pages here to make sure 1965 * the scanner doesn't start up while we're freeing memory. 1966 */ 1967 if (freemem < lotsfree + needfree + extra) 1968 return (1); 1969 1970 /* 1971 * check to make sure that swapfs has enough space so that anon 1972 * reservations can still succeed. anon_resvmem() checks that the 1973 * availrmem is greater than swapfs_minfree, and the number of reserved 1974 * swap pages. We also add a bit of extra here just to prevent 1975 * circumstances from getting really dire. 1976 */ 1977 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 1978 return (1); 1979 1980 #if defined(__i386) 1981 /* 1982 * If we're on an i386 platform, it's possible that we'll exhaust the 1983 * kernel heap space before we ever run out of available physical 1984 * memory. Most checks of the size of the heap_area compare against 1985 * tune.t_minarmem, which is the minimum available real memory that we 1986 * can have in the system. However, this is generally fixed at 25 pages 1987 * which is so low that it's useless. In this comparison, we seek to 1988 * calculate the total heap-size, and reclaim if more than 3/4ths of the 1989 * heap is allocated. 
(Or, in the calculation, if less than 1/4th is 1990 * free) 1991 */ 1992 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 1993 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 1994 return (1); 1995 #endif 1996 1997 #else 1998 if (spa_get_random(100) == 0) 1999 return (1); 2000 #endif 2001 return (0); 2002 } 2003 2004 static void 2005 arc_kmem_reap_now(arc_reclaim_strategy_t strat) 2006 { 2007 size_t i; 2008 kmem_cache_t *prev_cache = NULL; 2009 kmem_cache_t *prev_data_cache = NULL; 2010 extern kmem_cache_t *zio_buf_cache[]; 2011 extern kmem_cache_t *zio_data_buf_cache[]; 2012 2013 #ifdef _KERNEL 2014 if (arc_meta_used >= arc_meta_limit) { 2015 /* 2016 * We are exceeding our meta-data cache limit. 2017 * Purge some DNLC entries to release holds on meta-data. 2018 */ 2019 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 2020 } 2021 #if defined(__i386) 2022 /* 2023 * Reclaim unused memory from all kmem caches. 2024 */ 2025 kmem_reap(); 2026 #endif 2027 #endif 2028 2029 /* 2030 * An aggressive reclamation will shrink the cache size as well as 2031 * reap free buffers from the arc kmem caches. 2032 */ 2033 if (strat == ARC_RECLAIM_AGGR) 2034 arc_shrink(); 2035 2036 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 2037 if (zio_buf_cache[i] != prev_cache) { 2038 prev_cache = zio_buf_cache[i]; 2039 kmem_cache_reap_now(zio_buf_cache[i]); 2040 } 2041 if (zio_data_buf_cache[i] != prev_data_cache) { 2042 prev_data_cache = zio_data_buf_cache[i]; 2043 kmem_cache_reap_now(zio_data_buf_cache[i]); 2044 } 2045 } 2046 kmem_cache_reap_now(buf_cache); 2047 kmem_cache_reap_now(hdr_cache); 2048 } 2049 2050 static void 2051 arc_reclaim_thread(void) 2052 { 2053 clock_t growtime = 0; 2054 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 2055 callb_cpr_t cpr; 2056 2057 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 2058 2059 mutex_enter(&arc_reclaim_thr_lock); 2060 while (arc_thread_exit == 0) { 2061 if (arc_reclaim_needed()) { 2062 2063 if (arc_no_grow) { 2064 if (last_reclaim == ARC_RECLAIM_CONS) { 2065 last_reclaim = ARC_RECLAIM_AGGR; 2066 } else { 2067 last_reclaim = ARC_RECLAIM_CONS; 2068 } 2069 } else { 2070 arc_no_grow = TRUE; 2071 last_reclaim = ARC_RECLAIM_AGGR; 2072 membar_producer(); 2073 } 2074 2075 /* reset the growth delay for every reclaim */ 2076 growtime = ddi_get_lbolt() + (arc_grow_retry * hz); 2077 2078 arc_kmem_reap_now(last_reclaim); 2079 arc_warm = B_TRUE; 2080 2081 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) { 2082 arc_no_grow = FALSE; 2083 } 2084 2085 if (2 * arc_c < arc_size + 2086 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) 2087 arc_adjust(); 2088 2089 if (arc_eviction_list != NULL) 2090 arc_do_user_evicts(); 2091 2092 /* block until needed, or one second, whichever is shorter */ 2093 CALLB_CPR_SAFE_BEGIN(&cpr); 2094 (void) cv_timedwait(&arc_reclaim_thr_cv, 2095 &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz)); 2096 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 2097 } 2098 2099 arc_thread_exit = 0; 2100 cv_broadcast(&arc_reclaim_thr_cv); 2101 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 2102 thread_exit(); 2103 } 2104 2105 /* 2106 * Adapt arc info given the number of bytes we are trying to add and 2107 * the state that we are coming from. This function is only called 2108 * when we are adding new content to the cache.
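 *
 * As a rough sketch of the adaptation performed below (an illustration
 * of the policy, not the code itself):
 *
 *	if (the hit was in arc_mru_ghost)
 *		arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
 *	else if (the hit was in arc_mfu_ghost)
 *		arc_p = MAX(arc_p_min, arc_p - MIN(bytes * mult, arc_p));
 *
 * where mult is the ratio of the other ghost list's size to this ghost
 * list's size (never less than 1), so a hit in the smaller ghost list
 * moves the MRU target arc_p more aggressively.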
2109 */ 2110 static void 2111 arc_adapt(int bytes, arc_state_t *state) 2112 { 2113 int mult; 2114 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 2115 2116 if (state == arc_l2c_only) 2117 return; 2118 2119 ASSERT(bytes > 0); 2120 /* 2121 * Adapt the target size of the MRU list: 2122 * - if we just hit in the MRU ghost list, then increase 2123 * the target size of the MRU list. 2124 * - if we just hit in the MFU ghost list, then increase 2125 * the target size of the MFU list by decreasing the 2126 * target size of the MRU list. 2127 */ 2128 if (state == arc_mru_ghost) { 2129 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 2130 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 2131 2132 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 2133 } else if (state == arc_mfu_ghost) { 2134 uint64_t delta; 2135 2136 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 2137 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2138 2139 delta = MIN(bytes * mult, arc_p); 2140 arc_p = MAX(arc_p_min, arc_p - delta); 2141 } 2142 ASSERT((int64_t)arc_p >= 0); 2143 2144 if (arc_reclaim_needed()) { 2145 cv_signal(&arc_reclaim_thr_cv); 2146 return; 2147 } 2148 2149 if (arc_no_grow) 2150 return; 2151 2152 if (arc_c >= arc_c_max) 2153 return; 2154 2155 /* 2156 * If we're within (2 * maxblocksize) bytes of the target 2157 * cache size, increment the target cache size 2158 */ 2159 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2160 atomic_add_64(&arc_c, (int64_t)bytes); 2161 if (arc_c > arc_c_max) 2162 arc_c = arc_c_max; 2163 else if (state == arc_anon) 2164 atomic_add_64(&arc_p, (int64_t)bytes); 2165 if (arc_p > arc_c) 2166 arc_p = arc_c; 2167 } 2168 ASSERT((int64_t)arc_p >= 0); 2169 } 2170 2171 /* 2172 * Check if the cache has reached its limits and eviction is required 2173 * prior to insert. 2174 */ 2175 static int 2176 arc_evict_needed(arc_buf_contents_t type) 2177 { 2178 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2179 return (1); 2180 2181 #ifdef _KERNEL 2182 /* 2183 * If zio data pages are being allocated out of a separate heap segment, 2184 * then enforce that the size of available vmem for this area remains 2185 * above about 1/32nd free. 2186 */ 2187 if (type == ARC_BUFC_DATA && zio_arena != NULL && 2188 vmem_size(zio_arena, VMEM_FREE) < 2189 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 2190 return (1); 2191 #endif 2192 2193 if (arc_reclaim_needed()) 2194 return (1); 2195 2196 return (arc_size > arc_c); 2197 } 2198 2199 /* 2200 * The buffer, supplied as the first argument, needs a data block. 2201 * So, if we are at cache max, determine which cache should be victimized. 2202 * We have the following cases: 2203 * 2204 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2205 * In this situation if we're out of space, but the resident size of the MFU is 2206 * under the limit, victimize the MFU cache to satisfy this insertion request. 2207 * 2208 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2209 * Here, we've used up all of the available space for the MRU, so we need to 2210 * evict from our own cache instead. Evict from the set of resident MRU 2211 * entries. 2212 * 2213 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2214 * c minus p represents the MFU space in the cache, since p is the size of the 2215 * cache that is dedicated to the MRU. In this situation there's still space on 2216 * the MFU side, so the MRU side needs to be victimized. 2217 * 2218 * 4. 
Insert for MFU (c - p) < sizeof(arc_mfu) -> 2219 * MFU's resident set is consuming more space than it has been allotted. In 2220 * this situation, we must victimize our own cache, the MFU, for this insertion. 2221 */ 2222 static void 2223 arc_get_data_buf(arc_buf_t *buf) 2224 { 2225 arc_state_t *state = buf->b_hdr->b_state; 2226 uint64_t size = buf->b_hdr->b_size; 2227 arc_buf_contents_t type = buf->b_hdr->b_type; 2228 2229 arc_adapt(size, state); 2230 2231 /* 2232 * We have not yet reached cache maximum size, 2233 * just allocate a new buffer. 2234 */ 2235 if (!arc_evict_needed(type)) { 2236 if (type == ARC_BUFC_METADATA) { 2237 buf->b_data = zio_buf_alloc(size); 2238 arc_space_consume(size, ARC_SPACE_DATA); 2239 } else { 2240 ASSERT(type == ARC_BUFC_DATA); 2241 buf->b_data = zio_data_buf_alloc(size); 2242 ARCSTAT_INCR(arcstat_data_size, size); 2243 atomic_add_64(&arc_size, size); 2244 } 2245 goto out; 2246 } 2247 2248 /* 2249 * If we are prefetching from the mfu ghost list, this buffer 2250 * will end up on the mru list; so steal space from there. 2251 */ 2252 if (state == arc_mfu_ghost) 2253 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2254 else if (state == arc_mru_ghost) 2255 state = arc_mru; 2256 2257 if (state == arc_mru || state == arc_anon) { 2258 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2259 state = (arc_mfu->arcs_lsize[type] >= size && 2260 arc_p > mru_used) ? arc_mfu : arc_mru; 2261 } else { 2262 /* MFU cases */ 2263 uint64_t mfu_space = arc_c - arc_p; 2264 state = (arc_mru->arcs_lsize[type] >= size && 2265 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2266 } 2267 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 2268 if (type == ARC_BUFC_METADATA) { 2269 buf->b_data = zio_buf_alloc(size); 2270 arc_space_consume(size, ARC_SPACE_DATA); 2271 } else { 2272 ASSERT(type == ARC_BUFC_DATA); 2273 buf->b_data = zio_data_buf_alloc(size); 2274 ARCSTAT_INCR(arcstat_data_size, size); 2275 atomic_add_64(&arc_size, size); 2276 } 2277 ARCSTAT_BUMP(arcstat_recycle_miss); 2278 } 2279 ASSERT(buf->b_data != NULL); 2280 out: 2281 /* 2282 * Update the state size. Note that ghost states have a 2283 * "ghost size" and so don't need to be updated. 2284 */ 2285 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2286 arc_buf_hdr_t *hdr = buf->b_hdr; 2287 2288 atomic_add_64(&hdr->b_state->arcs_size, size); 2289 if (list_link_active(&hdr->b_arc_node)) { 2290 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2291 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2292 } 2293 /* 2294 * If we are growing the cache, and we are adding anonymous 2295 * data, and we have outgrown arc_p, update arc_p 2296 */ 2297 if (arc_size < arc_c && hdr->b_state == arc_anon && 2298 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2299 arc_p = MIN(arc_c, arc_p + size); 2300 } 2301 } 2302 2303 /* 2304 * This routine is called whenever a buffer is accessed. 2305 * NOTE: the hash lock is dropped in this function. 2306 */ 2307 static void 2308 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2309 { 2310 clock_t now; 2311 2312 ASSERT(MUTEX_HELD(hash_lock)); 2313 2314 if (buf->b_state == arc_anon) { 2315 /* 2316 * This buffer is not in the cache, and does not 2317 * appear in our "ghost" list. Add the new buffer 2318 * to the MRU state. 
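 *
 * (This is the first of the transitions handled below; for reference,
 * the complete set is roughly:
 *
 *	anon      -> mru	first insertion into the cache
 *	mru       -> mfu	re-accessed more than ARC_MINTIME later
 *	mru_ghost -> mfu	hit after eviction (mru if a prefetch)
 *	mfu       -> mfu	stays put on subsequent hits
 *	mfu_ghost -> mfu	hit after eviction (mru if a prefetch)
 *	l2c_only  -> mfu	hit while resident only in the L2ARC
 *
 * Prefetch hits on mru buffers refresh b_arc_access without changing
 * state.)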
2319 */ 2320 2321 ASSERT(buf->b_arc_access == 0); 2322 buf->b_arc_access = ddi_get_lbolt(); 2323 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2324 arc_change_state(arc_mru, buf, hash_lock); 2325 2326 } else if (buf->b_state == arc_mru) { 2327 now = ddi_get_lbolt(); 2328 2329 /* 2330 * If this buffer is here because of a prefetch, then either: 2331 * - clear the flag if this is a "referencing" read 2332 * (any subsequent access will bump this into the MFU state). 2333 * or 2334 * - move the buffer to the head of the list if this is 2335 * another prefetch (to make it less likely to be evicted). 2336 */ 2337 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2338 if (refcount_count(&buf->b_refcnt) == 0) { 2339 ASSERT(list_link_active(&buf->b_arc_node)); 2340 } else { 2341 buf->b_flags &= ~ARC_PREFETCH; 2342 ARCSTAT_BUMP(arcstat_mru_hits); 2343 } 2344 buf->b_arc_access = now; 2345 return; 2346 } 2347 2348 /* 2349 * This buffer has been "accessed" only once so far, 2350 * but it is still in the cache. Move it to the MFU 2351 * state. 2352 */ 2353 if (now > buf->b_arc_access + ARC_MINTIME) { 2354 /* 2355 * More than 125ms have passed since we 2356 * instantiated this buffer. Move it to the 2357 * most frequently used state. 2358 */ 2359 buf->b_arc_access = now; 2360 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2361 arc_change_state(arc_mfu, buf, hash_lock); 2362 } 2363 ARCSTAT_BUMP(arcstat_mru_hits); 2364 } else if (buf->b_state == arc_mru_ghost) { 2365 arc_state_t *new_state; 2366 /* 2367 * This buffer has been "accessed" recently, but 2368 * was evicted from the cache. Move it to the 2369 * MFU state. 2370 */ 2371 2372 if (buf->b_flags & ARC_PREFETCH) { 2373 new_state = arc_mru; 2374 if (refcount_count(&buf->b_refcnt) > 0) 2375 buf->b_flags &= ~ARC_PREFETCH; 2376 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2377 } else { 2378 new_state = arc_mfu; 2379 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2380 } 2381 2382 buf->b_arc_access = ddi_get_lbolt(); 2383 arc_change_state(new_state, buf, hash_lock); 2384 2385 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2386 } else if (buf->b_state == arc_mfu) { 2387 /* 2388 * This buffer has been accessed more than once and is 2389 * still in the cache. Keep it in the MFU state. 2390 * 2391 * NOTE: an add_reference() that occurred when we did 2392 * the arc_read() will have kicked this off the list. 2393 * If it was a prefetch, we will explicitly move it to 2394 * the head of the list now. 2395 */ 2396 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2397 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2398 ASSERT(list_link_active(&buf->b_arc_node)); 2399 } 2400 ARCSTAT_BUMP(arcstat_mfu_hits); 2401 buf->b_arc_access = ddi_get_lbolt(); 2402 } else if (buf->b_state == arc_mfu_ghost) { 2403 arc_state_t *new_state = arc_mfu; 2404 /* 2405 * This buffer has been accessed more than once but has 2406 * been evicted from the cache. Move it back to the 2407 * MFU state. 2408 */ 2409 2410 if (buf->b_flags & ARC_PREFETCH) { 2411 /* 2412 * This is a prefetch access... 2413 * move this block back to the MRU state. 2414 */ 2415 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 2416 new_state = arc_mru; 2417 } 2418 2419 buf->b_arc_access = ddi_get_lbolt(); 2420 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2421 arc_change_state(new_state, buf, hash_lock); 2422 2423 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2424 } else if (buf->b_state == arc_l2c_only) { 2425 /* 2426 * This buffer is on the 2nd Level ARC. 
2427 */ 2428 2429 buf->b_arc_access = ddi_get_lbolt(); 2430 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2431 arc_change_state(arc_mfu, buf, hash_lock); 2432 } else { 2433 ASSERT(!"invalid arc state"); 2434 } 2435 } 2436 2437 /* a generic arc_done_func_t which you can use */ 2438 /* ARGSUSED */ 2439 void 2440 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2441 { 2442 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2443 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2444 } 2445 2446 /* a generic arc_done_func_t */ 2447 void 2448 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2449 { 2450 arc_buf_t **bufp = arg; 2451 if (zio && zio->io_error) { 2452 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2453 *bufp = NULL; 2454 } else { 2455 *bufp = buf; 2456 } 2457 } 2458 2459 static void 2460 arc_read_done(zio_t *zio) 2461 { 2462 arc_buf_hdr_t *hdr, *found; 2463 arc_buf_t *buf; 2464 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2465 kmutex_t *hash_lock; 2466 arc_callback_t *callback_list, *acb; 2467 int freeable = FALSE; 2468 2469 buf = zio->io_private; 2470 hdr = buf->b_hdr; 2471 2472 /* 2473 * The hdr was inserted into hash-table and removed from lists 2474 * prior to starting I/O. We should find this header, since 2475 * it's in the hash table, and it should be legit since it's 2476 * not possible to evict it during the I/O. The only possible 2477 * reason for it not to be found is if we were freed during the 2478 * read. 2479 */ 2480 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth, 2481 &hash_lock); 2482 2483 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2484 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2485 (found == hdr && HDR_L2_READING(hdr))); 2486 2487 hdr->b_flags &= ~ARC_L2_EVICTED; 2488 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2489 hdr->b_flags &= ~ARC_L2CACHE; 2490 2491 /* byteswap if necessary */ 2492 callback_list = hdr->b_acb; 2493 ASSERT(callback_list != NULL); 2494 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { 2495 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 2496 byteswap_uint64_array : 2497 dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap; 2498 func(buf->b_data, hdr->b_size); 2499 } 2500 2501 arc_cksum_compute(buf, B_FALSE); 2502 2503 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) { 2504 /* 2505 * Only call arc_access on anonymous buffers. This is because 2506 * if we've issued an I/O for an evicted buffer, we've already 2507 * called arc_access (to prevent any simultaneous readers from 2508 * getting confused). 
2509 */ 2510 arc_access(hdr, hash_lock); 2511 } 2512 2513 /* create copies of the data buffer for the callers */ 2514 abuf = buf; 2515 for (acb = callback_list; acb; acb = acb->acb_next) { 2516 if (acb->acb_done) { 2517 if (abuf == NULL) 2518 abuf = arc_buf_clone(buf); 2519 acb->acb_buf = abuf; 2520 abuf = NULL; 2521 } 2522 } 2523 hdr->b_acb = NULL; 2524 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2525 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2526 if (abuf == buf) { 2527 ASSERT(buf->b_efunc == NULL); 2528 ASSERT(hdr->b_datacnt == 1); 2529 hdr->b_flags |= ARC_BUF_AVAILABLE; 2530 } 2531 2532 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2533 2534 if (zio->io_error != 0) { 2535 hdr->b_flags |= ARC_IO_ERROR; 2536 if (hdr->b_state != arc_anon) 2537 arc_change_state(arc_anon, hdr, hash_lock); 2538 if (HDR_IN_HASH_TABLE(hdr)) 2539 buf_hash_remove(hdr); 2540 freeable = refcount_is_zero(&hdr->b_refcnt); 2541 } 2542 2543 /* 2544 * Broadcast before we drop the hash_lock to avoid the possibility 2545 * that the hdr (and hence the cv) might be freed before we get to 2546 * the cv_broadcast(). 2547 */ 2548 cv_broadcast(&hdr->b_cv); 2549 2550 if (hash_lock) { 2551 mutex_exit(hash_lock); 2552 } else { 2553 /* 2554 * This block was freed while we waited for the read to 2555 * complete. It has been removed from the hash table and 2556 * moved to the anonymous state (so that it won't show up 2557 * in the cache). 2558 */ 2559 ASSERT3P(hdr->b_state, ==, arc_anon); 2560 freeable = refcount_is_zero(&hdr->b_refcnt); 2561 } 2562 2563 /* execute each callback and free its structure */ 2564 while ((acb = callback_list) != NULL) { 2565 if (acb->acb_done) 2566 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2567 2568 if (acb->acb_zio_dummy != NULL) { 2569 acb->acb_zio_dummy->io_error = zio->io_error; 2570 zio_nowait(acb->acb_zio_dummy); 2571 } 2572 2573 callback_list = acb->acb_next; 2574 kmem_free(acb, sizeof (arc_callback_t)); 2575 } 2576 2577 if (freeable) 2578 arc_hdr_destroy(hdr); 2579 } 2580 2581 /* 2582 * "Read" the block at the specified DVA (in bp) via the 2583 * cache. If the block is found in the cache, invoke the provided 2584 * callback immediately and return. Note that the `zio' parameter 2585 * in the callback will be NULL in this case, since no IO was 2586 * required. If the block is not in the cache pass the read request 2587 * on to the spa with a substitute callback function, so that the 2588 * requested block will be added to the cache. 2589 * 2590 * If a read request arrives for a block that has a read in-progress, 2591 * either wait for the in-progress read to complete (and return the 2592 * results); or, if this is a read with a "done" func, add a record 2593 * to the read to invoke the "done" func when the read completes, 2594 * and return; or just return. 2595 * 2596 * arc_read_done() will invoke all the requested "done" functions 2597 * for readers of this block. 2598 * 2599 * Normal callers should use arc_read and pass the arc buffer and offset 2600 * for the bp. But if you know you don't need locking, you can use 2601 * arc_read_nolock.
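 *
 * A minimal sketch of a "done" callback (illustrative only: my_done,
 * my_state_t and its dest field are hypothetical; the private argument
 * doubles as the reference tag, as it does for arc_bcopy_func() above):
 *
 *	static void
 *	my_done(zio_t *zio, arc_buf_t *buf, void *private)
 *	{
 *		my_state_t *ms = private;
 *
 *		if (zio == NULL || zio->io_error == 0)
 *			bcopy(buf->b_data, ms->dest, buf->b_hdr->b_size);
 *		VERIFY(arc_buf_remove_ref(buf, ms) == 1);
 *	}
 *
 * Here zio is NULL on a cache hit, and the callback is responsible for
 * dropping the reference that the ARC added on its behalf.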
2602 */ 2603 int 2604 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_buf_t *pbuf, 2605 arc_done_func_t *done, void *private, int priority, int zio_flags, 2606 uint32_t *arc_flags, const zbookmark_t *zb) 2607 { 2608 int err; 2609 2610 ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt)); 2611 ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size); 2612 rw_enter(&pbuf->b_lock, RW_READER); 2613 2614 err = arc_read_nolock(pio, spa, bp, done, private, priority, 2615 zio_flags, arc_flags, zb); 2616 rw_exit(&pbuf->b_lock); 2617 2618 return (err); 2619 } 2620 2621 int 2622 arc_read_nolock(zio_t *pio, spa_t *spa, const blkptr_t *bp, 2623 arc_done_func_t *done, void *private, int priority, int zio_flags, 2624 uint32_t *arc_flags, const zbookmark_t *zb) 2625 { 2626 arc_buf_hdr_t *hdr; 2627 arc_buf_t *buf; 2628 kmutex_t *hash_lock; 2629 zio_t *rzio; 2630 uint64_t guid = spa_guid(spa); 2631 2632 top: 2633 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp), 2634 &hash_lock); 2635 if (hdr && hdr->b_datacnt > 0) { 2636 2637 *arc_flags |= ARC_CACHED; 2638 2639 if (HDR_IO_IN_PROGRESS(hdr)) { 2640 2641 if (*arc_flags & ARC_WAIT) { 2642 cv_wait(&hdr->b_cv, hash_lock); 2643 mutex_exit(hash_lock); 2644 goto top; 2645 } 2646 ASSERT(*arc_flags & ARC_NOWAIT); 2647 2648 if (done) { 2649 arc_callback_t *acb = NULL; 2650 2651 acb = kmem_zalloc(sizeof (arc_callback_t), 2652 KM_SLEEP); 2653 acb->acb_done = done; 2654 acb->acb_private = private; 2655 if (pio != NULL) 2656 acb->acb_zio_dummy = zio_null(pio, 2657 spa, NULL, NULL, NULL, zio_flags); 2658 2659 ASSERT(acb->acb_done != NULL); 2660 acb->acb_next = hdr->b_acb; 2661 hdr->b_acb = acb; 2662 add_reference(hdr, hash_lock, private); 2663 mutex_exit(hash_lock); 2664 return (0); 2665 } 2666 mutex_exit(hash_lock); 2667 return (0); 2668 } 2669 2670 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2671 2672 if (done) { 2673 add_reference(hdr, hash_lock, private); 2674 /* 2675 * If this block is already in use, create a new 2676 * copy of the data so that we will be guaranteed 2677 * that arc_release() will always succeed. 
2678 */ 2679 buf = hdr->b_buf; 2680 ASSERT(buf); 2681 ASSERT(buf->b_data); 2682 if (HDR_BUF_AVAILABLE(hdr)) { 2683 ASSERT(buf->b_efunc == NULL); 2684 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2685 } else { 2686 buf = arc_buf_clone(buf); 2687 } 2688 2689 } else if (*arc_flags & ARC_PREFETCH && 2690 refcount_count(&hdr->b_refcnt) == 0) { 2691 hdr->b_flags |= ARC_PREFETCH; 2692 } 2693 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2694 arc_access(hdr, hash_lock); 2695 if (*arc_flags & ARC_L2CACHE) 2696 hdr->b_flags |= ARC_L2CACHE; 2697 mutex_exit(hash_lock); 2698 ARCSTAT_BUMP(arcstat_hits); 2699 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2700 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2701 data, metadata, hits); 2702 2703 if (done) 2704 done(NULL, buf, private); 2705 } else { 2706 uint64_t size = BP_GET_LSIZE(bp); 2707 arc_callback_t *acb; 2708 vdev_t *vd = NULL; 2709 uint64_t addr; 2710 boolean_t devw = B_FALSE; 2711 2712 if (hdr == NULL) { 2713 /* this block is not in the cache */ 2714 arc_buf_hdr_t *exists; 2715 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2716 buf = arc_buf_alloc(spa, size, private, type); 2717 hdr = buf->b_hdr; 2718 hdr->b_dva = *BP_IDENTITY(bp); 2719 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 2720 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2721 exists = buf_hash_insert(hdr, &hash_lock); 2722 if (exists) { 2723 /* somebody beat us to the hash insert */ 2724 mutex_exit(hash_lock); 2725 bzero(&hdr->b_dva, sizeof (dva_t)); 2726 hdr->b_birth = 0; 2727 hdr->b_cksum0 = 0; 2728 (void) arc_buf_remove_ref(buf, private); 2729 goto top; /* restart the IO request */ 2730 } 2731 /* if this is a prefetch, we don't have a reference */ 2732 if (*arc_flags & ARC_PREFETCH) { 2733 (void) remove_reference(hdr, hash_lock, 2734 private); 2735 hdr->b_flags |= ARC_PREFETCH; 2736 } 2737 if (*arc_flags & ARC_L2CACHE) 2738 hdr->b_flags |= ARC_L2CACHE; 2739 if (BP_GET_LEVEL(bp) > 0) 2740 hdr->b_flags |= ARC_INDIRECT; 2741 } else { 2742 /* this block is in the ghost cache */ 2743 ASSERT(GHOST_STATE(hdr->b_state)); 2744 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2745 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2746 ASSERT(hdr->b_buf == NULL); 2747 2748 /* if this is a prefetch, we don't have a reference */ 2749 if (*arc_flags & ARC_PREFETCH) 2750 hdr->b_flags |= ARC_PREFETCH; 2751 else 2752 add_reference(hdr, hash_lock, private); 2753 if (*arc_flags & ARC_L2CACHE) 2754 hdr->b_flags |= ARC_L2CACHE; 2755 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2756 buf->b_hdr = hdr; 2757 buf->b_data = NULL; 2758 buf->b_efunc = NULL; 2759 buf->b_private = NULL; 2760 buf->b_next = NULL; 2761 hdr->b_buf = buf; 2762 arc_get_data_buf(buf); 2763 ASSERT(hdr->b_datacnt == 0); 2764 hdr->b_datacnt = 1; 2765 } 2766 2767 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2768 acb->acb_done = done; 2769 acb->acb_private = private; 2770 2771 ASSERT(hdr->b_acb == NULL); 2772 hdr->b_acb = acb; 2773 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2774 2775 /* 2776 * If the buffer has been evicted, migrate it to a present state 2777 * before issuing the I/O. Once we drop the hash-table lock, 2778 * the header will be marked as I/O in progress and have an 2779 * attached buffer. At this point, anybody who finds this 2780 * buffer ought to notice that it's legit but has a pending I/O. 
2781 */ 2782 2783 if (GHOST_STATE(hdr->b_state)) 2784 arc_access(hdr, hash_lock); 2785 2786 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && 2787 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 2788 devw = hdr->b_l2hdr->b_dev->l2ad_writing; 2789 addr = hdr->b_l2hdr->b_daddr; 2790 /* 2791 * Lock out device removal. 2792 */ 2793 if (vdev_is_dead(vd) || 2794 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 2795 vd = NULL; 2796 } 2797 2798 mutex_exit(hash_lock); 2799 2800 ASSERT3U(hdr->b_size, ==, size); 2801 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 2802 uint64_t, size, zbookmark_t *, zb); 2803 ARCSTAT_BUMP(arcstat_misses); 2804 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2805 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2806 data, metadata, misses); 2807 2808 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 2809 /* 2810 * Read from the L2ARC if the following are true: 2811 * 1. The L2ARC vdev was previously cached. 2812 * 2. This buffer still has L2ARC metadata. 2813 * 3. This buffer isn't currently writing to the L2ARC. 2814 * 4. The L2ARC entry wasn't evicted, which may 2815 * also have invalidated the vdev. 2816 * 5. This isn't prefetch and l2arc_noprefetch is set. 2817 */ 2818 if (hdr->b_l2hdr != NULL && 2819 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 2820 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 2821 l2arc_read_callback_t *cb; 2822 2823 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 2824 ARCSTAT_BUMP(arcstat_l2_hits); 2825 2826 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 2827 KM_SLEEP); 2828 cb->l2rcb_buf = buf; 2829 cb->l2rcb_spa = spa; 2830 cb->l2rcb_bp = *bp; 2831 cb->l2rcb_zb = *zb; 2832 cb->l2rcb_flags = zio_flags; 2833 2834 /* 2835 * l2arc read. The SCL_L2ARC lock will be 2836 * released by l2arc_read_done(). 
2837 */ 2838 rzio = zio_read_phys(pio, vd, addr, size, 2839 buf->b_data, ZIO_CHECKSUM_OFF, 2840 l2arc_read_done, cb, priority, zio_flags | 2841 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | 2842 ZIO_FLAG_DONT_PROPAGATE | 2843 ZIO_FLAG_DONT_RETRY, B_FALSE); 2844 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 2845 zio_t *, rzio); 2846 ARCSTAT_INCR(arcstat_l2_read_bytes, size); 2847 2848 if (*arc_flags & ARC_NOWAIT) { 2849 zio_nowait(rzio); 2850 return (0); 2851 } 2852 2853 ASSERT(*arc_flags & ARC_WAIT); 2854 if (zio_wait(rzio) == 0) 2855 return (0); 2856 2857 /* l2arc read error; goto zio_read() */ 2858 } else { 2859 DTRACE_PROBE1(l2arc__miss, 2860 arc_buf_hdr_t *, hdr); 2861 ARCSTAT_BUMP(arcstat_l2_misses); 2862 if (HDR_L2_WRITING(hdr)) 2863 ARCSTAT_BUMP(arcstat_l2_rw_clash); 2864 spa_config_exit(spa, SCL_L2ARC, vd); 2865 } 2866 } else { 2867 if (vd != NULL) 2868 spa_config_exit(spa, SCL_L2ARC, vd); 2869 if (l2arc_ndev != 0) { 2870 DTRACE_PROBE1(l2arc__miss, 2871 arc_buf_hdr_t *, hdr); 2872 ARCSTAT_BUMP(arcstat_l2_misses); 2873 } 2874 } 2875 2876 rzio = zio_read(pio, spa, bp, buf->b_data, size, 2877 arc_read_done, buf, priority, zio_flags, zb); 2878 2879 if (*arc_flags & ARC_WAIT) 2880 return (zio_wait(rzio)); 2881 2882 ASSERT(*arc_flags & ARC_NOWAIT); 2883 zio_nowait(rzio); 2884 } 2885 return (0); 2886 } 2887 2888 void 2889 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 2890 { 2891 ASSERT(buf->b_hdr != NULL); 2892 ASSERT(buf->b_hdr->b_state != arc_anon); 2893 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 2894 ASSERT(buf->b_efunc == NULL); 2895 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr)); 2896 2897 buf->b_efunc = func; 2898 buf->b_private = private; 2899 } 2900 2901 /* 2902 * This is used by the DMU to let the ARC know that a buffer is 2903 * being evicted, so the ARC should clean up. If this arc buf 2904 * is not yet in the evicted state, it will be put there. 2905 */ 2906 int 2907 arc_buf_evict(arc_buf_t *buf) 2908 { 2909 arc_buf_hdr_t *hdr; 2910 kmutex_t *hash_lock; 2911 arc_buf_t **bufp; 2912 2913 rw_enter(&buf->b_lock, RW_WRITER); 2914 hdr = buf->b_hdr; 2915 if (hdr == NULL) { 2916 /* 2917 * We are in arc_do_user_evicts(). 2918 */ 2919 ASSERT(buf->b_data == NULL); 2920 rw_exit(&buf->b_lock); 2921 return (0); 2922 } else if (buf->b_data == NULL) { 2923 arc_buf_t copy = *buf; /* structure assignment */ 2924 /* 2925 * We are on the eviction list; process this buffer now 2926 * but let arc_do_user_evicts() do the reaping. 2927 */ 2928 buf->b_efunc = NULL; 2929 rw_exit(&buf->b_lock); 2930 VERIFY(copy.b_efunc(&copy) == 0); 2931 return (1); 2932 } 2933 hash_lock = HDR_LOCK(hdr); 2934 mutex_enter(hash_lock); 2935 2936 ASSERT(buf->b_hdr == hdr); 2937 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 2938 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2939 2940 /* 2941 * Pull this buffer off of the hdr 2942 */ 2943 bufp = &hdr->b_buf; 2944 while (*bufp != buf) 2945 bufp = &(*bufp)->b_next; 2946 *bufp = buf->b_next; 2947 2948 ASSERT(buf->b_data != NULL); 2949 arc_buf_destroy(buf, FALSE, FALSE); 2950 2951 if (hdr->b_datacnt == 0) { 2952 arc_state_t *old_state = hdr->b_state; 2953 arc_state_t *evicted_state; 2954 2955 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2956 2957 evicted_state = 2958 (old_state == arc_mru) ?
arc_mru_ghost : arc_mfu_ghost; 2959 2960 mutex_enter(&old_state->arcs_mtx); 2961 mutex_enter(&evicted_state->arcs_mtx); 2962 2963 arc_change_state(evicted_state, hdr, hash_lock); 2964 ASSERT(HDR_IN_HASH_TABLE(hdr)); 2965 hdr->b_flags |= ARC_IN_HASH_TABLE; 2966 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2967 2968 mutex_exit(&evicted_state->arcs_mtx); 2969 mutex_exit(&old_state->arcs_mtx); 2970 } 2971 mutex_exit(hash_lock); 2972 rw_exit(&buf->b_lock); 2973 2974 VERIFY(buf->b_efunc(buf) == 0); 2975 buf->b_efunc = NULL; 2976 buf->b_private = NULL; 2977 buf->b_hdr = NULL; 2978 kmem_cache_free(buf_cache, buf); 2979 return (1); 2980 } 2981 2982 /* 2983 * Release this buffer from the cache. This must be done 2984 * after a read and prior to modifying the buffer contents. 2985 * If the buffer has more than one reference, we must make 2986 * a new hdr for the buffer. 2987 */ 2988 void 2989 arc_release(arc_buf_t *buf, void *tag) 2990 { 2991 arc_buf_hdr_t *hdr; 2992 kmutex_t *hash_lock; 2993 l2arc_buf_hdr_t *l2hdr; 2994 uint64_t buf_size; 2995 boolean_t released = B_FALSE; 2996 2997 rw_enter(&buf->b_lock, RW_WRITER); 2998 hdr = buf->b_hdr; 2999 3000 /* this buffer is not on any list */ 3001 ASSERT(refcount_count(&hdr->b_refcnt) > 0); 3002 3003 if (hdr->b_state == arc_anon) { 3004 /* this buffer is already released */ 3005 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 3006 ASSERT(BUF_EMPTY(hdr)); 3007 ASSERT(buf->b_efunc == NULL); 3008 arc_buf_thaw(buf); 3009 rw_exit(&buf->b_lock); 3010 released = B_TRUE; 3011 } else { 3012 hash_lock = HDR_LOCK(hdr); 3013 mutex_enter(hash_lock); 3014 } 3015 3016 l2hdr = hdr->b_l2hdr; 3017 if (l2hdr) { 3018 mutex_enter(&l2arc_buflist_mtx); 3019 hdr->b_l2hdr = NULL; 3020 buf_size = hdr->b_size; 3021 } 3022 3023 if (released) 3024 goto out; 3025 3026 /* 3027 * Do we have more than one buf? 3028 */ 3029 if (hdr->b_datacnt > 1) { 3030 arc_buf_hdr_t *nhdr; 3031 arc_buf_t **bufp; 3032 uint64_t blksz = hdr->b_size; 3033 uint64_t spa = hdr->b_spa; 3034 arc_buf_contents_t type = hdr->b_type; 3035 uint32_t flags = hdr->b_flags; 3036 3037 ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 3038 /* 3039 * Pull the data off of this buf and attach it to 3040 * a new anonymous buf. 
3041 */ 3042 (void) remove_reference(hdr, hash_lock, tag); 3043 bufp = &hdr->b_buf; 3044 while (*bufp != buf) 3045 bufp = &(*bufp)->b_next; 3046 *bufp = (*bufp)->b_next; 3047 buf->b_next = NULL; 3048 3049 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 3050 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 3051 if (refcount_is_zero(&hdr->b_refcnt)) { 3052 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 3053 ASSERT3U(*size, >=, hdr->b_size); 3054 atomic_add_64(size, -hdr->b_size); 3055 } 3056 hdr->b_datacnt -= 1; 3057 arc_cksum_verify(buf); 3058 3059 mutex_exit(hash_lock); 3060 3061 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3062 nhdr->b_size = blksz; 3063 nhdr->b_spa = spa; 3064 nhdr->b_type = type; 3065 nhdr->b_buf = buf; 3066 nhdr->b_state = arc_anon; 3067 nhdr->b_arc_access = 0; 3068 nhdr->b_flags = flags & ARC_L2_WRITING; 3069 nhdr->b_l2hdr = NULL; 3070 nhdr->b_datacnt = 1; 3071 nhdr->b_freeze_cksum = NULL; 3072 (void) refcount_add(&nhdr->b_refcnt, tag); 3073 buf->b_hdr = nhdr; 3074 rw_exit(&buf->b_lock); 3075 atomic_add_64(&arc_anon->arcs_size, blksz); 3076 } else { 3077 rw_exit(&buf->b_lock); 3078 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 3079 ASSERT(!list_link_active(&hdr->b_arc_node)); 3080 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3081 arc_change_state(arc_anon, hdr, hash_lock); 3082 hdr->b_arc_access = 0; 3083 mutex_exit(hash_lock); 3084 3085 bzero(&hdr->b_dva, sizeof (dva_t)); 3086 hdr->b_birth = 0; 3087 hdr->b_cksum0 = 0; 3088 arc_buf_thaw(buf); 3089 } 3090 buf->b_efunc = NULL; 3091 buf->b_private = NULL; 3092 3093 out: 3094 if (l2hdr) { 3095 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 3096 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3097 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3098 mutex_exit(&l2arc_buflist_mtx); 3099 } 3100 } 3101 3102 int 3103 arc_released(arc_buf_t *buf) 3104 { 3105 int released; 3106 3107 rw_enter(&buf->b_lock, RW_READER); 3108 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 3109 rw_exit(&buf->b_lock); 3110 return (released); 3111 } 3112 3113 int 3114 arc_has_callback(arc_buf_t *buf) 3115 { 3116 int callback; 3117 3118 rw_enter(&buf->b_lock, RW_READER); 3119 callback = (buf->b_efunc != NULL); 3120 rw_exit(&buf->b_lock); 3121 return (callback); 3122 } 3123 3124 #ifdef ZFS_DEBUG 3125 int 3126 arc_referenced(arc_buf_t *buf) 3127 { 3128 int referenced; 3129 3130 rw_enter(&buf->b_lock, RW_READER); 3131 referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3132 rw_exit(&buf->b_lock); 3133 return (referenced); 3134 } 3135 #endif 3136 3137 static void 3138 arc_write_ready(zio_t *zio) 3139 { 3140 arc_write_callback_t *callback = zio->io_private; 3141 arc_buf_t *buf = callback->awcb_buf; 3142 arc_buf_hdr_t *hdr = buf->b_hdr; 3143 3144 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3145 callback->awcb_ready(zio, buf, callback->awcb_private); 3146 3147 /* 3148 * If the IO is already in progress, then this is a re-write 3149 * attempt, so we need to thaw and re-compute the cksum. 3150 * It is the responsibility of the callback to handle the 3151 * accounting for any re-write attempt. 
3152 */ 3153 if (HDR_IO_IN_PROGRESS(hdr)) { 3154 mutex_enter(&hdr->b_freeze_lock); 3155 if (hdr->b_freeze_cksum != NULL) { 3156 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3157 hdr->b_freeze_cksum = NULL; 3158 } 3159 mutex_exit(&hdr->b_freeze_lock); 3160 } 3161 arc_cksum_compute(buf, B_FALSE); 3162 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3163 } 3164 3165 static void 3166 arc_write_done(zio_t *zio) 3167 { 3168 arc_write_callback_t *callback = zio->io_private; 3169 arc_buf_t *buf = callback->awcb_buf; 3170 arc_buf_hdr_t *hdr = buf->b_hdr; 3171 3172 ASSERT(hdr->b_acb == NULL); 3173 3174 if (zio->io_error == 0) { 3175 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3176 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 3177 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3178 } else { 3179 ASSERT(BUF_EMPTY(hdr)); 3180 } 3181 3182 /* 3183 * If the block to be written was all-zero, we may have 3184 * compressed it away. In this case no write was performed 3185 * so there will be no dva/birth-date/checksum. The buffer 3186 * must therefor remain anonymous (and uncached). 3187 */ 3188 if (!BUF_EMPTY(hdr)) { 3189 arc_buf_hdr_t *exists; 3190 kmutex_t *hash_lock; 3191 3192 ASSERT(zio->io_error == 0); 3193 3194 arc_cksum_verify(buf); 3195 3196 exists = buf_hash_insert(hdr, &hash_lock); 3197 if (exists) { 3198 /* 3199 * This can only happen if we overwrite for 3200 * sync-to-convergence, because we remove 3201 * buffers from the hash table when we arc_free(). 3202 */ 3203 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 3204 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 3205 panic("bad overwrite, hdr=%p exists=%p", 3206 (void *)hdr, (void *)exists); 3207 ASSERT(refcount_is_zero(&exists->b_refcnt)); 3208 arc_change_state(arc_anon, exists, hash_lock); 3209 mutex_exit(hash_lock); 3210 arc_hdr_destroy(exists); 3211 exists = buf_hash_insert(hdr, &hash_lock); 3212 ASSERT3P(exists, ==, NULL); 3213 } else { 3214 /* Dedup */ 3215 ASSERT(hdr->b_datacnt == 1); 3216 ASSERT(hdr->b_state == arc_anon); 3217 ASSERT(BP_GET_DEDUP(zio->io_bp)); 3218 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 3219 } 3220 } 3221 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3222 /* if it's not anon, we are doing a scrub */ 3223 if (!exists && hdr->b_state == arc_anon) 3224 arc_access(hdr, hash_lock); 3225 mutex_exit(hash_lock); 3226 } else { 3227 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3228 } 3229 3230 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3231 callback->awcb_done(zio, buf, callback->awcb_private); 3232 3233 kmem_free(callback, sizeof (arc_write_callback_t)); 3234 } 3235 3236 zio_t * 3237 arc_write(zio_t *pio, spa_t *spa, uint64_t txg, 3238 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp, 3239 arc_done_func_t *ready, arc_done_func_t *done, void *private, 3240 int priority, int zio_flags, const zbookmark_t *zb) 3241 { 3242 arc_buf_hdr_t *hdr = buf->b_hdr; 3243 arc_write_callback_t *callback; 3244 zio_t *zio; 3245 3246 ASSERT(ready != NULL); 3247 ASSERT(done != NULL); 3248 ASSERT(!HDR_IO_ERROR(hdr)); 3249 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3250 ASSERT(hdr->b_acb == NULL); 3251 if (l2arc) 3252 hdr->b_flags |= ARC_L2CACHE; 3253 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3254 callback->awcb_ready = ready; 3255 callback->awcb_done = done; 3256 callback->awcb_private = private; 3257 callback->awcb_buf = buf; 3258 3259 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp, 3260 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); 3261 3262 return (zio); 3263 } 3264 3265 void 3266 
arc_free(spa_t *spa, const blkptr_t *bp) 3267 { 3268 arc_buf_hdr_t *ab; 3269 kmutex_t *hash_lock; 3270 uint64_t guid = spa_guid(spa); 3271 3272 /* 3273 * If this buffer is in the cache, release it, so it can be re-used. 3274 */ 3275 ab = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp), 3276 &hash_lock); 3277 if (ab != NULL) { 3278 if (ab->b_state != arc_anon) 3279 arc_change_state(arc_anon, ab, hash_lock); 3280 if (HDR_IO_IN_PROGRESS(ab)) { 3281 /* 3282 * This should only happen when we prefetch. 3283 */ 3284 ASSERT(ab->b_flags & ARC_PREFETCH); 3285 ASSERT3U(ab->b_datacnt, ==, 1); 3286 ab->b_flags |= ARC_FREED_IN_READ; 3287 if (HDR_IN_HASH_TABLE(ab)) 3288 buf_hash_remove(ab); 3289 ab->b_arc_access = 0; 3290 bzero(&ab->b_dva, sizeof (dva_t)); 3291 ab->b_birth = 0; 3292 ab->b_cksum0 = 0; 3293 ab->b_buf->b_efunc = NULL; 3294 ab->b_buf->b_private = NULL; 3295 mutex_exit(hash_lock); 3296 } else { 3297 ASSERT(refcount_is_zero(&ab->b_refcnt)); 3298 ab->b_flags |= ARC_FREE_IN_PROGRESS; 3299 mutex_exit(hash_lock); 3300 arc_hdr_destroy(ab); 3301 ARCSTAT_BUMP(arcstat_deleted); 3302 } 3303 } 3304 } 3305 3306 static int 3307 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg) 3308 { 3309 #ifdef _KERNEL 3310 uint64_t available_memory = ptob(freemem); 3311 static uint64_t page_load = 0; 3312 static uint64_t last_txg = 0; 3313 3314 #if defined(__i386) 3315 available_memory = 3316 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3317 #endif 3318 if (available_memory >= zfs_write_limit_max) 3319 return (0); 3320 3321 if (txg > last_txg) { 3322 last_txg = txg; 3323 page_load = 0; 3324 } 3325 /* 3326 * If we are in pageout, we know that memory is already tight, 3327 * the arc is already going to be evicting, so we just want to 3328 * continue to let page writes occur as quickly as possible. 3329 */ 3330 if (curproc == proc_pageout) { 3331 if (page_load > MAX(ptob(minfree), available_memory) / 4) 3332 return (ERESTART); 3333 /* Note: reserve is inflated, so we deflate */ 3334 page_load += reserve / 8; 3335 return (0); 3336 } else if (page_load > 0 && arc_reclaim_needed()) { 3337 /* memory is low, delay before restarting */ 3338 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3339 return (EAGAIN); 3340 } 3341 page_load = 0; 3342 3343 if (arc_size > arc_c_min) { 3344 uint64_t evictable_memory = 3345 arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3346 arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3347 arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3348 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3349 available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3350 } 3351 3352 if (inflight_data > available_memory / 4) { 3353 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3354 return (ERESTART); 3355 } 3356 #endif 3357 return (0); 3358 } 3359 3360 void 3361 arc_tempreserve_clear(uint64_t reserve) 3362 { 3363 atomic_add_64(&arc_tempreserve, -reserve); 3364 ASSERT((int64_t)arc_tempreserve >= 0); 3365 } 3366 3367 int 3368 arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3369 { 3370 int error; 3371 uint64_t anon_size; 3372 3373 #ifdef ZFS_DEBUG 3374 /* 3375 * Once in a while, fail for no reason. Everything should cope. 
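 * "Cope" here means callers must be prepared to see ERESTART, EAGAIN
 * or ENOMEM from this function and back off. A sketch of the expected
 * calling pattern (hypothetical names; the real callers live in the
 * DMU, not here):
 *
 *	if ((error = arc_tempreserve_space(nbytes, txg)) == 0) {
 *		... dirty the data covered by the reservation ...
 *		arc_tempreserve_clear(nbytes);
 *	} else {
 *		... back off, possibly wait, and retry ...
 *	}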
3376 */ 3377 if (spa_get_random(10000) == 0) { 3378 dprintf("forcing random failure\n"); 3379 return (ERESTART); 3380 } 3381 #endif 3382 if (reserve > arc_c/4 && !arc_no_grow) 3383 arc_c = MIN(arc_c_max, reserve * 4); 3384 if (reserve > arc_c) 3385 return (ENOMEM); 3386 3387 /* 3388 * Don't count loaned bufs as in flight dirty data to prevent long 3389 * network delays from blocking transactions that are ready to be 3390 * assigned to a txg. 3391 */ 3392 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0); 3393 3394 /* 3395 * Writes will, almost always, require additional memory allocations 3396 * in order to compress/encrypt/etc the data. We therefor need to 3397 * make sure that there is sufficient available memory for this. 3398 */ 3399 if (error = arc_memory_throttle(reserve, anon_size, txg)) 3400 return (error); 3401 3402 /* 3403 * Throttle writes when the amount of dirty data in the cache 3404 * gets too large. We try to keep the cache less than half full 3405 * of dirty blocks so that our sync times don't grow too large. 3406 * Note: if two requests come in concurrently, we might let them 3407 * both succeed, when one of them should fail. Not a huge deal. 3408 */ 3409 3410 if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 3411 anon_size > arc_c / 4) { 3412 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3413 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3414 arc_tempreserve>>10, 3415 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3416 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3417 reserve>>10, arc_c>>10); 3418 return (ERESTART); 3419 } 3420 atomic_add_64(&arc_tempreserve, reserve); 3421 return (0); 3422 } 3423 3424 void 3425 arc_init(void) 3426 { 3427 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3428 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3429 3430 /* Convert seconds to clock ticks */ 3431 arc_min_prefetch_lifespan = 1 * hz; 3432 3433 /* Start out with 1/8 of all memory */ 3434 arc_c = physmem * PAGESIZE / 8; 3435 3436 #ifdef _KERNEL 3437 /* 3438 * On architectures where the physical memory can be larger 3439 * than the addressable space (intel in 32-bit mode), we may 3440 * need to limit the cache to 1/8 of VM size. 3441 */ 3442 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3443 #endif 3444 3445 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 3446 arc_c_min = MAX(arc_c / 4, 64<<20); 3447 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 3448 if (arc_c * 8 >= 1<<30) 3449 arc_c_max = (arc_c * 8) - (1<<30); 3450 else 3451 arc_c_max = arc_c_min; 3452 arc_c_max = MAX(arc_c * 6, arc_c_max); 3453 3454 /* 3455 * Allow the tunables to override our calculations if they are 3456 * reasonable (ie. 
over 64MB) 3457 */ 3458 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 3459 arc_c_max = zfs_arc_max; 3460 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 3461 arc_c_min = zfs_arc_min; 3462 3463 arc_c = arc_c_max; 3464 arc_p = (arc_c >> 1); 3465 3466 /* limit meta-data to 1/4 of the arc capacity */ 3467 arc_meta_limit = arc_c_max / 4; 3468 3469 /* Allow the tunable to override if it is reasonable */ 3470 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3471 arc_meta_limit = zfs_arc_meta_limit; 3472 3473 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3474 arc_c_min = arc_meta_limit / 2; 3475 3476 if (zfs_arc_grow_retry > 0) 3477 arc_grow_retry = zfs_arc_grow_retry; 3478 3479 if (zfs_arc_shrink_shift > 0) 3480 arc_shrink_shift = zfs_arc_shrink_shift; 3481 3482 if (zfs_arc_p_min_shift > 0) 3483 arc_p_min_shift = zfs_arc_p_min_shift; 3484 3485 /* if kmem_flags are set, lets try to use less memory */ 3486 if (kmem_debugging()) 3487 arc_c = arc_c / 2; 3488 if (arc_c < arc_c_min) 3489 arc_c = arc_c_min; 3490 3491 arc_anon = &ARC_anon; 3492 arc_mru = &ARC_mru; 3493 arc_mru_ghost = &ARC_mru_ghost; 3494 arc_mfu = &ARC_mfu; 3495 arc_mfu_ghost = &ARC_mfu_ghost; 3496 arc_l2c_only = &ARC_l2c_only; 3497 arc_size = 0; 3498 3499 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3500 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3501 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3502 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3503 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3504 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3505 3506 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 3507 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3508 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 3509 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3510 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 3511 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3512 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 3513 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3514 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 3515 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3516 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 3517 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3518 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 3519 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3520 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 3521 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3522 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 3523 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3524 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 3525 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3526 3527 buf_init(); 3528 3529 arc_thread_exit = 0; 3530 arc_eviction_list = NULL; 3531 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3532 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3533 3534 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3535 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3536 3537 if (arc_ksp != NULL) { 3538 arc_ksp->ks_data = &arc_stats; 3539 kstat_install(arc_ksp); 3540 } 3541 3542 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3543 TS_RUN, minclsyspri); 3544 3545 arc_dead = FALSE; 3546 arc_warm = B_FALSE; 3547 3548 
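	/*
	 * To make the sizing above concrete, a worked example (an
	 * illustration only, assuming 8GB of physical memory on a 64-bit
	 * kernel, all zfs_arc_* tunables left at zero and no kmem
	 * debugging): arc_c starts at 1GB (1/8 of memory), arc_c_max
	 * works out to 7GB (all but 1GB beats 3/4 of memory),
	 * arc_meta_limit to 1.75GB (a quarter of arc_c_max), arc_c_min is
	 * raised to arc_meta_limit / 2 = 896MB, and arc_c / arc_p begin
	 * life at 7GB / 3.5GB.
	 */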
if (zfs_write_limit_max == 0) 3549 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; 3550 else 3551 zfs_write_limit_shift = 0; 3552 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL); 3553 } 3554 3555 void 3556 arc_fini(void) 3557 { 3558 mutex_enter(&arc_reclaim_thr_lock); 3559 arc_thread_exit = 1; 3560 while (arc_thread_exit != 0) 3561 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3562 mutex_exit(&arc_reclaim_thr_lock); 3563 3564 arc_flush(NULL); 3565 3566 arc_dead = TRUE; 3567 3568 if (arc_ksp != NULL) { 3569 kstat_delete(arc_ksp); 3570 arc_ksp = NULL; 3571 } 3572 3573 mutex_destroy(&arc_eviction_mtx); 3574 mutex_destroy(&arc_reclaim_thr_lock); 3575 cv_destroy(&arc_reclaim_thr_cv); 3576 3577 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 3578 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 3579 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 3580 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 3581 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 3582 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 3583 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 3584 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3585 3586 mutex_destroy(&arc_anon->arcs_mtx); 3587 mutex_destroy(&arc_mru->arcs_mtx); 3588 mutex_destroy(&arc_mru_ghost->arcs_mtx); 3589 mutex_destroy(&arc_mfu->arcs_mtx); 3590 mutex_destroy(&arc_mfu_ghost->arcs_mtx); 3591 mutex_destroy(&arc_l2c_only->arcs_mtx); 3592 3593 mutex_destroy(&zfs_write_limit_lock); 3594 3595 buf_fini(); 3596 3597 ASSERT(arc_loaned_bytes == 0); 3598 } 3599 3600 /* 3601 * Level 2 ARC 3602 * 3603 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3604 * It uses dedicated storage devices to hold cached data, which are populated 3605 * using large infrequent writes. The main role of this cache is to boost 3606 * the performance of random read workloads. The intended L2ARC devices 3607 * include short-stroked disks, solid state disks, and other media with 3608 * substantially faster read latency than disk. 3609 * 3610 * +-----------------------+ 3611 * | ARC | 3612 * +-----------------------+ 3613 * | ^ ^ 3614 * | | | 3615 * l2arc_feed_thread() arc_read() 3616 * | | | 3617 * | l2arc read | 3618 * V | | 3619 * +---------------+ | 3620 * | L2ARC | | 3621 * +---------------+ | 3622 * | ^ | 3623 * l2arc_write() | | 3624 * | | | 3625 * V | | 3626 * +-------+ +-------+ 3627 * | vdev | | vdev | 3628 * | cache | | cache | 3629 * +-------+ +-------+ 3630 * +=========+ .-----. 3631 * : L2ARC : |-_____-| 3632 * : devices : | Disks | 3633 * +=========+ `-_____-' 3634 * 3635 * Read requests are satisfied from the following sources, in order: 3636 * 3637 * 1) ARC 3638 * 2) vdev cache of L2ARC devices 3639 * 3) L2ARC devices 3640 * 4) vdev cache of disks 3641 * 5) disks 3642 * 3643 * Some L2ARC device types exhibit extremely slow write performance. 3644 * To accommodate for this there are some significant differences between 3645 * the L2ARC and traditional cache design: 3646 * 3647 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 3648 * the ARC behave as usual, freeing buffers and placing headers on ghost 3649 * lists. The ARC does not send buffers to the L2ARC during eviction as 3650 * this would add inflated write latencies for all ARC memory pressure. 3651 * 3652 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 
3653 * It does this by periodically scanning buffers from the eviction-end of 3654 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 3655 * not already there. It scans until a headroom of buffers is satisfied, 3656 * which itself acts as a buffer against ARC eviction. The thread that does this is 3657 * l2arc_feed_thread(), illustrated below; example sizes are included to 3658 * provide a better sense of scale than the diagram alone: 3659 * 3660 * head --> tail 3661 * +---------------------+----------+ 3662 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 3663 * +---------------------+----------+ | o L2ARC eligible 3664 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 3665 * +---------------------+----------+ | 3666 * 15.9 Gbytes ^ 32 Mbytes | 3667 * headroom | 3668 * l2arc_feed_thread() 3669 * | 3670 * l2arc write hand <--[oooo]--' 3671 * | 8 Mbyte 3672 * | write max 3673 * V 3674 * +==============================+ 3675 * L2ARC dev |####|#|###|###| |####| ... | 3676 * +==============================+ 3677 * 32 Gbytes 3678 * 3679 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 3680 * evicted, then the L2ARC has cached a buffer much sooner than it probably 3681 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 3682 * safe to say that this is an uncommon case, since buffers at the end of 3683 * the ARC lists have moved there due to inactivity. 3684 * 3685 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 3686 * then the L2ARC simply misses copying some buffers. This serves as a 3687 * pressure valve to prevent heavy read workloads from both stalling the ARC 3688 * with waits and clogging the L2ARC with writes. This also helps prevent 3689 * the potential for the L2ARC to churn if it attempts to cache content too 3690 * quickly, such as during backups of the entire pool. 3691 * 3692 * 5. After system boot and before the ARC has filled main memory, there are 3693 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 3694 * lists can remain mostly static. Instead of searching from the tail of these 3695 * lists as pictured, l2arc_feed_thread() will search from the list heads 3696 * for eligible buffers, greatly increasing its chance of finding them. 3697 * 3698 * The L2ARC device write speed is also boosted during this time so that 3699 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 3700 * there are no L2ARC reads, and no fear of degrading read performance 3701 * through increased writes. 3702 * 3703 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 3704 * the vdev queue can aggregate them into larger and fewer writes. Each 3705 * device is written to in a rotor fashion, sweeping writes through 3706 * available space then repeating. 3707 * 3708 * 7. The L2ARC does not store dirty content. It never needs to flush 3709 * write buffers back to disk-based storage. 3710 * 3711 * 8. If an ARC buffer that also exists in the L2ARC is written (and dirtied), 3712 * the now-stale L2ARC buffer is immediately dropped.
3713 * 3714 * The performance of the L2ARC can be tweaked by a number of tunables, which 3715 * may be necessary for different workloads: 3716 * 3717 * l2arc_write_max max write bytes per interval 3718 * l2arc_write_boost extra write bytes during device warmup 3719 * l2arc_noprefetch skip caching prefetched buffers 3720 * l2arc_headroom number of max device writes to precache 3721 * l2arc_feed_secs seconds between L2ARC writing 3722 * 3723 * Tunables may be removed or added as future performance improvements are 3724 * integrated, and also may become zpool properties. 3725 * 3726 * There are three key functions that control how the L2ARC warms up: 3727 * 3728 * l2arc_write_eligible() check if a buffer is eligible to cache 3729 * l2arc_write_size() calculate how much to write 3730 * l2arc_write_interval() calculate sleep delay between writes 3731 * 3732 * These three functions determine what to write, how much, and how quickly 3733 * to send writes. 3734 */ 3735 3736 static boolean_t 3737 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab) 3738 { 3739 /* 3740 * A buffer is *not* eligible for the L2ARC if it: 3741 * 1. belongs to a different spa. 3742 * 2. is already cached on the L2ARC. 3743 * 3. has an I/O in progress (it may be an incomplete read). 3744 * 4. is flagged not eligible (zfs property). 3745 */ 3746 if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL || 3747 HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) 3748 return (B_FALSE); 3749 3750 return (B_TRUE); 3751 } 3752 3753 static uint64_t 3754 l2arc_write_size(l2arc_dev_t *dev) 3755 { 3756 uint64_t size; 3757 3758 size = dev->l2ad_write; 3759 3760 if (arc_warm == B_FALSE) 3761 size += dev->l2ad_boost; 3762 3763 return (size); 3764 3765 } 3766 3767 static clock_t 3768 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 3769 { 3770 clock_t interval, next, now; 3771 3772 /* 3773 * If the ARC lists are busy, increase our write rate; if the 3774 * lists are stale, idle back. This is achieved by checking 3775 * how much we previously wrote - if it was more than half of 3776 * what we wanted, schedule the next write much sooner. 3777 */ 3778 if (l2arc_feed_again && wrote > (wanted / 2)) 3779 interval = (hz * l2arc_feed_min_ms) / 1000; 3780 else 3781 interval = hz * l2arc_feed_secs; 3782 3783 now = ddi_get_lbolt(); 3784 next = MAX(now, MIN(now + interval, began + interval)); 3785 3786 return (next); 3787 } 3788 3789 static void 3790 l2arc_hdr_stat_add(void) 3791 { 3792 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 3793 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 3794 } 3795 3796 static void 3797 l2arc_hdr_stat_remove(void) 3798 { 3799 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 3800 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 3801 } 3802 3803 /* 3804 * Cycle through L2ARC devices. This is how L2ARC load balances. 3805 * If a device is returned, this also returns holding the spa config lock. 3806 */ 3807 static l2arc_dev_t * 3808 l2arc_dev_get_next(void) 3809 { 3810 l2arc_dev_t *first, *next = NULL; 3811 3812 /* 3813 * Lock out the removal of spas (spa_namespace_lock), then removal 3814 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 3815 * both locks will be dropped and a spa config lock held instead. 
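* The caller is responsible for dropping that config lock, via spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev), once it has finished with the device; see l2arc_feed_thread().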
3816 */ 3817 mutex_enter(&spa_namespace_lock); 3818 mutex_enter(&l2arc_dev_mtx); 3819 3820 /* if there are no vdevs, there is nothing to do */ 3821 if (l2arc_ndev == 0) 3822 goto out; 3823 3824 first = NULL; 3825 next = l2arc_dev_last; 3826 do { 3827 /* loop around the list looking for a non-faulted vdev */ 3828 if (next == NULL) { 3829 next = list_head(l2arc_dev_list); 3830 } else { 3831 next = list_next(l2arc_dev_list, next); 3832 if (next == NULL) 3833 next = list_head(l2arc_dev_list); 3834 } 3835 3836 /* if we have come back to the start, bail out */ 3837 if (first == NULL) 3838 first = next; 3839 else if (next == first) 3840 break; 3841 3842 } while (vdev_is_dead(next->l2ad_vdev)); 3843 3844 /* if we were unable to find any usable vdevs, return NULL */ 3845 if (vdev_is_dead(next->l2ad_vdev)) 3846 next = NULL; 3847 3848 l2arc_dev_last = next; 3849 3850 out: 3851 mutex_exit(&l2arc_dev_mtx); 3852 3853 /* 3854 * Grab the config lock to prevent the 'next' device from being 3855 * removed while we are writing to it. 3856 */ 3857 if (next != NULL) 3858 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 3859 mutex_exit(&spa_namespace_lock); 3860 3861 return (next); 3862 } 3863 3864 /* 3865 * Free buffers that were tagged for destruction. 3866 */ 3867 static void 3868 l2arc_do_free_on_write() 3869 { 3870 list_t *buflist; 3871 l2arc_data_free_t *df, *df_prev; 3872 3873 mutex_enter(&l2arc_free_on_write_mtx); 3874 buflist = l2arc_free_on_write; 3875 3876 for (df = list_tail(buflist); df; df = df_prev) { 3877 df_prev = list_prev(buflist, df); 3878 ASSERT(df->l2df_data != NULL); 3879 ASSERT(df->l2df_func != NULL); 3880 df->l2df_func(df->l2df_data, df->l2df_size); 3881 list_remove(buflist, df); 3882 kmem_free(df, sizeof (l2arc_data_free_t)); 3883 } 3884 3885 mutex_exit(&l2arc_free_on_write_mtx); 3886 } 3887 3888 /* 3889 * A write to a cache device has completed. Update all headers to allow 3890 * reads from these buffers to begin. 3891 */ 3892 static void 3893 l2arc_write_done(zio_t *zio) 3894 { 3895 l2arc_write_callback_t *cb; 3896 l2arc_dev_t *dev; 3897 list_t *buflist; 3898 arc_buf_hdr_t *head, *ab, *ab_prev; 3899 l2arc_buf_hdr_t *abl2; 3900 kmutex_t *hash_lock; 3901 3902 cb = zio->io_private; 3903 ASSERT(cb != NULL); 3904 dev = cb->l2wcb_dev; 3905 ASSERT(dev != NULL); 3906 head = cb->l2wcb_head; 3907 ASSERT(head != NULL); 3908 buflist = dev->l2ad_buflist; 3909 ASSERT(buflist != NULL); 3910 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 3911 l2arc_write_callback_t *, cb); 3912 3913 if (zio->io_error != 0) 3914 ARCSTAT_BUMP(arcstat_l2_writes_error); 3915 3916 mutex_enter(&l2arc_buflist_mtx); 3917 3918 /* 3919 * All writes completed, or an error was hit. 3920 */ 3921 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 3922 ab_prev = list_prev(buflist, ab); 3923 3924 hash_lock = HDR_LOCK(ab); 3925 if (!mutex_tryenter(hash_lock)) { 3926 /* 3927 * This buffer misses out. It may be in a stage 3928 * of eviction. Its ARC_L2_WRITING flag will be 3929 * left set, denying reads to this buffer. 3930 */ 3931 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 3932 continue; 3933 } 3934 3935 if (zio->io_error != 0) { 3936 /* 3937 * Error - drop L2ARC entry. 3938 */ 3939 list_remove(buflist, ab); 3940 abl2 = ab->b_l2hdr; 3941 ab->b_l2hdr = NULL; 3942 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 3943 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 3944 } 3945 3946 /* 3947 * Allow ARC to begin reads to this L2ARC entry. 
3948 */ 3949 ab->b_flags &= ~ARC_L2_WRITING; 3950 3951 mutex_exit(hash_lock); 3952 } 3953 3954 atomic_inc_64(&l2arc_writes_done); 3955 list_remove(buflist, head); 3956 kmem_cache_free(hdr_cache, head); 3957 mutex_exit(&l2arc_buflist_mtx); 3958 3959 l2arc_do_free_on_write(); 3960 3961 kmem_free(cb, sizeof (l2arc_write_callback_t)); 3962 } 3963 3964 /* 3965 * A read to a cache device completed. Validate buffer contents before 3966 * handing over to the regular ARC routines. 3967 */ 3968 static void 3969 l2arc_read_done(zio_t *zio) 3970 { 3971 l2arc_read_callback_t *cb; 3972 arc_buf_hdr_t *hdr; 3973 arc_buf_t *buf; 3974 kmutex_t *hash_lock; 3975 int equal; 3976 3977 ASSERT(zio->io_vd != NULL); 3978 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 3979 3980 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 3981 3982 cb = zio->io_private; 3983 ASSERT(cb != NULL); 3984 buf = cb->l2rcb_buf; 3985 ASSERT(buf != NULL); 3986 hdr = buf->b_hdr; 3987 ASSERT(hdr != NULL); 3988 3989 hash_lock = HDR_LOCK(hdr); 3990 mutex_enter(hash_lock); 3991 3992 /* 3993 * Check this survived the L2ARC journey. 3994 */ 3995 equal = arc_cksum_equal(buf); 3996 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 3997 mutex_exit(hash_lock); 3998 zio->io_private = buf; 3999 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 4000 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 4001 arc_read_done(zio); 4002 } else { 4003 mutex_exit(hash_lock); 4004 /* 4005 * Buffer didn't survive caching. Increment stats and 4006 * reissue to the original storage device. 4007 */ 4008 if (zio->io_error != 0) { 4009 ARCSTAT_BUMP(arcstat_l2_io_error); 4010 } else { 4011 zio->io_error = EIO; 4012 } 4013 if (!equal) 4014 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 4015 4016 /* 4017 * If there's no waiter, issue an async i/o to the primary 4018 * storage now. If there *is* a waiter, the caller must 4019 * issue the i/o in a context where it's OK to block. 4020 */ 4021 if (zio->io_waiter == NULL) { 4022 zio_t *pio = zio_unique_parent(zio); 4023 4024 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 4025 4026 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, 4027 buf->b_data, zio->io_size, arc_read_done, buf, 4028 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4029 } 4030 } 4031 4032 kmem_free(cb, sizeof (l2arc_read_callback_t)); 4033 } 4034 4035 /* 4036 * This is the list priority from which the L2ARC will search for pages to 4037 * cache. This is used within loops (0..3) to cycle through lists in the 4038 * desired order. This order can have a significant effect on cache 4039 * performance. 4040 * 4041 * Currently the metadata lists are hit first, MFU then MRU, followed by 4042 * the data lists. This function returns a locked list, and also returns 4043 * the lock pointer. 
4044 */ 4045 static list_t * 4046 l2arc_list_locked(int list_num, kmutex_t **lock) 4047 { 4048 list_t *list; 4049 4050 ASSERT(list_num >= 0 && list_num <= 3); 4051 4052 switch (list_num) { 4053 case 0: 4054 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 4055 *lock = &arc_mfu->arcs_mtx; 4056 break; 4057 case 1: 4058 list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 4059 *lock = &arc_mru->arcs_mtx; 4060 break; 4061 case 2: 4062 list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 4063 *lock = &arc_mfu->arcs_mtx; 4064 break; 4065 case 3: 4066 list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 4067 *lock = &arc_mru->arcs_mtx; 4068 break; 4069 } 4070 4071 ASSERT(!(MUTEX_HELD(*lock))); 4072 mutex_enter(*lock); 4073 return (list); 4074 } 4075 4076 /* 4077 * Evict buffers from the device write hand to the distance specified in 4078 * bytes. This distance may span populated buffers, it may span nothing. 4079 * This is clearing a region on the L2ARC device ready for writing. 4080 * If the 'all' boolean is set, every buffer is evicted. 4081 */ 4082 static void 4083 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4084 { 4085 list_t *buflist; 4086 l2arc_buf_hdr_t *abl2; 4087 arc_buf_hdr_t *ab, *ab_prev; 4088 kmutex_t *hash_lock; 4089 uint64_t taddr; 4090 4091 buflist = dev->l2ad_buflist; 4092 4093 if (buflist == NULL) 4094 return; 4095 4096 if (!all && dev->l2ad_first) { 4097 /* 4098 * This is the first sweep through the device. There is 4099 * nothing to evict. 4100 */ 4101 return; 4102 } 4103 4104 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4105 /* 4106 * When nearing the end of the device, evict to the end 4107 * before the device write hand jumps to the start. 4108 */ 4109 taddr = dev->l2ad_end; 4110 } else { 4111 taddr = dev->l2ad_hand + distance; 4112 } 4113 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4114 uint64_t, taddr, boolean_t, all); 4115 4116 top: 4117 mutex_enter(&l2arc_buflist_mtx); 4118 for (ab = list_tail(buflist); ab; ab = ab_prev) { 4119 ab_prev = list_prev(buflist, ab); 4120 4121 hash_lock = HDR_LOCK(ab); 4122 if (!mutex_tryenter(hash_lock)) { 4123 /* 4124 * Missed the hash lock. Retry. 4125 */ 4126 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4127 mutex_exit(&l2arc_buflist_mtx); 4128 mutex_enter(hash_lock); 4129 mutex_exit(hash_lock); 4130 goto top; 4131 } 4132 4133 if (HDR_L2_WRITE_HEAD(ab)) { 4134 /* 4135 * We hit a write head node. Leave it for 4136 * l2arc_write_done(). 4137 */ 4138 list_remove(buflist, ab); 4139 mutex_exit(hash_lock); 4140 continue; 4141 } 4142 4143 if (!all && ab->b_l2hdr != NULL && 4144 (ab->b_l2hdr->b_daddr > taddr || 4145 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4146 /* 4147 * We've evicted to the target address, 4148 * or the end of the device. 4149 */ 4150 mutex_exit(hash_lock); 4151 break; 4152 } 4153 4154 if (HDR_FREE_IN_PROGRESS(ab)) { 4155 /* 4156 * Already on the path to destruction. 4157 */ 4158 mutex_exit(hash_lock); 4159 continue; 4160 } 4161 4162 if (ab->b_state == arc_l2c_only) { 4163 ASSERT(!HDR_L2_READING(ab)); 4164 /* 4165 * This doesn't exist in the ARC. Destroy. 4166 * arc_hdr_destroy() will call list_remove() 4167 * and decrement arcstat_l2_size. 4168 */ 4169 arc_change_state(arc_anon, ab, hash_lock); 4170 arc_hdr_destroy(ab); 4171 } else { 4172 /* 4173 * Invalidate issued or about to be issued 4174 * reads, since we may be about to write 4175 * over this location. 
4176 */ 4177 if (HDR_L2_READING(ab)) { 4178 ARCSTAT_BUMP(arcstat_l2_evict_reading); 4179 ab->b_flags |= ARC_L2_EVICTED; 4180 } 4181 4182 /* 4183 * Tell ARC this no longer exists in L2ARC. 4184 */ 4185 if (ab->b_l2hdr != NULL) { 4186 abl2 = ab->b_l2hdr; 4187 ab->b_l2hdr = NULL; 4188 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4189 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4190 } 4191 list_remove(buflist, ab); 4192 4193 /* 4194 * This may have been leftover after a 4195 * failed write. 4196 */ 4197 ab->b_flags &= ~ARC_L2_WRITING; 4198 } 4199 mutex_exit(hash_lock); 4200 } 4201 mutex_exit(&l2arc_buflist_mtx); 4202 4203 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0); 4204 dev->l2ad_evict = taddr; 4205 } 4206 4207 /* 4208 * Find and write ARC buffers to the L2ARC device. 4209 * 4210 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4211 * for reading until they have completed writing. 4212 */ 4213 static uint64_t 4214 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 4215 { 4216 arc_buf_hdr_t *ab, *ab_prev, *head; 4217 l2arc_buf_hdr_t *hdrl2; 4218 list_t *list; 4219 uint64_t passed_sz, write_sz, buf_sz, headroom; 4220 void *buf_data; 4221 kmutex_t *hash_lock, *list_lock; 4222 boolean_t have_lock, full; 4223 l2arc_write_callback_t *cb; 4224 zio_t *pio, *wzio; 4225 uint64_t guid = spa_guid(spa); 4226 4227 ASSERT(dev->l2ad_vdev != NULL); 4228 4229 pio = NULL; 4230 write_sz = 0; 4231 full = B_FALSE; 4232 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4233 head->b_flags |= ARC_L2_WRITE_HEAD; 4234 4235 /* 4236 * Copy buffers for L2ARC writing. 4237 */ 4238 mutex_enter(&l2arc_buflist_mtx); 4239 for (int try = 0; try <= 3; try++) { 4240 list = l2arc_list_locked(try, &list_lock); 4241 passed_sz = 0; 4242 4243 /* 4244 * L2ARC fast warmup. 4245 * 4246 * Until the ARC is warm and starts to evict, read from the 4247 * head of the ARC lists rather than the tail. 4248 */ 4249 headroom = target_sz * l2arc_headroom; 4250 if (arc_warm == B_FALSE) 4251 ab = list_head(list); 4252 else 4253 ab = list_tail(list); 4254 4255 for (; ab; ab = ab_prev) { 4256 if (arc_warm == B_FALSE) 4257 ab_prev = list_next(list, ab); 4258 else 4259 ab_prev = list_prev(list, ab); 4260 4261 hash_lock = HDR_LOCK(ab); 4262 have_lock = MUTEX_HELD(hash_lock); 4263 if (!have_lock && !mutex_tryenter(hash_lock)) { 4264 /* 4265 * Skip this buffer rather than waiting. 4266 */ 4267 continue; 4268 } 4269 4270 passed_sz += ab->b_size; 4271 if (passed_sz > headroom) { 4272 /* 4273 * Searched too far. 4274 */ 4275 mutex_exit(hash_lock); 4276 break; 4277 } 4278 4279 if (!l2arc_write_eligible(guid, ab)) { 4280 mutex_exit(hash_lock); 4281 continue; 4282 } 4283 4284 if ((write_sz + ab->b_size) > target_sz) { 4285 full = B_TRUE; 4286 mutex_exit(hash_lock); 4287 break; 4288 } 4289 4290 if (pio == NULL) { 4291 /* 4292 * Insert a dummy header on the buflist so 4293 * l2arc_write_done() can find where the 4294 * write buffers begin without searching. 4295 */ 4296 list_insert_head(dev->l2ad_buflist, head); 4297 4298 cb = kmem_alloc( 4299 sizeof (l2arc_write_callback_t), KM_SLEEP); 4300 cb->l2wcb_dev = dev; 4301 cb->l2wcb_head = head; 4302 pio = zio_root(spa, l2arc_write_done, cb, 4303 ZIO_FLAG_CANFAIL); 4304 } 4305 4306 /* 4307 * Create and add a new L2ARC header. 
4308 */ 4309 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 4310 hdrl2->b_dev = dev; 4311 hdrl2->b_daddr = dev->l2ad_hand; 4312 4313 ab->b_flags |= ARC_L2_WRITING; 4314 ab->b_l2hdr = hdrl2; 4315 list_insert_head(dev->l2ad_buflist, ab); 4316 buf_data = ab->b_buf->b_data; 4317 buf_sz = ab->b_size; 4318 4319 /* 4320 * Compute and store the buffer cksum before 4321 * writing. On debug the cksum is verified first. 4322 */ 4323 arc_cksum_verify(ab->b_buf); 4324 arc_cksum_compute(ab->b_buf, B_TRUE); 4325 4326 mutex_exit(hash_lock); 4327 4328 wzio = zio_write_phys(pio, dev->l2ad_vdev, 4329 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4330 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4331 ZIO_FLAG_CANFAIL, B_FALSE); 4332 4333 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4334 zio_t *, wzio); 4335 (void) zio_nowait(wzio); 4336 4337 /* 4338 * Keep the clock hand suitably device-aligned. 4339 */ 4340 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 4341 4342 write_sz += buf_sz; 4343 dev->l2ad_hand += buf_sz; 4344 } 4345 4346 mutex_exit(list_lock); 4347 4348 if (full == B_TRUE) 4349 break; 4350 } 4351 mutex_exit(&l2arc_buflist_mtx); 4352 4353 if (pio == NULL) { 4354 ASSERT3U(write_sz, ==, 0); 4355 kmem_cache_free(hdr_cache, head); 4356 return (0); 4357 } 4358 4359 ASSERT3U(write_sz, <=, target_sz); 4360 ARCSTAT_BUMP(arcstat_l2_writes_sent); 4361 ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz); 4362 ARCSTAT_INCR(arcstat_l2_size, write_sz); 4363 vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0); 4364 4365 /* 4366 * Bump device hand to the device start if it is approaching the end. 4367 * l2arc_evict() will already have evicted ahead for this case. 4368 */ 4369 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 4370 vdev_space_update(dev->l2ad_vdev, 4371 dev->l2ad_end - dev->l2ad_hand, 0, 0); 4372 dev->l2ad_hand = dev->l2ad_start; 4373 dev->l2ad_evict = dev->l2ad_start; 4374 dev->l2ad_first = B_FALSE; 4375 } 4376 4377 dev->l2ad_writing = B_TRUE; 4378 (void) zio_wait(pio); 4379 dev->l2ad_writing = B_FALSE; 4380 4381 return (write_sz); 4382 } 4383 4384 /* 4385 * This thread feeds the L2ARC at regular intervals. This is the beating 4386 * heart of the L2ARC. 4387 */ 4388 static void 4389 l2arc_feed_thread(void) 4390 { 4391 callb_cpr_t cpr; 4392 l2arc_dev_t *dev; 4393 spa_t *spa; 4394 uint64_t size, wrote; 4395 clock_t begin, next = ddi_get_lbolt(); 4396 4397 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 4398 4399 mutex_enter(&l2arc_feed_thr_lock); 4400 4401 while (l2arc_thread_exit == 0) { 4402 CALLB_CPR_SAFE_BEGIN(&cpr); 4403 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 4404 next); 4405 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 4406 next = ddi_get_lbolt() + hz; 4407 4408 /* 4409 * Quick check for L2ARC devices. 4410 */ 4411 mutex_enter(&l2arc_dev_mtx); 4412 if (l2arc_ndev == 0) { 4413 mutex_exit(&l2arc_dev_mtx); 4414 continue; 4415 } 4416 mutex_exit(&l2arc_dev_mtx); 4417 begin = ddi_get_lbolt(); 4418 4419 /* 4420 * This selects the next l2arc device to write to, and in 4421 * doing so the next spa to feed from: dev->l2ad_spa. This 4422 * will return NULL if there are now no l2arc devices or if 4423 * they are all faulted. 4424 * 4425 * If a device is returned, its spa's config lock is also 4426 * held to prevent device removal. l2arc_dev_get_next() 4427 * will grab and release l2arc_dev_mtx. 
4428 */ 4429 if ((dev = l2arc_dev_get_next()) == NULL) 4430 continue; 4431 4432 spa = dev->l2ad_spa; 4433 ASSERT(spa != NULL); 4434 4435 /* 4436 * Avoid contributing to memory pressure. 4437 */ 4438 if (arc_reclaim_needed()) { 4439 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 4440 spa_config_exit(spa, SCL_L2ARC, dev); 4441 continue; 4442 } 4443 4444 ARCSTAT_BUMP(arcstat_l2_feeds); 4445 4446 size = l2arc_write_size(dev); 4447 4448 /* 4449 * Evict L2ARC buffers that will be overwritten. 4450 */ 4451 l2arc_evict(dev, size, B_FALSE); 4452 4453 /* 4454 * Write ARC buffers. 4455 */ 4456 wrote = l2arc_write_buffers(spa, dev, size); 4457 4458 /* 4459 * Calculate interval between writes. 4460 */ 4461 next = l2arc_write_interval(begin, size, wrote); 4462 spa_config_exit(spa, SCL_L2ARC, dev); 4463 } 4464 4465 l2arc_thread_exit = 0; 4466 cv_broadcast(&l2arc_feed_thr_cv); 4467 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 4468 thread_exit(); 4469 } 4470 4471 boolean_t 4472 l2arc_vdev_present(vdev_t *vd) 4473 { 4474 l2arc_dev_t *dev; 4475 4476 mutex_enter(&l2arc_dev_mtx); 4477 for (dev = list_head(l2arc_dev_list); dev != NULL; 4478 dev = list_next(l2arc_dev_list, dev)) { 4479 if (dev->l2ad_vdev == vd) 4480 break; 4481 } 4482 mutex_exit(&l2arc_dev_mtx); 4483 4484 return (dev != NULL); 4485 } 4486 4487 /* 4488 * Add a vdev for use by the L2ARC. By this point the spa has already 4489 * validated the vdev and opened it. 4490 */ 4491 void 4492 l2arc_add_vdev(spa_t *spa, vdev_t *vd) 4493 { 4494 l2arc_dev_t *adddev; 4495 4496 ASSERT(!l2arc_vdev_present(vd)); 4497 4498 /* 4499 * Create a new l2arc device entry. 4500 */ 4501 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 4502 adddev->l2ad_spa = spa; 4503 adddev->l2ad_vdev = vd; 4504 adddev->l2ad_write = l2arc_write_max; 4505 adddev->l2ad_boost = l2arc_write_boost; 4506 adddev->l2ad_start = VDEV_LABEL_START_SIZE; 4507 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); 4508 adddev->l2ad_hand = adddev->l2ad_start; 4509 adddev->l2ad_evict = adddev->l2ad_start; 4510 adddev->l2ad_first = B_TRUE; 4511 adddev->l2ad_writing = B_FALSE; 4512 ASSERT3U(adddev->l2ad_write, >, 0); 4513 4514 /* 4515 * This is a list of all ARC buffers that are still valid on the 4516 * device. 4517 */ 4518 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 4519 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 4520 offsetof(arc_buf_hdr_t, b_l2node)); 4521 4522 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); 4523 4524 /* 4525 * Add device to global list 4526 */ 4527 mutex_enter(&l2arc_dev_mtx); 4528 list_insert_head(l2arc_dev_list, adddev); 4529 atomic_inc_64(&l2arc_ndev); 4530 mutex_exit(&l2arc_dev_mtx); 4531 } 4532 4533 /* 4534 * Remove a vdev from the L2ARC. 4535 */ 4536 void 4537 l2arc_remove_vdev(vdev_t *vd) 4538 { 4539 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 4540 4541 /* 4542 * Find the device by vdev 4543 */ 4544 mutex_enter(&l2arc_dev_mtx); 4545 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 4546 nextdev = list_next(l2arc_dev_list, dev); 4547 if (vd == dev->l2ad_vdev) { 4548 remdev = dev; 4549 break; 4550 } 4551 } 4552 ASSERT(remdev != NULL); 4553 4554 /* 4555 * Remove device from global list 4556 */ 4557 list_remove(l2arc_dev_list, remdev); 4558 l2arc_dev_last = NULL; /* may have been invalidated */ 4559 atomic_dec_64(&l2arc_ndev); 4560 mutex_exit(&l2arc_dev_mtx); 4561 4562 /* 4563 * Clear all buflists and ARC references. L2ARC device flush. 
4564 */ 4565 l2arc_evict(remdev, 0, B_TRUE); 4566 list_destroy(remdev->l2ad_buflist); 4567 kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 4568 kmem_free(remdev, sizeof (l2arc_dev_t)); 4569 } 4570 4571 void 4572 l2arc_init(void) 4573 { 4574 l2arc_thread_exit = 0; 4575 l2arc_ndev = 0; 4576 l2arc_writes_sent = 0; 4577 l2arc_writes_done = 0; 4578 4579 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 4580 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 4581 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 4582 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 4583 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 4584 4585 l2arc_dev_list = &L2ARC_dev_list; 4586 l2arc_free_on_write = &L2ARC_free_on_write; 4587 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 4588 offsetof(l2arc_dev_t, l2ad_node)); 4589 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 4590 offsetof(l2arc_data_free_t, l2df_list_node)); 4591 } 4592 4593 void 4594 l2arc_fini(void) 4595 { 4596 /* 4597 * This is called from dmu_fini(), which is called from spa_fini(); 4598 * Because of this, we can assume that all l2arc devices have 4599 * already been removed when the pools themselves were removed. 4600 */ 4601 4602 l2arc_do_free_on_write(); 4603 4604 mutex_destroy(&l2arc_feed_thr_lock); 4605 cv_destroy(&l2arc_feed_thr_cv); 4606 mutex_destroy(&l2arc_dev_mtx); 4607 mutex_destroy(&l2arc_buflist_mtx); 4608 mutex_destroy(&l2arc_free_on_write_mtx); 4609 4610 list_destroy(l2arc_dev_list); 4611 list_destroy(l2arc_free_on_write); 4612 } 4613 4614 void 4615 l2arc_start(void) 4616 { 4617 if (!(spa_mode_global & FWRITE)) 4618 return; 4619 4620 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 4621 TS_RUN, minclsyspri); 4622 } 4623 4624 void 4625 l2arc_stop(void) 4626 { 4627 if (!(spa_mode_global & FWRITE)) 4628 return; 4629 4630 mutex_enter(&l2arc_feed_thr_lock); 4631 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 4632 l2arc_thread_exit = 1; 4633 while (l2arc_thread_exit != 0) 4634 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 4635 mutex_exit(&l2arc_feed_thr_lock); 4636 } 4637