/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory.  This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about.  Our cache is not so simple.  At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them.  Blocks are only evictable
 *    when there are no external references active.  This makes
 *    eviction far more problematic: we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space.  In these circumstances we are unable to adjust the cache
 *    size.  To prevent the cache growing unbounded at these times we
 *    implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss.  Our model has a variable sized cache.  It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 *    elements of the cache are therefore exactly the same size.  So
 *    when adjusting the cache size following a cache miss, it's simply
 *    a matter of choosing a single page to evict.  In our model, we
 *    have variable sized cache blocks (ranging from 512 bytes to
 *    128K bytes).  We therefore choose a set of blocks to evict to make
 *    space for a cache miss that approximates as closely as possible
 *    the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.
 * We therefore provide two types of locks: 1) the hash table lock
 * array, and 2) the arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <zfs_fletcher.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are the
 * only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
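 *
 * As an informal illustration only (the exact rules live in arc_access()
 * and arc_change_state()), a buffer typically moves through the states
 * roughly as follows:
 *
 *	anon --(write assigns a DVA)--> mru
 *	mru --(accessed again after ARC_MINTIME)--> mfu
 *	mru --(evicted, header kept)--> mru_ghost --(hit again)--> mfu
 *	mfu --(evicted, header kept)--> mfu_ghost --(hit again)--> mfu
 *
 * The ghost states hold headers without data; hits on them are what
 * drive the adaptive sizing of arc_p.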
214 */ 215 216 typedef struct arc_state { 217 list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */ 218 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */ 219 uint64_t arcs_size; /* total amount of data in this state */ 220 kmutex_t arcs_mtx; 221 } arc_state_t; 222 223 /* The 6 states: */ 224 static arc_state_t ARC_anon; 225 static arc_state_t ARC_mru; 226 static arc_state_t ARC_mru_ghost; 227 static arc_state_t ARC_mfu; 228 static arc_state_t ARC_mfu_ghost; 229 static arc_state_t ARC_l2c_only; 230 231 typedef struct arc_stats { 232 kstat_named_t arcstat_hits; 233 kstat_named_t arcstat_misses; 234 kstat_named_t arcstat_demand_data_hits; 235 kstat_named_t arcstat_demand_data_misses; 236 kstat_named_t arcstat_demand_metadata_hits; 237 kstat_named_t arcstat_demand_metadata_misses; 238 kstat_named_t arcstat_prefetch_data_hits; 239 kstat_named_t arcstat_prefetch_data_misses; 240 kstat_named_t arcstat_prefetch_metadata_hits; 241 kstat_named_t arcstat_prefetch_metadata_misses; 242 kstat_named_t arcstat_mru_hits; 243 kstat_named_t arcstat_mru_ghost_hits; 244 kstat_named_t arcstat_mfu_hits; 245 kstat_named_t arcstat_mfu_ghost_hits; 246 kstat_named_t arcstat_deleted; 247 kstat_named_t arcstat_recycle_miss; 248 kstat_named_t arcstat_mutex_miss; 249 kstat_named_t arcstat_evict_skip; 250 kstat_named_t arcstat_evict_l2_cached; 251 kstat_named_t arcstat_evict_l2_eligible; 252 kstat_named_t arcstat_evict_l2_ineligible; 253 kstat_named_t arcstat_hash_elements; 254 kstat_named_t arcstat_hash_elements_max; 255 kstat_named_t arcstat_hash_collisions; 256 kstat_named_t arcstat_hash_chains; 257 kstat_named_t arcstat_hash_chain_max; 258 kstat_named_t arcstat_p; 259 kstat_named_t arcstat_c; 260 kstat_named_t arcstat_c_min; 261 kstat_named_t arcstat_c_max; 262 kstat_named_t arcstat_size; 263 kstat_named_t arcstat_hdr_size; 264 kstat_named_t arcstat_data_size; 265 kstat_named_t arcstat_other_size; 266 kstat_named_t arcstat_l2_hits; 267 kstat_named_t arcstat_l2_misses; 268 kstat_named_t arcstat_l2_feeds; 269 kstat_named_t arcstat_l2_rw_clash; 270 kstat_named_t arcstat_l2_read_bytes; 271 kstat_named_t arcstat_l2_write_bytes; 272 kstat_named_t arcstat_l2_writes_sent; 273 kstat_named_t arcstat_l2_writes_done; 274 kstat_named_t arcstat_l2_writes_error; 275 kstat_named_t arcstat_l2_writes_hdr_miss; 276 kstat_named_t arcstat_l2_evict_lock_retry; 277 kstat_named_t arcstat_l2_evict_reading; 278 kstat_named_t arcstat_l2_free_on_write; 279 kstat_named_t arcstat_l2_abort_lowmem; 280 kstat_named_t arcstat_l2_cksum_bad; 281 kstat_named_t arcstat_l2_io_error; 282 kstat_named_t arcstat_l2_size; 283 kstat_named_t arcstat_l2_hdr_size; 284 kstat_named_t arcstat_memory_throttle_count; 285 } arc_stats_t; 286 287 static arc_stats_t arc_stats = { 288 { "hits", KSTAT_DATA_UINT64 }, 289 { "misses", KSTAT_DATA_UINT64 }, 290 { "demand_data_hits", KSTAT_DATA_UINT64 }, 291 { "demand_data_misses", KSTAT_DATA_UINT64 }, 292 { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 293 { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 294 { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 295 { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 296 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 297 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 298 { "mru_hits", KSTAT_DATA_UINT64 }, 299 { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 300 { "mfu_hits", KSTAT_DATA_UINT64 }, 301 { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 302 { "deleted", KSTAT_DATA_UINT64 }, 303 { "recycle_miss", KSTAT_DATA_UINT64 }, 304 { "mutex_miss", KSTAT_DATA_UINT64 }, 
305 { "evict_skip", KSTAT_DATA_UINT64 }, 306 { "evict_l2_cached", KSTAT_DATA_UINT64 }, 307 { "evict_l2_eligible", KSTAT_DATA_UINT64 }, 308 { "evict_l2_ineligible", KSTAT_DATA_UINT64 }, 309 { "hash_elements", KSTAT_DATA_UINT64 }, 310 { "hash_elements_max", KSTAT_DATA_UINT64 }, 311 { "hash_collisions", KSTAT_DATA_UINT64 }, 312 { "hash_chains", KSTAT_DATA_UINT64 }, 313 { "hash_chain_max", KSTAT_DATA_UINT64 }, 314 { "p", KSTAT_DATA_UINT64 }, 315 { "c", KSTAT_DATA_UINT64 }, 316 { "c_min", KSTAT_DATA_UINT64 }, 317 { "c_max", KSTAT_DATA_UINT64 }, 318 { "size", KSTAT_DATA_UINT64 }, 319 { "hdr_size", KSTAT_DATA_UINT64 }, 320 { "data_size", KSTAT_DATA_UINT64 }, 321 { "other_size", KSTAT_DATA_UINT64 }, 322 { "l2_hits", KSTAT_DATA_UINT64 }, 323 { "l2_misses", KSTAT_DATA_UINT64 }, 324 { "l2_feeds", KSTAT_DATA_UINT64 }, 325 { "l2_rw_clash", KSTAT_DATA_UINT64 }, 326 { "l2_read_bytes", KSTAT_DATA_UINT64 }, 327 { "l2_write_bytes", KSTAT_DATA_UINT64 }, 328 { "l2_writes_sent", KSTAT_DATA_UINT64 }, 329 { "l2_writes_done", KSTAT_DATA_UINT64 }, 330 { "l2_writes_error", KSTAT_DATA_UINT64 }, 331 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, 332 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 333 { "l2_evict_reading", KSTAT_DATA_UINT64 }, 334 { "l2_free_on_write", KSTAT_DATA_UINT64 }, 335 { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 336 { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 337 { "l2_io_error", KSTAT_DATA_UINT64 }, 338 { "l2_size", KSTAT_DATA_UINT64 }, 339 { "l2_hdr_size", KSTAT_DATA_UINT64 }, 340 { "memory_throttle_count", KSTAT_DATA_UINT64 } 341 }; 342 343 #define ARCSTAT(stat) (arc_stats.stat.value.ui64) 344 345 #define ARCSTAT_INCR(stat, val) \ 346 atomic_add_64(&arc_stats.stat.value.ui64, (val)); 347 348 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 349 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 350 351 #define ARCSTAT_MAX(stat, val) { \ 352 uint64_t m; \ 353 while ((val) > (m = arc_stats.stat.value.ui64) && \ 354 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 355 continue; \ 356 } 357 358 #define ARCSTAT_MAXSTAT(stat) \ 359 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 360 361 /* 362 * We define a macro to allow ARC hits/misses to be easily broken down by 363 * two separate conditions, giving a total of four different subtypes for 364 * each of hits and misses (so eight statistics total). 365 */ 366 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 367 if (cond1) { \ 368 if (cond2) { \ 369 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 370 } else { \ 371 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 372 } \ 373 } else { \ 374 if (cond2) { \ 375 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 376 } else { \ 377 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 378 } \ 379 } 380 381 kstat_t *arc_ksp; 382 static arc_state_t *arc_anon; 383 static arc_state_t *arc_mru; 384 static arc_state_t *arc_mru_ghost; 385 static arc_state_t *arc_mfu; 386 static arc_state_t *arc_mfu_ghost; 387 static arc_state_t *arc_l2c_only; 388 389 /* 390 * There are several ARC variables that are critical to export as kstats -- 391 * but we don't want to have to grovel around in the kstat whenever we wish to 392 * manipulate them. For these variables, we therefore define them to be in 393 * terms of the statistic variable. This assures that we are not introducing 394 * the possibility of inconsistency by having shadow copies of the variables, 395 * while still allowing the code to be readable. 
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_loaned_bytes;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;
	void			*b_thawed;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	uint64_t		b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);

static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
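 *
 * (The public flags, such as ARC_PREFETCH and ARC_L2CACHE referenced by the
 * HDR_* macros below, are defined in arc.h and occupy lower bit positions,
 * which is why the private flags here start at (1 << 9).  As an illustrative
 * sketch only, a typical flag test looks like
 *
 *	if (HDR_IO_IN_PROGRESS(hdr))
 *		cv_wait(&hdr->b_cv, hash_lock);
 *
 * i.e. the wrappers simply mask bits out of b_flags.)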
488 */ 489 490 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 491 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 492 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 493 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 494 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 495 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 496 #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ 497 #define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */ 498 #define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */ 499 #define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */ 500 501 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 502 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 503 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 504 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH) 505 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 506 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 507 #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS) 508 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE) 509 #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \ 510 (hdr)->b_l2hdr != NULL) 511 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING) 512 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED) 513 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD) 514 515 /* 516 * Other sizes 517 */ 518 519 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 520 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) 521 522 /* 523 * Hash table routines 524 */ 525 526 #define HT_LOCK_PAD 64 527 528 struct ht_lock { 529 kmutex_t ht_lock; 530 #ifdef _KERNEL 531 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 532 #endif 533 }; 534 535 #define BUF_LOCKS 256 536 typedef struct buf_hash_table { 537 uint64_t ht_mask; 538 arc_buf_hdr_t **ht_table; 539 struct ht_lock ht_locks[BUF_LOCKS]; 540 } buf_hash_table_t; 541 542 static buf_hash_table_t buf_hash_table; 543 544 #define BUF_HASH_INDEX(spa, dva, birth) \ 545 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 546 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 547 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 548 #define HDR_LOCK(hdr) \ 549 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth))) 550 551 uint64_t zfs_crc64_table[256]; 552 553 /* 554 * Level 2 ARC 555 */ 556 557 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 558 #define L2ARC_HEADROOM 2 /* num of writes */ 559 #define L2ARC_FEED_SECS 1 /* caching interval secs */ 560 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ 561 562 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 563 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 564 565 /* 566 * L2ARC Performance Tunables 567 */ 568 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 569 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 570 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 571 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 572 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */ 573 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 574 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */ 575 boolean_t l2arc_norw = 
B_TRUE; /* no reads during writes */ 576 577 /* 578 * L2ARC Internals 579 */ 580 typedef struct l2arc_dev { 581 vdev_t *l2ad_vdev; /* vdev */ 582 spa_t *l2ad_spa; /* spa */ 583 uint64_t l2ad_hand; /* next write location */ 584 uint64_t l2ad_write; /* desired write size, bytes */ 585 uint64_t l2ad_boost; /* warmup write boost, bytes */ 586 uint64_t l2ad_start; /* first addr on device */ 587 uint64_t l2ad_end; /* last addr on device */ 588 uint64_t l2ad_evict; /* last addr eviction reached */ 589 boolean_t l2ad_first; /* first sweep through */ 590 boolean_t l2ad_writing; /* currently writing */ 591 list_t *l2ad_buflist; /* buffer list */ 592 list_node_t l2ad_node; /* device list node */ 593 } l2arc_dev_t; 594 595 static list_t L2ARC_dev_list; /* device list */ 596 static list_t *l2arc_dev_list; /* device list pointer */ 597 static kmutex_t l2arc_dev_mtx; /* device list mutex */ 598 static l2arc_dev_t *l2arc_dev_last; /* last device used */ 599 static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 600 static list_t L2ARC_free_on_write; /* free after write buf list */ 601 static list_t *l2arc_free_on_write; /* free after write list ptr */ 602 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 603 static uint64_t l2arc_ndev; /* number of devices */ 604 605 typedef struct l2arc_read_callback { 606 arc_buf_t *l2rcb_buf; /* read buffer */ 607 spa_t *l2rcb_spa; /* spa */ 608 blkptr_t l2rcb_bp; /* original blkptr */ 609 zbookmark_t l2rcb_zb; /* original bookmark */ 610 int l2rcb_flags; /* original flags */ 611 } l2arc_read_callback_t; 612 613 typedef struct l2arc_write_callback { 614 l2arc_dev_t *l2wcb_dev; /* device info */ 615 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 616 } l2arc_write_callback_t; 617 618 struct l2arc_buf_hdr { 619 /* protected by arc_buf_hdr mutex */ 620 l2arc_dev_t *b_dev; /* L2ARC device */ 621 uint64_t b_daddr; /* disk address, offset byte */ 622 }; 623 624 typedef struct l2arc_data_free { 625 /* protected by l2arc_free_on_write_mtx */ 626 void *l2df_data; 627 size_t l2df_size; 628 void (*l2df_func)(void *, size_t); 629 list_node_t l2df_list_node; 630 } l2arc_data_free_t; 631 632 static kmutex_t l2arc_feed_thr_lock; 633 static kcondvar_t l2arc_feed_thr_cv; 634 static uint8_t l2arc_thread_exit; 635 636 static void l2arc_read_done(zio_t *zio); 637 static void l2arc_hdr_stat_add(void); 638 static void l2arc_hdr_stat_remove(void); 639 640 static uint64_t 641 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth) 642 { 643 uint8_t *vdva = (uint8_t *)dva; 644 uint64_t crc = -1ULL; 645 int i; 646 647 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 648 649 for (i = 0; i < sizeof (dva_t); i++) 650 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 651 652 crc ^= (spa>>8) ^ birth; 653 654 return (crc); 655 } 656 657 #define BUF_EMPTY(buf) \ 658 ((buf)->b_dva.dva_word[0] == 0 && \ 659 (buf)->b_dva.dva_word[1] == 0 && \ 660 (buf)->b_birth == 0) 661 662 #define BUF_EQUAL(spa, dva, birth, buf) \ 663 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 664 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 665 ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 666 667 static void 668 buf_discard_identity(arc_buf_hdr_t *hdr) 669 { 670 hdr->b_dva.dva_word[0] = 0; 671 hdr->b_dva.dva_word[1] = 0; 672 hdr->b_birth = 0; 673 hdr->b_cksum0 = 0; 674 } 675 676 static arc_buf_hdr_t * 677 buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp) 678 { 679 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 680 kmutex_t *hash_lock = 
BUF_HASH_LOCK(idx); 681 arc_buf_hdr_t *buf; 682 683 mutex_enter(hash_lock); 684 for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 685 buf = buf->b_hash_next) { 686 if (BUF_EQUAL(spa, dva, birth, buf)) { 687 *lockp = hash_lock; 688 return (buf); 689 } 690 } 691 mutex_exit(hash_lock); 692 *lockp = NULL; 693 return (NULL); 694 } 695 696 /* 697 * Insert an entry into the hash table. If there is already an element 698 * equal to elem in the hash table, then the already existing element 699 * will be returned and the new element will not be inserted. 700 * Otherwise returns NULL. 701 */ 702 static arc_buf_hdr_t * 703 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 704 { 705 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 706 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 707 arc_buf_hdr_t *fbuf; 708 uint32_t i; 709 710 ASSERT(!HDR_IN_HASH_TABLE(buf)); 711 *lockp = hash_lock; 712 mutex_enter(hash_lock); 713 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 714 fbuf = fbuf->b_hash_next, i++) { 715 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 716 return (fbuf); 717 } 718 719 buf->b_hash_next = buf_hash_table.ht_table[idx]; 720 buf_hash_table.ht_table[idx] = buf; 721 buf->b_flags |= ARC_IN_HASH_TABLE; 722 723 /* collect some hash table performance data */ 724 if (i > 0) { 725 ARCSTAT_BUMP(arcstat_hash_collisions); 726 if (i == 1) 727 ARCSTAT_BUMP(arcstat_hash_chains); 728 729 ARCSTAT_MAX(arcstat_hash_chain_max, i); 730 } 731 732 ARCSTAT_BUMP(arcstat_hash_elements); 733 ARCSTAT_MAXSTAT(arcstat_hash_elements); 734 735 return (NULL); 736 } 737 738 static void 739 buf_hash_remove(arc_buf_hdr_t *buf) 740 { 741 arc_buf_hdr_t *fbuf, **bufp; 742 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 743 744 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 745 ASSERT(HDR_IN_HASH_TABLE(buf)); 746 747 bufp = &buf_hash_table.ht_table[idx]; 748 while ((fbuf = *bufp) != buf) { 749 ASSERT(fbuf != NULL); 750 bufp = &fbuf->b_hash_next; 751 } 752 *bufp = buf->b_hash_next; 753 buf->b_hash_next = NULL; 754 buf->b_flags &= ~ARC_IN_HASH_TABLE; 755 756 /* collect some hash table performance data */ 757 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 758 759 if (buf_hash_table.ht_table[idx] && 760 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 761 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 762 } 763 764 /* 765 * Global data structures and functions for the buf kmem cache. 766 */ 767 static kmem_cache_t *hdr_cache; 768 static kmem_cache_t *buf_cache; 769 770 static void 771 buf_fini(void) 772 { 773 int i; 774 775 kmem_free(buf_hash_table.ht_table, 776 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 777 for (i = 0; i < BUF_LOCKS; i++) 778 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 779 kmem_cache_destroy(hdr_cache); 780 kmem_cache_destroy(buf_cache); 781 } 782 783 /* 784 * Constructor callback - called when the cache is empty 785 * and a new buf is requested. 
786 */ 787 /* ARGSUSED */ 788 static int 789 hdr_cons(void *vbuf, void *unused, int kmflag) 790 { 791 arc_buf_hdr_t *buf = vbuf; 792 793 bzero(buf, sizeof (arc_buf_hdr_t)); 794 refcount_create(&buf->b_refcnt); 795 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 796 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 797 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); 798 799 return (0); 800 } 801 802 /* ARGSUSED */ 803 static int 804 buf_cons(void *vbuf, void *unused, int kmflag) 805 { 806 arc_buf_t *buf = vbuf; 807 808 bzero(buf, sizeof (arc_buf_t)); 809 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL); 810 rw_init(&buf->b_data_lock, NULL, RW_DEFAULT, NULL); 811 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); 812 813 return (0); 814 } 815 816 /* 817 * Destructor callback - called when a cached buf is 818 * no longer required. 819 */ 820 /* ARGSUSED */ 821 static void 822 hdr_dest(void *vbuf, void *unused) 823 { 824 arc_buf_hdr_t *buf = vbuf; 825 826 ASSERT(BUF_EMPTY(buf)); 827 refcount_destroy(&buf->b_refcnt); 828 cv_destroy(&buf->b_cv); 829 mutex_destroy(&buf->b_freeze_lock); 830 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); 831 } 832 833 /* ARGSUSED */ 834 static void 835 buf_dest(void *vbuf, void *unused) 836 { 837 arc_buf_t *buf = vbuf; 838 839 mutex_destroy(&buf->b_evict_lock); 840 rw_destroy(&buf->b_data_lock); 841 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); 842 } 843 844 /* 845 * Reclaim callback -- invoked when memory is low. 846 */ 847 /* ARGSUSED */ 848 static void 849 hdr_recl(void *unused) 850 { 851 dprintf("hdr_recl called\n"); 852 /* 853 * umem calls the reclaim func when we destroy the buf cache, 854 * which is after we do arc_fini(). 855 */ 856 if (!arc_dead) 857 cv_signal(&arc_reclaim_thr_cv); 858 } 859 860 static void 861 buf_init(void) 862 { 863 uint64_t *ct; 864 uint64_t hsize = 1ULL << 12; 865 int i, j; 866 867 /* 868 * The hash table is big enough to fill all of physical memory 869 * with an average 64K block size. The table will take up 870 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 
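 *
 * For example (illustrative numbers only): with 4GB of physical memory
 * and 8-byte pointers, physmem * PAGESIZE is 2^32, so the loop below
 * stops at hsize = 2^16 (65536 buckets), i.e. a 512KB table -- matching
 * the 128KB-per-GB estimate above.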
871 */ 872 while (hsize * 65536 < physmem * PAGESIZE) 873 hsize <<= 1; 874 retry: 875 buf_hash_table.ht_mask = hsize - 1; 876 buf_hash_table.ht_table = 877 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 878 if (buf_hash_table.ht_table == NULL) { 879 ASSERT(hsize > (1ULL << 8)); 880 hsize >>= 1; 881 goto retry; 882 } 883 884 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 885 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 886 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 887 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 888 889 for (i = 0; i < 256; i++) 890 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 891 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 892 893 for (i = 0; i < BUF_LOCKS; i++) { 894 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 895 NULL, MUTEX_DEFAULT, NULL); 896 } 897 } 898 899 #define ARC_MINTIME (hz>>4) /* 62 ms */ 900 901 static void 902 arc_cksum_verify(arc_buf_t *buf) 903 { 904 zio_cksum_t zc; 905 906 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 907 return; 908 909 mutex_enter(&buf->b_hdr->b_freeze_lock); 910 if (buf->b_hdr->b_freeze_cksum == NULL || 911 (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 912 mutex_exit(&buf->b_hdr->b_freeze_lock); 913 return; 914 } 915 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 916 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 917 panic("buffer modified while frozen!"); 918 mutex_exit(&buf->b_hdr->b_freeze_lock); 919 } 920 921 static int 922 arc_cksum_equal(arc_buf_t *buf) 923 { 924 zio_cksum_t zc; 925 int equal; 926 927 mutex_enter(&buf->b_hdr->b_freeze_lock); 928 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 929 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 930 mutex_exit(&buf->b_hdr->b_freeze_lock); 931 932 return (equal); 933 } 934 935 static void 936 arc_cksum_compute(arc_buf_t *buf, boolean_t force) 937 { 938 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 939 return; 940 941 mutex_enter(&buf->b_hdr->b_freeze_lock); 942 if (buf->b_hdr->b_freeze_cksum != NULL) { 943 mutex_exit(&buf->b_hdr->b_freeze_lock); 944 return; 945 } 946 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 947 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 948 buf->b_hdr->b_freeze_cksum); 949 mutex_exit(&buf->b_hdr->b_freeze_lock); 950 } 951 952 void 953 arc_buf_thaw(arc_buf_t *buf) 954 { 955 if (zfs_flags & ZFS_DEBUG_MODIFY) { 956 if (buf->b_hdr->b_state != arc_anon) 957 panic("modifying non-anon buffer!"); 958 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) 959 panic("modifying buffer while i/o in progress!"); 960 arc_cksum_verify(buf); 961 } 962 963 mutex_enter(&buf->b_hdr->b_freeze_lock); 964 if (buf->b_hdr->b_freeze_cksum != NULL) { 965 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 966 buf->b_hdr->b_freeze_cksum = NULL; 967 } 968 969 if (zfs_flags & ZFS_DEBUG_MODIFY) { 970 if (buf->b_hdr->b_thawed) 971 kmem_free(buf->b_hdr->b_thawed, 1); 972 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP); 973 } 974 975 mutex_exit(&buf->b_hdr->b_freeze_lock); 976 } 977 978 void 979 arc_buf_freeze(arc_buf_t *buf) 980 { 981 kmutex_t *hash_lock; 982 983 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 984 return; 985 986 hash_lock = HDR_LOCK(buf->b_hdr); 987 mutex_enter(hash_lock); 988 989 ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 990 buf->b_hdr->b_state == arc_anon); 991 arc_cksum_compute(buf, B_FALSE); 992 mutex_exit(hash_lock); 993 } 994 995 static void 996 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 997 { 998 
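	/*
	 * Taking the first hold on a cached (non-anonymous) header pins it:
	 * the header is pulled off its state's evictable list and its bytes
	 * are deducted from arcs_lsize, so arc_evict() will not touch it.
	 */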
ASSERT(MUTEX_HELD(hash_lock)); 999 1000 if ((refcount_add(&ab->b_refcnt, tag) == 1) && 1001 (ab->b_state != arc_anon)) { 1002 uint64_t delta = ab->b_size * ab->b_datacnt; 1003 list_t *list = &ab->b_state->arcs_list[ab->b_type]; 1004 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; 1005 1006 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 1007 mutex_enter(&ab->b_state->arcs_mtx); 1008 ASSERT(list_link_active(&ab->b_arc_node)); 1009 list_remove(list, ab); 1010 if (GHOST_STATE(ab->b_state)) { 1011 ASSERT3U(ab->b_datacnt, ==, 0); 1012 ASSERT3P(ab->b_buf, ==, NULL); 1013 delta = ab->b_size; 1014 } 1015 ASSERT(delta > 0); 1016 ASSERT3U(*size, >=, delta); 1017 atomic_add_64(size, -delta); 1018 mutex_exit(&ab->b_state->arcs_mtx); 1019 /* remove the prefetch flag if we get a reference */ 1020 if (ab->b_flags & ARC_PREFETCH) 1021 ab->b_flags &= ~ARC_PREFETCH; 1022 } 1023 } 1024 1025 static int 1026 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 1027 { 1028 int cnt; 1029 arc_state_t *state = ab->b_state; 1030 1031 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 1032 ASSERT(!GHOST_STATE(state)); 1033 1034 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 1035 (state != arc_anon)) { 1036 uint64_t *size = &state->arcs_lsize[ab->b_type]; 1037 1038 ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 1039 mutex_enter(&state->arcs_mtx); 1040 ASSERT(!list_link_active(&ab->b_arc_node)); 1041 list_insert_head(&state->arcs_list[ab->b_type], ab); 1042 ASSERT(ab->b_datacnt > 0); 1043 atomic_add_64(size, ab->b_size * ab->b_datacnt); 1044 mutex_exit(&state->arcs_mtx); 1045 } 1046 return (cnt); 1047 } 1048 1049 /* 1050 * Move the supplied buffer to the indicated state. The mutex 1051 * for the buffer must be held by the caller. 1052 */ 1053 static void 1054 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 1055 { 1056 arc_state_t *old_state = ab->b_state; 1057 int64_t refcnt = refcount_count(&ab->b_refcnt); 1058 uint64_t from_delta, to_delta; 1059 1060 ASSERT(MUTEX_HELD(hash_lock)); 1061 ASSERT(new_state != old_state); 1062 ASSERT(refcnt == 0 || ab->b_datacnt > 0); 1063 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 1064 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon); 1065 1066 from_delta = to_delta = ab->b_datacnt * ab->b_size; 1067 1068 /* 1069 * If this buffer is evictable, transfer it from the 1070 * old state list to the new state list. 1071 */ 1072 if (refcnt == 0) { 1073 if (old_state != arc_anon) { 1074 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx); 1075 uint64_t *size = &old_state->arcs_lsize[ab->b_type]; 1076 1077 if (use_mutex) 1078 mutex_enter(&old_state->arcs_mtx); 1079 1080 ASSERT(list_link_active(&ab->b_arc_node)); 1081 list_remove(&old_state->arcs_list[ab->b_type], ab); 1082 1083 /* 1084 * If prefetching out of the ghost cache, 1085 * we will have a non-zero datacnt. 
1086 */ 1087 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 1088 /* ghost elements have a ghost size */ 1089 ASSERT(ab->b_buf == NULL); 1090 from_delta = ab->b_size; 1091 } 1092 ASSERT3U(*size, >=, from_delta); 1093 atomic_add_64(size, -from_delta); 1094 1095 if (use_mutex) 1096 mutex_exit(&old_state->arcs_mtx); 1097 } 1098 if (new_state != arc_anon) { 1099 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); 1100 uint64_t *size = &new_state->arcs_lsize[ab->b_type]; 1101 1102 if (use_mutex) 1103 mutex_enter(&new_state->arcs_mtx); 1104 1105 list_insert_head(&new_state->arcs_list[ab->b_type], ab); 1106 1107 /* ghost elements have a ghost size */ 1108 if (GHOST_STATE(new_state)) { 1109 ASSERT(ab->b_datacnt == 0); 1110 ASSERT(ab->b_buf == NULL); 1111 to_delta = ab->b_size; 1112 } 1113 atomic_add_64(size, to_delta); 1114 1115 if (use_mutex) 1116 mutex_exit(&new_state->arcs_mtx); 1117 } 1118 } 1119 1120 ASSERT(!BUF_EMPTY(ab)); 1121 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab)) 1122 buf_hash_remove(ab); 1123 1124 /* adjust state sizes */ 1125 if (to_delta) 1126 atomic_add_64(&new_state->arcs_size, to_delta); 1127 if (from_delta) { 1128 ASSERT3U(old_state->arcs_size, >=, from_delta); 1129 atomic_add_64(&old_state->arcs_size, -from_delta); 1130 } 1131 ab->b_state = new_state; 1132 1133 /* adjust l2arc hdr stats */ 1134 if (new_state == arc_l2c_only) 1135 l2arc_hdr_stat_add(); 1136 else if (old_state == arc_l2c_only) 1137 l2arc_hdr_stat_remove(); 1138 } 1139 1140 void 1141 arc_space_consume(uint64_t space, arc_space_type_t type) 1142 { 1143 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 1144 1145 switch (type) { 1146 case ARC_SPACE_DATA: 1147 ARCSTAT_INCR(arcstat_data_size, space); 1148 break; 1149 case ARC_SPACE_OTHER: 1150 ARCSTAT_INCR(arcstat_other_size, space); 1151 break; 1152 case ARC_SPACE_HDRS: 1153 ARCSTAT_INCR(arcstat_hdr_size, space); 1154 break; 1155 case ARC_SPACE_L2HDRS: 1156 ARCSTAT_INCR(arcstat_l2_hdr_size, space); 1157 break; 1158 } 1159 1160 atomic_add_64(&arc_meta_used, space); 1161 atomic_add_64(&arc_size, space); 1162 } 1163 1164 void 1165 arc_space_return(uint64_t space, arc_space_type_t type) 1166 { 1167 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 1168 1169 switch (type) { 1170 case ARC_SPACE_DATA: 1171 ARCSTAT_INCR(arcstat_data_size, -space); 1172 break; 1173 case ARC_SPACE_OTHER: 1174 ARCSTAT_INCR(arcstat_other_size, -space); 1175 break; 1176 case ARC_SPACE_HDRS: 1177 ARCSTAT_INCR(arcstat_hdr_size, -space); 1178 break; 1179 case ARC_SPACE_L2HDRS: 1180 ARCSTAT_INCR(arcstat_l2_hdr_size, -space); 1181 break; 1182 } 1183 1184 ASSERT(arc_meta_used >= space); 1185 if (arc_meta_max < arc_meta_used) 1186 arc_meta_max = arc_meta_used; 1187 atomic_add_64(&arc_meta_used, -space); 1188 ASSERT(arc_size >= space); 1189 atomic_add_64(&arc_size, -space); 1190 } 1191 1192 void * 1193 arc_data_buf_alloc(uint64_t size) 1194 { 1195 if (arc_evict_needed(ARC_BUFC_DATA)) 1196 cv_signal(&arc_reclaim_thr_cv); 1197 atomic_add_64(&arc_size, size); 1198 return (zio_data_buf_alloc(size)); 1199 } 1200 1201 void 1202 arc_data_buf_free(void *buf, uint64_t size) 1203 { 1204 zio_data_buf_free(buf, size); 1205 ASSERT(arc_size >= size); 1206 atomic_add_64(&arc_size, -size); 1207 } 1208 1209 arc_buf_t * 1210 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 1211 { 1212 arc_buf_hdr_t *hdr; 1213 arc_buf_t *buf; 1214 1215 ASSERT3U(size, >, 0); 1216 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 1217 ASSERT(BUF_EMPTY(hdr)); 1218 hdr->b_size = size; 1219 hdr->b_type = type; 1220 
hdr->b_spa = spa_guid(spa); 1221 hdr->b_state = arc_anon; 1222 hdr->b_arc_access = 0; 1223 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1224 buf->b_hdr = hdr; 1225 buf->b_data = NULL; 1226 buf->b_efunc = NULL; 1227 buf->b_private = NULL; 1228 buf->b_next = NULL; 1229 hdr->b_buf = buf; 1230 arc_get_data_buf(buf); 1231 hdr->b_datacnt = 1; 1232 hdr->b_flags = 0; 1233 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1234 (void) refcount_add(&hdr->b_refcnt, tag); 1235 1236 return (buf); 1237 } 1238 1239 static char *arc_onloan_tag = "onloan"; 1240 1241 /* 1242 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in 1243 * flight data by arc_tempreserve_space() until they are "returned". Loaned 1244 * buffers must be returned to the arc before they can be used by the DMU or 1245 * freed. 1246 */ 1247 arc_buf_t * 1248 arc_loan_buf(spa_t *spa, int size) 1249 { 1250 arc_buf_t *buf; 1251 1252 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA); 1253 1254 atomic_add_64(&arc_loaned_bytes, size); 1255 return (buf); 1256 } 1257 1258 /* 1259 * Return a loaned arc buffer to the arc. 1260 */ 1261 void 1262 arc_return_buf(arc_buf_t *buf, void *tag) 1263 { 1264 arc_buf_hdr_t *hdr = buf->b_hdr; 1265 1266 ASSERT(buf->b_data != NULL); 1267 (void) refcount_add(&hdr->b_refcnt, tag); 1268 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag); 1269 1270 atomic_add_64(&arc_loaned_bytes, -hdr->b_size); 1271 } 1272 1273 /* Detach an arc_buf from a dbuf (tag) */ 1274 void 1275 arc_loan_inuse_buf(arc_buf_t *buf, void *tag) 1276 { 1277 arc_buf_hdr_t *hdr; 1278 1279 ASSERT(buf->b_data != NULL); 1280 hdr = buf->b_hdr; 1281 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag); 1282 (void) refcount_remove(&hdr->b_refcnt, tag); 1283 buf->b_efunc = NULL; 1284 buf->b_private = NULL; 1285 1286 atomic_add_64(&arc_loaned_bytes, hdr->b_size); 1287 } 1288 1289 static arc_buf_t * 1290 arc_buf_clone(arc_buf_t *from) 1291 { 1292 arc_buf_t *buf; 1293 arc_buf_hdr_t *hdr = from->b_hdr; 1294 uint64_t size = hdr->b_size; 1295 1296 ASSERT(hdr->b_state != arc_anon); 1297 1298 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1299 buf->b_hdr = hdr; 1300 buf->b_data = NULL; 1301 buf->b_efunc = NULL; 1302 buf->b_private = NULL; 1303 buf->b_next = hdr->b_buf; 1304 hdr->b_buf = buf; 1305 arc_get_data_buf(buf); 1306 bcopy(from->b_data, buf->b_data, size); 1307 hdr->b_datacnt += 1; 1308 return (buf); 1309 } 1310 1311 void 1312 arc_buf_add_ref(arc_buf_t *buf, void* tag) 1313 { 1314 arc_buf_hdr_t *hdr; 1315 kmutex_t *hash_lock; 1316 1317 /* 1318 * Check to see if this buffer is evicted. Callers 1319 * must verify b_data != NULL to know if the add_ref 1320 * was successful. 1321 */ 1322 mutex_enter(&buf->b_evict_lock); 1323 if (buf->b_data == NULL) { 1324 mutex_exit(&buf->b_evict_lock); 1325 return; 1326 } 1327 hash_lock = HDR_LOCK(buf->b_hdr); 1328 mutex_enter(hash_lock); 1329 hdr = buf->b_hdr; 1330 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1331 mutex_exit(&buf->b_evict_lock); 1332 1333 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 1334 add_reference(hdr, hash_lock, tag); 1335 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 1336 arc_access(hdr, hash_lock); 1337 mutex_exit(hash_lock); 1338 ARCSTAT_BUMP(arcstat_hits); 1339 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 1340 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 1341 data, metadata, hits); 1342 } 1343 1344 /* 1345 * Free the arc data buffer. If it is an l2arc write in progress, 1346 * the buffer is placed on l2arc_free_on_write to be freed later. 
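 * (Deferring the free in that case keeps the data stable while the
 * in-flight L2ARC device write may still be reading it; the deferred
 * entries are reclaimed after that write completes.)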
1347 */ 1348 static void 1349 arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t), 1350 void *data, size_t size) 1351 { 1352 if (HDR_L2_WRITING(hdr)) { 1353 l2arc_data_free_t *df; 1354 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); 1355 df->l2df_data = data; 1356 df->l2df_size = size; 1357 df->l2df_func = free_func; 1358 mutex_enter(&l2arc_free_on_write_mtx); 1359 list_insert_head(l2arc_free_on_write, df); 1360 mutex_exit(&l2arc_free_on_write_mtx); 1361 ARCSTAT_BUMP(arcstat_l2_free_on_write); 1362 } else { 1363 free_func(data, size); 1364 } 1365 } 1366 1367 static void 1368 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 1369 { 1370 arc_buf_t **bufp; 1371 1372 /* free up data associated with the buf */ 1373 if (buf->b_data) { 1374 arc_state_t *state = buf->b_hdr->b_state; 1375 uint64_t size = buf->b_hdr->b_size; 1376 arc_buf_contents_t type = buf->b_hdr->b_type; 1377 1378 arc_cksum_verify(buf); 1379 1380 if (!recycle) { 1381 if (type == ARC_BUFC_METADATA) { 1382 arc_buf_data_free(buf->b_hdr, zio_buf_free, 1383 buf->b_data, size); 1384 arc_space_return(size, ARC_SPACE_DATA); 1385 } else { 1386 ASSERT(type == ARC_BUFC_DATA); 1387 arc_buf_data_free(buf->b_hdr, 1388 zio_data_buf_free, buf->b_data, size); 1389 ARCSTAT_INCR(arcstat_data_size, -size); 1390 atomic_add_64(&arc_size, -size); 1391 } 1392 } 1393 if (list_link_active(&buf->b_hdr->b_arc_node)) { 1394 uint64_t *cnt = &state->arcs_lsize[type]; 1395 1396 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 1397 ASSERT(state != arc_anon); 1398 1399 ASSERT3U(*cnt, >=, size); 1400 atomic_add_64(cnt, -size); 1401 } 1402 ASSERT3U(state->arcs_size, >=, size); 1403 atomic_add_64(&state->arcs_size, -size); 1404 buf->b_data = NULL; 1405 ASSERT(buf->b_hdr->b_datacnt > 0); 1406 buf->b_hdr->b_datacnt -= 1; 1407 } 1408 1409 /* only remove the buf if requested */ 1410 if (!all) 1411 return; 1412 1413 /* remove the buf from the hdr list */ 1414 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1415 continue; 1416 *bufp = buf->b_next; 1417 buf->b_next = NULL; 1418 1419 ASSERT(buf->b_efunc == NULL); 1420 1421 /* clean up the buf */ 1422 buf->b_hdr = NULL; 1423 kmem_cache_free(buf_cache, buf); 1424 } 1425 1426 static void 1427 arc_hdr_destroy(arc_buf_hdr_t *hdr) 1428 { 1429 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1430 ASSERT3P(hdr->b_state, ==, arc_anon); 1431 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1432 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr; 1433 1434 if (l2hdr != NULL) { 1435 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx); 1436 /* 1437 * To prevent arc_free() and l2arc_evict() from 1438 * attempting to free the same buffer at the same time, 1439 * a FREE_IN_PROGRESS flag is given to arc_free() to 1440 * give it priority. l2arc_evict() can't destroy this 1441 * header while we are waiting on l2arc_buflist_mtx. 1442 * 1443 * The hdr may be removed from l2ad_buflist before we 1444 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked. 
1445 */ 1446 if (!buflist_held) { 1447 mutex_enter(&l2arc_buflist_mtx); 1448 l2hdr = hdr->b_l2hdr; 1449 } 1450 1451 if (l2hdr != NULL) { 1452 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 1453 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1454 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 1455 if (hdr->b_state == arc_l2c_only) 1456 l2arc_hdr_stat_remove(); 1457 hdr->b_l2hdr = NULL; 1458 } 1459 1460 if (!buflist_held) 1461 mutex_exit(&l2arc_buflist_mtx); 1462 } 1463 1464 if (!BUF_EMPTY(hdr)) { 1465 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1466 buf_discard_identity(hdr); 1467 } 1468 while (hdr->b_buf) { 1469 arc_buf_t *buf = hdr->b_buf; 1470 1471 if (buf->b_efunc) { 1472 mutex_enter(&arc_eviction_mtx); 1473 mutex_enter(&buf->b_evict_lock); 1474 ASSERT(buf->b_hdr != NULL); 1475 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1476 hdr->b_buf = buf->b_next; 1477 buf->b_hdr = &arc_eviction_hdr; 1478 buf->b_next = arc_eviction_list; 1479 arc_eviction_list = buf; 1480 mutex_exit(&buf->b_evict_lock); 1481 mutex_exit(&arc_eviction_mtx); 1482 } else { 1483 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1484 } 1485 } 1486 if (hdr->b_freeze_cksum != NULL) { 1487 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1488 hdr->b_freeze_cksum = NULL; 1489 } 1490 if (hdr->b_thawed) { 1491 kmem_free(hdr->b_thawed, 1); 1492 hdr->b_thawed = NULL; 1493 } 1494 1495 ASSERT(!list_link_active(&hdr->b_arc_node)); 1496 ASSERT3P(hdr->b_hash_next, ==, NULL); 1497 ASSERT3P(hdr->b_acb, ==, NULL); 1498 kmem_cache_free(hdr_cache, hdr); 1499 } 1500 1501 void 1502 arc_buf_free(arc_buf_t *buf, void *tag) 1503 { 1504 arc_buf_hdr_t *hdr = buf->b_hdr; 1505 int hashed = hdr->b_state != arc_anon; 1506 1507 ASSERT(buf->b_efunc == NULL); 1508 ASSERT(buf->b_data != NULL); 1509 1510 if (hashed) { 1511 kmutex_t *hash_lock = HDR_LOCK(hdr); 1512 1513 mutex_enter(hash_lock); 1514 hdr = buf->b_hdr; 1515 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1516 1517 (void) remove_reference(hdr, hash_lock, tag); 1518 if (hdr->b_datacnt > 1) { 1519 arc_buf_destroy(buf, FALSE, TRUE); 1520 } else { 1521 ASSERT(buf == hdr->b_buf); 1522 ASSERT(buf->b_efunc == NULL); 1523 hdr->b_flags |= ARC_BUF_AVAILABLE; 1524 } 1525 mutex_exit(hash_lock); 1526 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1527 int destroy_hdr; 1528 /* 1529 * We are in the middle of an async write. Don't destroy 1530 * this buffer unless the write completes before we finish 1531 * decrementing the reference count. 
1532 */ 1533 mutex_enter(&arc_eviction_mtx); 1534 (void) remove_reference(hdr, NULL, tag); 1535 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1536 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1537 mutex_exit(&arc_eviction_mtx); 1538 if (destroy_hdr) 1539 arc_hdr_destroy(hdr); 1540 } else { 1541 if (remove_reference(hdr, NULL, tag) > 0) 1542 arc_buf_destroy(buf, FALSE, TRUE); 1543 else 1544 arc_hdr_destroy(hdr); 1545 } 1546 } 1547 1548 int 1549 arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1550 { 1551 arc_buf_hdr_t *hdr = buf->b_hdr; 1552 kmutex_t *hash_lock = HDR_LOCK(hdr); 1553 int no_callback = (buf->b_efunc == NULL); 1554 1555 if (hdr->b_state == arc_anon) { 1556 ASSERT(hdr->b_datacnt == 1); 1557 arc_buf_free(buf, tag); 1558 return (no_callback); 1559 } 1560 1561 mutex_enter(hash_lock); 1562 hdr = buf->b_hdr; 1563 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1564 ASSERT(hdr->b_state != arc_anon); 1565 ASSERT(buf->b_data != NULL); 1566 1567 (void) remove_reference(hdr, hash_lock, tag); 1568 if (hdr->b_datacnt > 1) { 1569 if (no_callback) 1570 arc_buf_destroy(buf, FALSE, TRUE); 1571 } else if (no_callback) { 1572 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1573 ASSERT(buf->b_efunc == NULL); 1574 hdr->b_flags |= ARC_BUF_AVAILABLE; 1575 } 1576 ASSERT(no_callback || hdr->b_datacnt > 1 || 1577 refcount_is_zero(&hdr->b_refcnt)); 1578 mutex_exit(hash_lock); 1579 return (no_callback); 1580 } 1581 1582 int 1583 arc_buf_size(arc_buf_t *buf) 1584 { 1585 return (buf->b_hdr->b_size); 1586 } 1587 1588 /* 1589 * Evict buffers from list until we've removed the specified number of 1590 * bytes. Move the removed buffers to the appropriate evict state. 1591 * If the recycle flag is set, then attempt to "recycle" a buffer: 1592 * - look for a buffer to evict that is `bytes' long. 1593 * - return the data block from this buffer rather than freeing it. 1594 * This flag is used by callers that are trying to make space for a 1595 * new buffer in a full arc cache. 1596 * 1597 * This function makes a "best effort". It skips over any buffers 1598 * it can't get a hash_lock on, and so may not catch all candidates. 1599 * It may also return without evicting as much space as requested. 1600 */ 1601 static void * 1602 arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle, 1603 arc_buf_contents_t type) 1604 { 1605 arc_state_t *evicted_state; 1606 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1607 arc_buf_hdr_t *ab, *ab_prev = NULL; 1608 list_t *list = &state->arcs_list[type]; 1609 kmutex_t *hash_lock; 1610 boolean_t have_lock; 1611 void *stolen = NULL; 1612 1613 ASSERT(state == arc_mru || state == arc_mfu); 1614 1615 evicted_state = (state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 1616 1617 mutex_enter(&state->arcs_mtx); 1618 mutex_enter(&evicted_state->arcs_mtx); 1619 1620 for (ab = list_tail(list); ab; ab = ab_prev) { 1621 ab_prev = list_prev(list, ab); 1622 /* prefetch buffers have a minimum lifespan */ 1623 if (HDR_IO_IN_PROGRESS(ab) || 1624 (spa && ab->b_spa != spa) || 1625 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1626 ddi_get_lbolt() - ab->b_arc_access < 1627 arc_min_prefetch_lifespan)) { 1628 skipped++; 1629 continue; 1630 } 1631 /* "lookahead" for better eviction candidate */ 1632 if (recycle && ab->b_size != bytes && 1633 ab_prev && ab_prev->b_size == bytes) 1634 continue; 1635 hash_lock = HDR_LOCK(ab); 1636 have_lock = MUTEX_HELD(hash_lock); 1637 if (have_lock || mutex_tryenter(hash_lock)) { 1638 ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 1639 ASSERT(ab->b_datacnt > 0); 1640 while (ab->b_buf) { 1641 arc_buf_t *buf = ab->b_buf; 1642 if (!mutex_tryenter(&buf->b_evict_lock)) { 1643 missed += 1; 1644 break; 1645 } 1646 if (buf->b_data) { 1647 bytes_evicted += ab->b_size; 1648 if (recycle && ab->b_type == type && 1649 ab->b_size == bytes && 1650 !HDR_L2_WRITING(ab)) { 1651 stolen = buf->b_data; 1652 recycle = FALSE; 1653 } 1654 } 1655 if (buf->b_efunc) { 1656 mutex_enter(&arc_eviction_mtx); 1657 arc_buf_destroy(buf, 1658 buf->b_data == stolen, FALSE); 1659 ab->b_buf = buf->b_next; 1660 buf->b_hdr = &arc_eviction_hdr; 1661 buf->b_next = arc_eviction_list; 1662 arc_eviction_list = buf; 1663 mutex_exit(&arc_eviction_mtx); 1664 mutex_exit(&buf->b_evict_lock); 1665 } else { 1666 mutex_exit(&buf->b_evict_lock); 1667 arc_buf_destroy(buf, 1668 buf->b_data == stolen, TRUE); 1669 } 1670 } 1671 1672 if (ab->b_l2hdr) { 1673 ARCSTAT_INCR(arcstat_evict_l2_cached, 1674 ab->b_size); 1675 } else { 1676 if (l2arc_write_eligible(ab->b_spa, ab)) { 1677 ARCSTAT_INCR(arcstat_evict_l2_eligible, 1678 ab->b_size); 1679 } else { 1680 ARCSTAT_INCR( 1681 arcstat_evict_l2_ineligible, 1682 ab->b_size); 1683 } 1684 } 1685 1686 if (ab->b_datacnt == 0) { 1687 arc_change_state(evicted_state, ab, hash_lock); 1688 ASSERT(HDR_IN_HASH_TABLE(ab)); 1689 ab->b_flags |= ARC_IN_HASH_TABLE; 1690 ab->b_flags &= ~ARC_BUF_AVAILABLE; 1691 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 1692 } 1693 if (!have_lock) 1694 mutex_exit(hash_lock); 1695 if (bytes >= 0 && bytes_evicted >= bytes) 1696 break; 1697 } else { 1698 missed += 1; 1699 } 1700 } 1701 1702 mutex_exit(&evicted_state->arcs_mtx); 1703 mutex_exit(&state->arcs_mtx); 1704 1705 if (bytes_evicted < bytes) 1706 dprintf("only evicted %lld bytes from %x", 1707 (longlong_t)bytes_evicted, state); 1708 1709 if (skipped) 1710 ARCSTAT_INCR(arcstat_evict_skip, skipped); 1711 1712 if (missed) 1713 ARCSTAT_INCR(arcstat_mutex_miss, missed); 1714 1715 /* 1716 * We have just evicted some date into the ghost state, make 1717 * sure we also adjust the ghost state size if necessary. 
1718 */ 1719 if (arc_no_grow && 1720 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 1721 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 1722 arc_mru_ghost->arcs_size - arc_c; 1723 1724 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 1725 int64_t todelete = 1726 MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 1727 arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1728 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 1729 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 1730 arc_mru_ghost->arcs_size + 1731 arc_mfu_ghost->arcs_size - arc_c); 1732 arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1733 } 1734 } 1735 1736 return (stolen); 1737 } 1738 1739 /* 1740 * Remove buffers from list until we've removed the specified number of 1741 * bytes. Destroy the buffers that are removed. 1742 */ 1743 static void 1744 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes) 1745 { 1746 arc_buf_hdr_t *ab, *ab_prev; 1747 arc_buf_hdr_t marker = { 0 }; 1748 list_t *list = &state->arcs_list[ARC_BUFC_DATA]; 1749 kmutex_t *hash_lock; 1750 uint64_t bytes_deleted = 0; 1751 uint64_t bufs_skipped = 0; 1752 1753 ASSERT(GHOST_STATE(state)); 1754 top: 1755 mutex_enter(&state->arcs_mtx); 1756 for (ab = list_tail(list); ab; ab = ab_prev) { 1757 ab_prev = list_prev(list, ab); 1758 if (spa && ab->b_spa != spa) 1759 continue; 1760 1761 /* ignore markers */ 1762 if (ab->b_spa == 0) 1763 continue; 1764 1765 hash_lock = HDR_LOCK(ab); 1766 /* caller may be trying to modify this buffer, skip it */ 1767 if (MUTEX_HELD(hash_lock)) 1768 continue; 1769 if (mutex_tryenter(hash_lock)) { 1770 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1771 ASSERT(ab->b_buf == NULL); 1772 ARCSTAT_BUMP(arcstat_deleted); 1773 bytes_deleted += ab->b_size; 1774 1775 if (ab->b_l2hdr != NULL) { 1776 /* 1777 * This buffer is cached on the 2nd Level ARC; 1778 * don't destroy the header. 1779 */ 1780 arc_change_state(arc_l2c_only, ab, hash_lock); 1781 mutex_exit(hash_lock); 1782 } else { 1783 arc_change_state(arc_anon, ab, hash_lock); 1784 mutex_exit(hash_lock); 1785 arc_hdr_destroy(ab); 1786 } 1787 1788 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1789 if (bytes >= 0 && bytes_deleted >= bytes) 1790 break; 1791 } else if (bytes < 0) { 1792 /* 1793 * Insert a list marker and then wait for the 1794 * hash lock to become available. Once its 1795 * available, restart from where we left off. 
1796 */ 1797 list_insert_after(list, ab, &marker); 1798 mutex_exit(&state->arcs_mtx); 1799 mutex_enter(hash_lock); 1800 mutex_exit(hash_lock); 1801 mutex_enter(&state->arcs_mtx); 1802 ab_prev = list_prev(list, &marker); 1803 list_remove(list, &marker); 1804 } else 1805 bufs_skipped += 1; 1806 } 1807 mutex_exit(&state->arcs_mtx); 1808 1809 if (list == &state->arcs_list[ARC_BUFC_DATA] && 1810 (bytes < 0 || bytes_deleted < bytes)) { 1811 list = &state->arcs_list[ARC_BUFC_METADATA]; 1812 goto top; 1813 } 1814 1815 if (bufs_skipped) { 1816 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1817 ASSERT(bytes >= 0); 1818 } 1819 1820 if (bytes_deleted < bytes) 1821 dprintf("only deleted %lld bytes from %p", 1822 (longlong_t)bytes_deleted, state); 1823 } 1824 1825 static void 1826 arc_adjust(void) 1827 { 1828 int64_t adjustment, delta; 1829 1830 /* 1831 * Adjust MRU size 1832 */ 1833 1834 adjustment = MIN((int64_t)(arc_size - arc_c), 1835 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - 1836 arc_p)); 1837 1838 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 1839 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment); 1840 (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA); 1841 adjustment -= delta; 1842 } 1843 1844 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1845 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment); 1846 (void) arc_evict(arc_mru, NULL, delta, FALSE, 1847 ARC_BUFC_METADATA); 1848 } 1849 1850 /* 1851 * Adjust MFU size 1852 */ 1853 1854 adjustment = arc_size - arc_c; 1855 1856 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 1857 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); 1858 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA); 1859 adjustment -= delta; 1860 } 1861 1862 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1863 int64_t delta = MIN(adjustment, 1864 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); 1865 (void) arc_evict(arc_mfu, NULL, delta, FALSE, 1866 ARC_BUFC_METADATA); 1867 } 1868 1869 /* 1870 * Adjust ghost lists 1871 */ 1872 1873 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; 1874 1875 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { 1876 delta = MIN(arc_mru_ghost->arcs_size, adjustment); 1877 arc_evict_ghost(arc_mru_ghost, NULL, delta); 1878 } 1879 1880 adjustment = 1881 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; 1882 1883 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { 1884 delta = MIN(arc_mfu_ghost->arcs_size, adjustment); 1885 arc_evict_ghost(arc_mfu_ghost, NULL, delta); 1886 } 1887 } 1888 1889 static void 1890 arc_do_user_evicts(void) 1891 { 1892 mutex_enter(&arc_eviction_mtx); 1893 while (arc_eviction_list != NULL) { 1894 arc_buf_t *buf = arc_eviction_list; 1895 arc_eviction_list = buf->b_next; 1896 mutex_enter(&buf->b_evict_lock); 1897 buf->b_hdr = NULL; 1898 mutex_exit(&buf->b_evict_lock); 1899 mutex_exit(&arc_eviction_mtx); 1900 1901 if (buf->b_efunc != NULL) 1902 VERIFY(buf->b_efunc(buf) == 0); 1903 1904 buf->b_efunc = NULL; 1905 buf->b_private = NULL; 1906 kmem_cache_free(buf_cache, buf); 1907 mutex_enter(&arc_eviction_mtx); 1908 } 1909 mutex_exit(&arc_eviction_mtx); 1910 } 1911 1912 /* 1913 * Flush all *evictable* data from the cache for the given spa. 1914 * NOTE: this will not touch "active" (i.e. referenced) data. 
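* Passing a NULL spa flushes every pool: each list is drained in a
* loop with a byte count of -1, meaning "evict everything evictable".
* With a specific spa, a single pass is made over each list to evict
* just that pool's buffers.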
1915 */ 1916 void 1917 arc_flush(spa_t *spa) 1918 { 1919 uint64_t guid = 0; 1920 1921 if (spa) 1922 guid = spa_guid(spa); 1923 1924 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { 1925 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA); 1926 if (spa) 1927 break; 1928 } 1929 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { 1930 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA); 1931 if (spa) 1932 break; 1933 } 1934 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { 1935 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA); 1936 if (spa) 1937 break; 1938 } 1939 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { 1940 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA); 1941 if (spa) 1942 break; 1943 } 1944 1945 arc_evict_ghost(arc_mru_ghost, guid, -1); 1946 arc_evict_ghost(arc_mfu_ghost, guid, -1); 1947 1948 mutex_enter(&arc_reclaim_thr_lock); 1949 arc_do_user_evicts(); 1950 mutex_exit(&arc_reclaim_thr_lock); 1951 ASSERT(spa || arc_eviction_list == NULL); 1952 } 1953 1954 void 1955 arc_shrink(void) 1956 { 1957 if (arc_c > arc_c_min) { 1958 uint64_t to_free; 1959 1960 #ifdef _KERNEL 1961 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 1962 #else 1963 to_free = arc_c >> arc_shrink_shift; 1964 #endif 1965 if (arc_c > arc_c_min + to_free) 1966 atomic_add_64(&arc_c, -to_free); 1967 else 1968 arc_c = arc_c_min; 1969 1970 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 1971 if (arc_c > arc_size) 1972 arc_c = MAX(arc_size, arc_c_min); 1973 if (arc_p > arc_c) 1974 arc_p = (arc_c >> 1); 1975 ASSERT(arc_c >= arc_c_min); 1976 ASSERT((int64_t)arc_p >= 0); 1977 } 1978 1979 if (arc_size > arc_c) 1980 arc_adjust(); 1981 } 1982 1983 static int 1984 arc_reclaim_needed(void) 1985 { 1986 uint64_t extra; 1987 1988 #ifdef _KERNEL 1989 1990 if (needfree) 1991 return (1); 1992 1993 /* 1994 * take 'desfree' extra pages, so we reclaim sooner, rather than later 1995 */ 1996 extra = desfree; 1997 1998 /* 1999 * check that we're out of range of the pageout scanner. It starts to 2000 * schedule paging if freemem is less than lotsfree and needfree. 2001 * lotsfree is the high-water mark for pageout, and needfree is the 2002 * number of needed free pages. We add extra pages here to make sure 2003 * the scanner doesn't start up while we're freeing memory. 2004 */ 2005 if (freemem < lotsfree + needfree + extra) 2006 return (1); 2007 2008 /* 2009 * check to make sure that swapfs has enough space so that anon 2010 * reservations can still succeed. anon_resvmem() checks that the 2011 * availrmem is greater than swapfs_minfree, and the number of reserved 2012 * swap pages. We also add a bit of extra here just to prevent 2013 * circumstances from getting really dire. 2014 */ 2015 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 2016 return (1); 2017 2018 #if defined(__i386) 2019 /* 2020 * If we're on an i386 platform, it's possible that we'll exhaust the 2021 * kernel heap space before we ever run out of available physical 2022 * memory. Most checks of the size of the heap_area compare against 2023 * tune.t_minarmem, which is the minimum available real memory that we 2024 * can have in the system. However, this is generally fixed at 25 pages 2025 * which is so low that it's useless. In this comparison, we seek to 2026 * calculate the total heap-size, and reclaim if more than 3/4ths of the 2027 * heap is allocated. 
(Or, in the calculation, if less than 1/4th is 2028 * free) 2029 */ 2030 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 2031 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 2032 return (1); 2033 #endif 2034 2035 #else 2036 if (spa_get_random(100) == 0) 2037 return (1); 2038 #endif 2039 return (0); 2040 } 2041 2042 static void 2043 arc_kmem_reap_now(arc_reclaim_strategy_t strat) 2044 { 2045 size_t i; 2046 kmem_cache_t *prev_cache = NULL; 2047 kmem_cache_t *prev_data_cache = NULL; 2048 extern kmem_cache_t *zio_buf_cache[]; 2049 extern kmem_cache_t *zio_data_buf_cache[]; 2050 2051 #ifdef _KERNEL 2052 if (arc_meta_used >= arc_meta_limit) { 2053 /* 2054 * We are exceeding our meta-data cache limit. 2055 * Purge some DNLC entries to release holds on meta-data. 2056 */ 2057 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 2058 } 2059 #if defined(__i386) 2060 /* 2061 * Reclaim unused memory from all kmem caches. 2062 */ 2063 kmem_reap(); 2064 #endif 2065 #endif 2066 2067 /* 2068 * An aggressive reclamation will shrink the cache size as well as 2069 * reap free buffers from the arc kmem caches. 2070 */ 2071 if (strat == ARC_RECLAIM_AGGR) 2072 arc_shrink(); 2073 2074 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 2075 if (zio_buf_cache[i] != prev_cache) { 2076 prev_cache = zio_buf_cache[i]; 2077 kmem_cache_reap_now(zio_buf_cache[i]); 2078 } 2079 if (zio_data_buf_cache[i] != prev_data_cache) { 2080 prev_data_cache = zio_data_buf_cache[i]; 2081 kmem_cache_reap_now(zio_data_buf_cache[i]); 2082 } 2083 } 2084 kmem_cache_reap_now(buf_cache); 2085 kmem_cache_reap_now(hdr_cache); 2086 } 2087 2088 static void 2089 arc_reclaim_thread(void) 2090 { 2091 clock_t growtime = 0; 2092 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 2093 callb_cpr_t cpr; 2094 2095 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 2096 2097 mutex_enter(&arc_reclaim_thr_lock); 2098 while (arc_thread_exit == 0) { 2099 if (arc_reclaim_needed()) { 2100 2101 if (arc_no_grow) { 2102 if (last_reclaim == ARC_RECLAIM_CONS) { 2103 last_reclaim = ARC_RECLAIM_AGGR; 2104 } else { 2105 last_reclaim = ARC_RECLAIM_CONS; 2106 } 2107 } else { 2108 arc_no_grow = TRUE; 2109 last_reclaim = ARC_RECLAIM_AGGR; 2110 membar_producer(); 2111 } 2112 2113 /* reset the growth delay for every reclaim */ 2114 growtime = ddi_get_lbolt() + (arc_grow_retry * hz); 2115 2116 arc_kmem_reap_now(last_reclaim); 2117 arc_warm = B_TRUE; 2118 2119 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) { 2120 arc_no_grow = FALSE; 2121 } 2122 2123 arc_adjust(); 2124 2125 if (arc_eviction_list != NULL) 2126 arc_do_user_evicts(); 2127 2128 /* block until needed, or one second, whichever is shorter */ 2129 CALLB_CPR_SAFE_BEGIN(&cpr); 2130 (void) cv_timedwait(&arc_reclaim_thr_cv, 2131 &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz)); 2132 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 2133 } 2134 2135 arc_thread_exit = 0; 2136 cv_broadcast(&arc_reclaim_thr_cv); 2137 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 2138 thread_exit(); 2139 } 2140 2141 /* 2142 * Adapt arc info given the number of bytes we are trying to add and 2143 * the state that we are comming from. This function is only called 2144 * when we are adding new content to the cache. 
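* For example, a hit in the MRU ghost list while the MFU ghost list
* is three times its size bumps arc_p by 3 * bytes (the multiplier
* is capped at 10, and arc_p at arc_c - arc_p_min); a hit in the MFU
* ghost list shrinks arc_p symmetrically, never below arc_p_min.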
2145 */ 2146 static void 2147 arc_adapt(int bytes, arc_state_t *state) 2148 { 2149 int mult; 2150 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 2151 2152 if (state == arc_l2c_only) 2153 return; 2154 2155 ASSERT(bytes > 0); 2156 /* 2157 * Adapt the target size of the MRU list: 2158 * - if we just hit in the MRU ghost list, then increase 2159 * the target size of the MRU list. 2160 * - if we just hit in the MFU ghost list, then increase 2161 * the target size of the MFU list by decreasing the 2162 * target size of the MRU list. 2163 */ 2164 if (state == arc_mru_ghost) { 2165 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 2166 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 2167 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 2168 2169 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 2170 } else if (state == arc_mfu_ghost) { 2171 uint64_t delta; 2172 2173 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 2174 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2175 mult = MIN(mult, 10); 2176 2177 delta = MIN(bytes * mult, arc_p); 2178 arc_p = MAX(arc_p_min, arc_p - delta); 2179 } 2180 ASSERT((int64_t)arc_p >= 0); 2181 2182 if (arc_reclaim_needed()) { 2183 cv_signal(&arc_reclaim_thr_cv); 2184 return; 2185 } 2186 2187 if (arc_no_grow) 2188 return; 2189 2190 if (arc_c >= arc_c_max) 2191 return; 2192 2193 /* 2194 * If we're within (2 * maxblocksize) bytes of the target 2195 * cache size, increment the target cache size 2196 */ 2197 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2198 atomic_add_64(&arc_c, (int64_t)bytes); 2199 if (arc_c > arc_c_max) 2200 arc_c = arc_c_max; 2201 else if (state == arc_anon) 2202 atomic_add_64(&arc_p, (int64_t)bytes); 2203 if (arc_p > arc_c) 2204 arc_p = arc_c; 2205 } 2206 ASSERT((int64_t)arc_p >= 0); 2207 } 2208 2209 /* 2210 * Check if the cache has reached its limits and eviction is required 2211 * prior to insert. 2212 */ 2213 static int 2214 arc_evict_needed(arc_buf_contents_t type) 2215 { 2216 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2217 return (1); 2218 2219 #ifdef _KERNEL 2220 /* 2221 * If zio data pages are being allocated out of a separate heap segment, 2222 * then enforce that the size of available vmem for this area remains 2223 * above about 1/32nd free. 2224 */ 2225 if (type == ARC_BUFC_DATA && zio_arena != NULL && 2226 vmem_size(zio_arena, VMEM_FREE) < 2227 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 2228 return (1); 2229 #endif 2230 2231 if (arc_reclaim_needed()) 2232 return (1); 2233 2234 return (arc_size > arc_c); 2235 } 2236 2237 /* 2238 * The buffer, supplied as the first argument, needs a data block. 2239 * So, if we are at cache max, determine which cache should be victimized. 2240 * We have the following cases: 2241 * 2242 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2243 * In this situation if we're out of space, but the resident size of the MFU is 2244 * under the limit, victimize the MFU cache to satisfy this insertion request. 2245 * 2246 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2247 * Here, we've used up all of the available space for the MRU, so we need to 2248 * evict from our own cache instead. Evict from the set of resident MRU 2249 * entries. 2250 * 2251 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2252 * c minus p represents the MFU space in the cache, since p is the size of the 2253 * cache that is dedicated to the MRU. 
In this situation there's still space on 2254 * the MFU side, so the MRU side needs to be victimized. 2255 * 2256 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2257 * MFU's resident set is consuming more space than it has been allotted. In 2258 * this situation, we must victimize our own cache, the MFU, for this insertion. 2259 */ 2260 static void 2261 arc_get_data_buf(arc_buf_t *buf) 2262 { 2263 arc_state_t *state = buf->b_hdr->b_state; 2264 uint64_t size = buf->b_hdr->b_size; 2265 arc_buf_contents_t type = buf->b_hdr->b_type; 2266 2267 arc_adapt(size, state); 2268 2269 /* 2270 * We have not yet reached cache maximum size, 2271 * just allocate a new buffer. 2272 */ 2273 if (!arc_evict_needed(type)) { 2274 if (type == ARC_BUFC_METADATA) { 2275 buf->b_data = zio_buf_alloc(size); 2276 arc_space_consume(size, ARC_SPACE_DATA); 2277 } else { 2278 ASSERT(type == ARC_BUFC_DATA); 2279 buf->b_data = zio_data_buf_alloc(size); 2280 ARCSTAT_INCR(arcstat_data_size, size); 2281 atomic_add_64(&arc_size, size); 2282 } 2283 goto out; 2284 } 2285 2286 /* 2287 * If we are prefetching from the mfu ghost list, this buffer 2288 * will end up on the mru list; so steal space from there. 2289 */ 2290 if (state == arc_mfu_ghost) 2291 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2292 else if (state == arc_mru_ghost) 2293 state = arc_mru; 2294 2295 if (state == arc_mru || state == arc_anon) { 2296 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2297 state = (arc_mfu->arcs_lsize[type] >= size && 2298 arc_p > mru_used) ? arc_mfu : arc_mru; 2299 } else { 2300 /* MFU cases */ 2301 uint64_t mfu_space = arc_c - arc_p; 2302 state = (arc_mru->arcs_lsize[type] >= size && 2303 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2304 } 2305 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 2306 if (type == ARC_BUFC_METADATA) { 2307 buf->b_data = zio_buf_alloc(size); 2308 arc_space_consume(size, ARC_SPACE_DATA); 2309 } else { 2310 ASSERT(type == ARC_BUFC_DATA); 2311 buf->b_data = zio_data_buf_alloc(size); 2312 ARCSTAT_INCR(arcstat_data_size, size); 2313 atomic_add_64(&arc_size, size); 2314 } 2315 ARCSTAT_BUMP(arcstat_recycle_miss); 2316 } 2317 ASSERT(buf->b_data != NULL); 2318 out: 2319 /* 2320 * Update the state size. Note that ghost states have a 2321 * "ghost size" and so don't need to be updated. 2322 */ 2323 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2324 arc_buf_hdr_t *hdr = buf->b_hdr; 2325 2326 atomic_add_64(&hdr->b_state->arcs_size, size); 2327 if (list_link_active(&hdr->b_arc_node)) { 2328 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2329 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2330 } 2331 /* 2332 * If we are growing the cache, and we are adding anonymous 2333 * data, and we have outgrown arc_p, update arc_p 2334 */ 2335 if (arc_size < arc_c && hdr->b_state == arc_anon && 2336 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2337 arc_p = MIN(arc_c, arc_p + size); 2338 } 2339 } 2340 2341 /* 2342 * This routine is called whenever a buffer is accessed. 2343 * NOTE: the hash lock is dropped in this function. 2344 */ 2345 static void 2346 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2347 { 2348 clock_t now; 2349 2350 ASSERT(MUTEX_HELD(hash_lock)); 2351 2352 if (buf->b_state == arc_anon) { 2353 /* 2354 * This buffer is not in the cache, and does not 2355 * appear in our "ghost" list. Add the new buffer 2356 * to the MRU state. 
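* (A brand new buffer always starts life in the MRU state; it can
* only be promoted to MFU by a later access, handled in the cases
* below.)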
2357 */ 2358 2359 ASSERT(buf->b_arc_access == 0); 2360 buf->b_arc_access = ddi_get_lbolt(); 2361 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2362 arc_change_state(arc_mru, buf, hash_lock); 2363 2364 } else if (buf->b_state == arc_mru) { 2365 now = ddi_get_lbolt(); 2366 2367 /* 2368 * If this buffer is here because of a prefetch, then either: 2369 * - clear the flag if this is a "referencing" read 2370 * (any subsequent access will bump this into the MFU state). 2371 * or 2372 * - move the buffer to the head of the list if this is 2373 * another prefetch (to make it less likely to be evicted). 2374 */ 2375 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2376 if (refcount_count(&buf->b_refcnt) == 0) { 2377 ASSERT(list_link_active(&buf->b_arc_node)); 2378 } else { 2379 buf->b_flags &= ~ARC_PREFETCH; 2380 ARCSTAT_BUMP(arcstat_mru_hits); 2381 } 2382 buf->b_arc_access = now; 2383 return; 2384 } 2385 2386 /* 2387 * This buffer has been "accessed" only once so far, 2388 * but it is still in the cache. Move it to the MFU 2389 * state. 2390 */ 2391 if (now > buf->b_arc_access + ARC_MINTIME) { 2392 /* 2393 * More than 125ms have passed since we 2394 * instantiated this buffer. Move it to the 2395 * most frequently used state. 2396 */ 2397 buf->b_arc_access = now; 2398 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2399 arc_change_state(arc_mfu, buf, hash_lock); 2400 } 2401 ARCSTAT_BUMP(arcstat_mru_hits); 2402 } else if (buf->b_state == arc_mru_ghost) { 2403 arc_state_t *new_state; 2404 /* 2405 * This buffer has been "accessed" recently, but 2406 * was evicted from the cache. Move it to the 2407 * MFU state. 2408 */ 2409 2410 if (buf->b_flags & ARC_PREFETCH) { 2411 new_state = arc_mru; 2412 if (refcount_count(&buf->b_refcnt) > 0) 2413 buf->b_flags &= ~ARC_PREFETCH; 2414 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2415 } else { 2416 new_state = arc_mfu; 2417 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2418 } 2419 2420 buf->b_arc_access = ddi_get_lbolt(); 2421 arc_change_state(new_state, buf, hash_lock); 2422 2423 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2424 } else if (buf->b_state == arc_mfu) { 2425 /* 2426 * This buffer has been accessed more than once and is 2427 * still in the cache. Keep it in the MFU state. 2428 * 2429 * NOTE: an add_reference() that occurred when we did 2430 * the arc_read() will have kicked this off the list. 2431 * If it was a prefetch, we will explicitly move it to 2432 * the head of the list now. 2433 */ 2434 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2435 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2436 ASSERT(list_link_active(&buf->b_arc_node)); 2437 } 2438 ARCSTAT_BUMP(arcstat_mfu_hits); 2439 buf->b_arc_access = ddi_get_lbolt(); 2440 } else if (buf->b_state == arc_mfu_ghost) { 2441 arc_state_t *new_state = arc_mfu; 2442 /* 2443 * This buffer has been accessed more than once but has 2444 * been evicted from the cache. Move it back to the 2445 * MFU state. 2446 */ 2447 2448 if (buf->b_flags & ARC_PREFETCH) { 2449 /* 2450 * This is a prefetch access... 2451 * move this block back to the MRU state. 2452 */ 2453 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 2454 new_state = arc_mru; 2455 } 2456 2457 buf->b_arc_access = ddi_get_lbolt(); 2458 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2459 arc_change_state(new_state, buf, hash_lock); 2460 2461 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2462 } else if (buf->b_state == arc_l2c_only) { 2463 /* 2464 * This buffer is on the 2nd Level ARC. 
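* Only the header is resident; the data itself lives on an L2ARC
* device, so on access the header is simply promoted to the MFU
* state.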
2465 */ 2466 2467 buf->b_arc_access = ddi_get_lbolt(); 2468 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2469 arc_change_state(arc_mfu, buf, hash_lock); 2470 } else { 2471 ASSERT(!"invalid arc state"); 2472 } 2473 } 2474 2475 /* a generic arc_done_func_t which you can use */ 2476 /* ARGSUSED */ 2477 void 2478 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2479 { 2480 if (zio == NULL || zio->io_error == 0) 2481 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2482 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2483 } 2484 2485 /* a generic arc_done_func_t */ 2486 void 2487 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2488 { 2489 arc_buf_t **bufp = arg; 2490 if (zio && zio->io_error) { 2491 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2492 *bufp = NULL; 2493 } else { 2494 *bufp = buf; 2495 ASSERT(buf->b_data); 2496 } 2497 } 2498 2499 static void 2500 arc_read_done(zio_t *zio) 2501 { 2502 arc_buf_hdr_t *hdr, *found; 2503 arc_buf_t *buf; 2504 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2505 kmutex_t *hash_lock; 2506 arc_callback_t *callback_list, *acb; 2507 int freeable = FALSE; 2508 2509 buf = zio->io_private; 2510 hdr = buf->b_hdr; 2511 2512 /* 2513 * The hdr was inserted into hash-table and removed from lists 2514 * prior to starting I/O. We should find this header, since 2515 * it's in the hash table, and it should be legit since it's 2516 * not possible to evict it during the I/O. The only possible 2517 * reason for it not to be found is if we were freed during the 2518 * read. 2519 */ 2520 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth, 2521 &hash_lock); 2522 2523 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2524 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2525 (found == hdr && HDR_L2_READING(hdr))); 2526 2527 hdr->b_flags &= ~ARC_L2_EVICTED; 2528 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2529 hdr->b_flags &= ~ARC_L2CACHE; 2530 2531 /* byteswap if necessary */ 2532 callback_list = hdr->b_acb; 2533 ASSERT(callback_list != NULL); 2534 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { 2535 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 2536 byteswap_uint64_array : 2537 dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap; 2538 func(buf->b_data, hdr->b_size); 2539 } 2540 2541 arc_cksum_compute(buf, B_FALSE); 2542 2543 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) { 2544 /* 2545 * Only call arc_access on anonymous buffers. This is because 2546 * if we've issued an I/O for an evicted buffer, we've already 2547 * called arc_access (to prevent any simultaneous readers from 2548 * getting confused). 
2549 */ 2550 arc_access(hdr, hash_lock); 2551 } 2552 2553 /* create copies of the data buffer for the callers */ 2554 abuf = buf; 2555 for (acb = callback_list; acb; acb = acb->acb_next) { 2556 if (acb->acb_done) { 2557 if (abuf == NULL) 2558 abuf = arc_buf_clone(buf); 2559 acb->acb_buf = abuf; 2560 abuf = NULL; 2561 } 2562 } 2563 hdr->b_acb = NULL; 2564 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2565 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2566 if (abuf == buf) { 2567 ASSERT(buf->b_efunc == NULL); 2568 ASSERT(hdr->b_datacnt == 1); 2569 hdr->b_flags |= ARC_BUF_AVAILABLE; 2570 } 2571 2572 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2573 2574 if (zio->io_error != 0) { 2575 hdr->b_flags |= ARC_IO_ERROR; 2576 if (hdr->b_state != arc_anon) 2577 arc_change_state(arc_anon, hdr, hash_lock); 2578 if (HDR_IN_HASH_TABLE(hdr)) 2579 buf_hash_remove(hdr); 2580 freeable = refcount_is_zero(&hdr->b_refcnt); 2581 } 2582 2583 /* 2584 * Broadcast before we drop the hash_lock to avoid the possibility 2585 * that the hdr (and hence the cv) might be freed before we get to 2586 * the cv_broadcast(). 2587 */ 2588 cv_broadcast(&hdr->b_cv); 2589 2590 if (hash_lock) { 2591 mutex_exit(hash_lock); 2592 } else { 2593 /* 2594 * This block was freed while we waited for the read to 2595 * complete. It has been removed from the hash table and 2596 * moved to the anonymous state (so that it won't show up 2597 * in the cache). 2598 */ 2599 ASSERT3P(hdr->b_state, ==, arc_anon); 2600 freeable = refcount_is_zero(&hdr->b_refcnt); 2601 } 2602 2603 /* execute each callback and free its structure */ 2604 while ((acb = callback_list) != NULL) { 2605 if (acb->acb_done) 2606 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2607 2608 if (acb->acb_zio_dummy != NULL) { 2609 acb->acb_zio_dummy->io_error = zio->io_error; 2610 zio_nowait(acb->acb_zio_dummy); 2611 } 2612 2613 callback_list = acb->acb_next; 2614 kmem_free(acb, sizeof (arc_callback_t)); 2615 } 2616 2617 if (freeable) 2618 arc_hdr_destroy(hdr); 2619 } 2620 2621 /* 2622 * "Read" the block block at the specified DVA (in bp) via the 2623 * cache. If the block is found in the cache, invoke the provided 2624 * callback immediately and return. Note that the `zio' parameter 2625 * in the callback will be NULL in this case, since no IO was 2626 * required. If the block is not in the cache pass the read request 2627 * on to the spa with a substitute callback function, so that the 2628 * requested block will be added to the cache. 2629 * 2630 * If a read request arrives for a block that has a read in-progress, 2631 * either wait for the in-progress read to complete (and return the 2632 * results); or, if this is a read with a "done" func, add a record 2633 * to the read to invoke the "done" func when the read completes, 2634 * and return; or just return. 2635 * 2636 * arc_read_done() will invoke all the requested "done" functions 2637 * for readers of this block. 2638 * 2639 * Normal callers should use arc_read and pass the arc buffer and offset 2640 * for the bp. But if you know you don't need locking, you can use 2641 * arc_read_bp. 2642 */ 2643 int 2644 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_buf_t *pbuf, 2645 arc_done_func_t *done, void *private, int priority, int zio_flags, 2646 uint32_t *arc_flags, const zbookmark_t *zb) 2647 { 2648 int err; 2649 2650 if (pbuf == NULL) { 2651 /* 2652 * XXX This happens from traverse callback funcs, for 2653 * the objset_phys_t block. 
2654 */ 2655 return (arc_read_nolock(pio, spa, bp, done, private, priority, 2656 zio_flags, arc_flags, zb)); 2657 } 2658 2659 ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt)); 2660 ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size); 2661 rw_enter(&pbuf->b_data_lock, RW_READER); 2662 2663 err = arc_read_nolock(pio, spa, bp, done, private, priority, 2664 zio_flags, arc_flags, zb); 2665 rw_exit(&pbuf->b_data_lock); 2666 2667 return (err); 2668 } 2669 2670 int 2671 arc_read_nolock(zio_t *pio, spa_t *spa, const blkptr_t *bp, 2672 arc_done_func_t *done, void *private, int priority, int zio_flags, 2673 uint32_t *arc_flags, const zbookmark_t *zb) 2674 { 2675 arc_buf_hdr_t *hdr; 2676 arc_buf_t *buf; 2677 kmutex_t *hash_lock; 2678 zio_t *rzio; 2679 uint64_t guid = spa_guid(spa); 2680 2681 top: 2682 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp), 2683 &hash_lock); 2684 if (hdr && hdr->b_datacnt > 0) { 2685 2686 *arc_flags |= ARC_CACHED; 2687 2688 if (HDR_IO_IN_PROGRESS(hdr)) { 2689 2690 if (*arc_flags & ARC_WAIT) { 2691 cv_wait(&hdr->b_cv, hash_lock); 2692 mutex_exit(hash_lock); 2693 goto top; 2694 } 2695 ASSERT(*arc_flags & ARC_NOWAIT); 2696 2697 if (done) { 2698 arc_callback_t *acb = NULL; 2699 2700 acb = kmem_zalloc(sizeof (arc_callback_t), 2701 KM_SLEEP); 2702 acb->acb_done = done; 2703 acb->acb_private = private; 2704 if (pio != NULL) 2705 acb->acb_zio_dummy = zio_null(pio, 2706 spa, NULL, NULL, NULL, zio_flags); 2707 2708 ASSERT(acb->acb_done != NULL); 2709 acb->acb_next = hdr->b_acb; 2710 hdr->b_acb = acb; 2711 add_reference(hdr, hash_lock, private); 2712 mutex_exit(hash_lock); 2713 return (0); 2714 } 2715 mutex_exit(hash_lock); 2716 return (0); 2717 } 2718 2719 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2720 2721 if (done) { 2722 add_reference(hdr, hash_lock, private); 2723 /* 2724 * If this block is already in use, create a new 2725 * copy of the data so that we will be guaranteed 2726 * that arc_release() will always succeed. 
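* That is, if the header's buf has already been handed out
* (ARC_BUF_AVAILABLE is clear), clone the data into a second
* arc_buf_t on the same header rather than sharing the first
* caller's buffer.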
2727 */ 2728 buf = hdr->b_buf; 2729 ASSERT(buf); 2730 ASSERT(buf->b_data); 2731 if (HDR_BUF_AVAILABLE(hdr)) { 2732 ASSERT(buf->b_efunc == NULL); 2733 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2734 } else { 2735 buf = arc_buf_clone(buf); 2736 } 2737 2738 } else if (*arc_flags & ARC_PREFETCH && 2739 refcount_count(&hdr->b_refcnt) == 0) { 2740 hdr->b_flags |= ARC_PREFETCH; 2741 } 2742 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2743 arc_access(hdr, hash_lock); 2744 if (*arc_flags & ARC_L2CACHE) 2745 hdr->b_flags |= ARC_L2CACHE; 2746 mutex_exit(hash_lock); 2747 ARCSTAT_BUMP(arcstat_hits); 2748 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2749 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2750 data, metadata, hits); 2751 2752 if (done) 2753 done(NULL, buf, private); 2754 } else { 2755 uint64_t size = BP_GET_LSIZE(bp); 2756 arc_callback_t *acb; 2757 vdev_t *vd = NULL; 2758 uint64_t addr; 2759 boolean_t devw = B_FALSE; 2760 2761 if (hdr == NULL) { 2762 /* this block is not in the cache */ 2763 arc_buf_hdr_t *exists; 2764 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2765 buf = arc_buf_alloc(spa, size, private, type); 2766 hdr = buf->b_hdr; 2767 hdr->b_dva = *BP_IDENTITY(bp); 2768 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 2769 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2770 exists = buf_hash_insert(hdr, &hash_lock); 2771 if (exists) { 2772 /* somebody beat us to the hash insert */ 2773 mutex_exit(hash_lock); 2774 buf_discard_identity(hdr); 2775 (void) arc_buf_remove_ref(buf, private); 2776 goto top; /* restart the IO request */ 2777 } 2778 /* if this is a prefetch, we don't have a reference */ 2779 if (*arc_flags & ARC_PREFETCH) { 2780 (void) remove_reference(hdr, hash_lock, 2781 private); 2782 hdr->b_flags |= ARC_PREFETCH; 2783 } 2784 if (*arc_flags & ARC_L2CACHE) 2785 hdr->b_flags |= ARC_L2CACHE; 2786 if (BP_GET_LEVEL(bp) > 0) 2787 hdr->b_flags |= ARC_INDIRECT; 2788 } else { 2789 /* this block is in the ghost cache */ 2790 ASSERT(GHOST_STATE(hdr->b_state)); 2791 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2792 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2793 ASSERT(hdr->b_buf == NULL); 2794 2795 /* if this is a prefetch, we don't have a reference */ 2796 if (*arc_flags & ARC_PREFETCH) 2797 hdr->b_flags |= ARC_PREFETCH; 2798 else 2799 add_reference(hdr, hash_lock, private); 2800 if (*arc_flags & ARC_L2CACHE) 2801 hdr->b_flags |= ARC_L2CACHE; 2802 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2803 buf->b_hdr = hdr; 2804 buf->b_data = NULL; 2805 buf->b_efunc = NULL; 2806 buf->b_private = NULL; 2807 buf->b_next = NULL; 2808 hdr->b_buf = buf; 2809 ASSERT(hdr->b_datacnt == 0); 2810 hdr->b_datacnt = 1; 2811 arc_get_data_buf(buf); 2812 arc_access(hdr, hash_lock); 2813 } 2814 2815 ASSERT(!GHOST_STATE(hdr->b_state)); 2816 2817 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2818 acb->acb_done = done; 2819 acb->acb_private = private; 2820 2821 ASSERT(hdr->b_acb == NULL); 2822 hdr->b_acb = acb; 2823 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2824 2825 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && 2826 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 2827 devw = hdr->b_l2hdr->b_dev->l2ad_writing; 2828 addr = hdr->b_l2hdr->b_daddr; 2829 /* 2830 * Lock out device removal. 
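* If the L2ARC vdev is dead, or the SCL_L2ARC config lock cannot be
* taken without blocking, clear vd so that the read falls through
* to the normal zio_read() path below.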
2831 */ 2832 if (vdev_is_dead(vd) || 2833 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 2834 vd = NULL; 2835 } 2836 2837 mutex_exit(hash_lock); 2838 2839 ASSERT3U(hdr->b_size, ==, size); 2840 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 2841 uint64_t, size, zbookmark_t *, zb); 2842 ARCSTAT_BUMP(arcstat_misses); 2843 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2844 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2845 data, metadata, misses); 2846 2847 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 2848 /* 2849 * Read from the L2ARC if the following are true: 2850 * 1. The L2ARC vdev was previously cached. 2851 * 2. This buffer still has L2ARC metadata. 2852 * 3. This buffer isn't currently writing to the L2ARC. 2853 * 4. The L2ARC entry wasn't evicted, which may 2854 * also have invalidated the vdev. 2855 * 5. This isn't prefetch and l2arc_noprefetch is set. 2856 */ 2857 if (hdr->b_l2hdr != NULL && 2858 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 2859 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 2860 l2arc_read_callback_t *cb; 2861 2862 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 2863 ARCSTAT_BUMP(arcstat_l2_hits); 2864 2865 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 2866 KM_SLEEP); 2867 cb->l2rcb_buf = buf; 2868 cb->l2rcb_spa = spa; 2869 cb->l2rcb_bp = *bp; 2870 cb->l2rcb_zb = *zb; 2871 cb->l2rcb_flags = zio_flags; 2872 2873 /* 2874 * l2arc read. The SCL_L2ARC lock will be 2875 * released by l2arc_read_done(). 2876 */ 2877 rzio = zio_read_phys(pio, vd, addr, size, 2878 buf->b_data, ZIO_CHECKSUM_OFF, 2879 l2arc_read_done, cb, priority, zio_flags | 2880 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | 2881 ZIO_FLAG_DONT_PROPAGATE | 2882 ZIO_FLAG_DONT_RETRY, B_FALSE); 2883 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 2884 zio_t *, rzio); 2885 ARCSTAT_INCR(arcstat_l2_read_bytes, size); 2886 2887 if (*arc_flags & ARC_NOWAIT) { 2888 zio_nowait(rzio); 2889 return (0); 2890 } 2891 2892 ASSERT(*arc_flags & ARC_WAIT); 2893 if (zio_wait(rzio) == 0) 2894 return (0); 2895 2896 /* l2arc read error; goto zio_read() */ 2897 } else { 2898 DTRACE_PROBE1(l2arc__miss, 2899 arc_buf_hdr_t *, hdr); 2900 ARCSTAT_BUMP(arcstat_l2_misses); 2901 if (HDR_L2_WRITING(hdr)) 2902 ARCSTAT_BUMP(arcstat_l2_rw_clash); 2903 spa_config_exit(spa, SCL_L2ARC, vd); 2904 } 2905 } else { 2906 if (vd != NULL) 2907 spa_config_exit(spa, SCL_L2ARC, vd); 2908 if (l2arc_ndev != 0) { 2909 DTRACE_PROBE1(l2arc__miss, 2910 arc_buf_hdr_t *, hdr); 2911 ARCSTAT_BUMP(arcstat_l2_misses); 2912 } 2913 } 2914 2915 rzio = zio_read(pio, spa, bp, buf->b_data, size, 2916 arc_read_done, buf, priority, zio_flags, zb); 2917 2918 if (*arc_flags & ARC_WAIT) 2919 return (zio_wait(rzio)); 2920 2921 ASSERT(*arc_flags & ARC_NOWAIT); 2922 zio_nowait(rzio); 2923 } 2924 return (0); 2925 } 2926 2927 void 2928 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 2929 { 2930 ASSERT(buf->b_hdr != NULL); 2931 ASSERT(buf->b_hdr->b_state != arc_anon); 2932 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 2933 ASSERT(buf->b_efunc == NULL); 2934 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr)); 2935 2936 buf->b_efunc = func; 2937 buf->b_private = private; 2938 } 2939 2940 /* 2941 * This is used by the DMU to let the ARC know that a buffer is 2942 * being evicted, so the ARC should clean up. If this arc buf 2943 * is not yet in the evicted state, it will be put there. 
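* Three cases are handled below: the buf has already been fully
* evicted (b_hdr == NULL, nothing to do); it is sitting on the
* deferred eviction list (b_data == NULL, run its efunc now); or it
* is still live, in which case it is destroyed and, if it was the
* header's last buf, the header is moved to the appropriate ghost
* state.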
2944 */ 2945 int 2946 arc_buf_evict(arc_buf_t *buf) 2947 { 2948 arc_buf_hdr_t *hdr; 2949 kmutex_t *hash_lock; 2950 arc_buf_t **bufp; 2951 2952 mutex_enter(&buf->b_evict_lock); 2953 hdr = buf->b_hdr; 2954 if (hdr == NULL) { 2955 /* 2956 * We are in arc_do_user_evicts(). 2957 */ 2958 ASSERT(buf->b_data == NULL); 2959 mutex_exit(&buf->b_evict_lock); 2960 return (0); 2961 } else if (buf->b_data == NULL) { 2962 arc_buf_t copy = *buf; /* structure assignment */ 2963 /* 2964 * We are on the eviction list; process this buffer now 2965 * but let arc_do_user_evicts() do the reaping. 2966 */ 2967 buf->b_efunc = NULL; 2968 mutex_exit(&buf->b_evict_lock); 2969 VERIFY(copy.b_efunc(©) == 0); 2970 return (1); 2971 } 2972 hash_lock = HDR_LOCK(hdr); 2973 mutex_enter(hash_lock); 2974 hdr = buf->b_hdr; 2975 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 2976 2977 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 2978 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2979 2980 /* 2981 * Pull this buffer off of the hdr 2982 */ 2983 bufp = &hdr->b_buf; 2984 while (*bufp != buf) 2985 bufp = &(*bufp)->b_next; 2986 *bufp = buf->b_next; 2987 2988 ASSERT(buf->b_data != NULL); 2989 arc_buf_destroy(buf, FALSE, FALSE); 2990 2991 if (hdr->b_datacnt == 0) { 2992 arc_state_t *old_state = hdr->b_state; 2993 arc_state_t *evicted_state; 2994 2995 ASSERT(hdr->b_buf == NULL); 2996 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2997 2998 evicted_state = 2999 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 3000 3001 mutex_enter(&old_state->arcs_mtx); 3002 mutex_enter(&evicted_state->arcs_mtx); 3003 3004 arc_change_state(evicted_state, hdr, hash_lock); 3005 ASSERT(HDR_IN_HASH_TABLE(hdr)); 3006 hdr->b_flags |= ARC_IN_HASH_TABLE; 3007 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3008 3009 mutex_exit(&evicted_state->arcs_mtx); 3010 mutex_exit(&old_state->arcs_mtx); 3011 } 3012 mutex_exit(hash_lock); 3013 mutex_exit(&buf->b_evict_lock); 3014 3015 VERIFY(buf->b_efunc(buf) == 0); 3016 buf->b_efunc = NULL; 3017 buf->b_private = NULL; 3018 buf->b_hdr = NULL; 3019 buf->b_next = NULL; 3020 kmem_cache_free(buf_cache, buf); 3021 return (1); 3022 } 3023 3024 /* 3025 * Release this buffer from the cache. This must be done 3026 * after a read and prior to modifying the buffer contents. 3027 * If the buffer has more than one reference, we must make 3028 * a new hdr for the buffer. 3029 */ 3030 void 3031 arc_release(arc_buf_t *buf, void *tag) 3032 { 3033 arc_buf_hdr_t *hdr; 3034 kmutex_t *hash_lock = NULL; 3035 l2arc_buf_hdr_t *l2hdr; 3036 uint64_t buf_size; 3037 3038 /* 3039 * It would be nice to assert that if it's DMU metadata (level > 3040 * 0 || it's the dnode file), then it must be syncing context. 3041 * But we don't know that information at this level. 3042 */ 3043 3044 mutex_enter(&buf->b_evict_lock); 3045 hdr = buf->b_hdr; 3046 3047 /* this buffer is not on any list */ 3048 ASSERT(refcount_count(&hdr->b_refcnt) > 0); 3049 3050 if (hdr->b_state == arc_anon) { 3051 /* this buffer is already released */ 3052 ASSERT(buf->b_efunc == NULL); 3053 } else { 3054 hash_lock = HDR_LOCK(hdr); 3055 mutex_enter(hash_lock); 3056 hdr = buf->b_hdr; 3057 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3058 } 3059 3060 l2hdr = hdr->b_l2hdr; 3061 if (l2hdr) { 3062 mutex_enter(&l2arc_buflist_mtx); 3063 hdr->b_l2hdr = NULL; 3064 buf_size = hdr->b_size; 3065 } 3066 3067 /* 3068 * Do we have more than one buf? 
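* If so, peel this buf off onto a brand new anonymous header and
* leave the remaining bufs (and the block's on-disk identity) on
* the old header; if this is the only buf, simply reset the existing
* header to the anonymous state and discard its identity.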
3069 */ 3070 if (hdr->b_datacnt > 1) { 3071 arc_buf_hdr_t *nhdr; 3072 arc_buf_t **bufp; 3073 uint64_t blksz = hdr->b_size; 3074 uint64_t spa = hdr->b_spa; 3075 arc_buf_contents_t type = hdr->b_type; 3076 uint32_t flags = hdr->b_flags; 3077 3078 ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 3079 /* 3080 * Pull the data off of this hdr and attach it to 3081 * a new anonymous hdr. 3082 */ 3083 (void) remove_reference(hdr, hash_lock, tag); 3084 bufp = &hdr->b_buf; 3085 while (*bufp != buf) 3086 bufp = &(*bufp)->b_next; 3087 *bufp = buf->b_next; 3088 buf->b_next = NULL; 3089 3090 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 3091 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 3092 if (refcount_is_zero(&hdr->b_refcnt)) { 3093 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 3094 ASSERT3U(*size, >=, hdr->b_size); 3095 atomic_add_64(size, -hdr->b_size); 3096 } 3097 hdr->b_datacnt -= 1; 3098 arc_cksum_verify(buf); 3099 3100 mutex_exit(hash_lock); 3101 3102 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3103 nhdr->b_size = blksz; 3104 nhdr->b_spa = spa; 3105 nhdr->b_type = type; 3106 nhdr->b_buf = buf; 3107 nhdr->b_state = arc_anon; 3108 nhdr->b_arc_access = 0; 3109 nhdr->b_flags = flags & ARC_L2_WRITING; 3110 nhdr->b_l2hdr = NULL; 3111 nhdr->b_datacnt = 1; 3112 nhdr->b_freeze_cksum = NULL; 3113 (void) refcount_add(&nhdr->b_refcnt, tag); 3114 buf->b_hdr = nhdr; 3115 mutex_exit(&buf->b_evict_lock); 3116 atomic_add_64(&arc_anon->arcs_size, blksz); 3117 } else { 3118 mutex_exit(&buf->b_evict_lock); 3119 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 3120 ASSERT(!list_link_active(&hdr->b_arc_node)); 3121 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3122 if (hdr->b_state != arc_anon) 3123 arc_change_state(arc_anon, hdr, hash_lock); 3124 hdr->b_arc_access = 0; 3125 if (hash_lock) 3126 mutex_exit(hash_lock); 3127 3128 buf_discard_identity(hdr); 3129 arc_buf_thaw(buf); 3130 } 3131 buf->b_efunc = NULL; 3132 buf->b_private = NULL; 3133 3134 if (l2hdr) { 3135 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 3136 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3137 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3138 mutex_exit(&l2arc_buflist_mtx); 3139 } 3140 } 3141 3142 /* 3143 * Release this buffer. If it does not match the provided BP, fill it 3144 * with that block's contents. 
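* (In the current implementation the bp, spa and zb arguments are
* unused (note the ARGSUSED below); the call is equivalent to a
* plain arc_release().)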
3145 */ 3146 /* ARGSUSED */ 3147 int 3148 arc_release_bp(arc_buf_t *buf, void *tag, blkptr_t *bp, spa_t *spa, 3149 zbookmark_t *zb) 3150 { 3151 arc_release(buf, tag); 3152 return (0); 3153 } 3154 3155 int 3156 arc_released(arc_buf_t *buf) 3157 { 3158 int released; 3159 3160 mutex_enter(&buf->b_evict_lock); 3161 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 3162 mutex_exit(&buf->b_evict_lock); 3163 return (released); 3164 } 3165 3166 int 3167 arc_has_callback(arc_buf_t *buf) 3168 { 3169 int callback; 3170 3171 mutex_enter(&buf->b_evict_lock); 3172 callback = (buf->b_efunc != NULL); 3173 mutex_exit(&buf->b_evict_lock); 3174 return (callback); 3175 } 3176 3177 #ifdef ZFS_DEBUG 3178 int 3179 arc_referenced(arc_buf_t *buf) 3180 { 3181 int referenced; 3182 3183 mutex_enter(&buf->b_evict_lock); 3184 referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3185 mutex_exit(&buf->b_evict_lock); 3186 return (referenced); 3187 } 3188 #endif 3189 3190 static void 3191 arc_write_ready(zio_t *zio) 3192 { 3193 arc_write_callback_t *callback = zio->io_private; 3194 arc_buf_t *buf = callback->awcb_buf; 3195 arc_buf_hdr_t *hdr = buf->b_hdr; 3196 3197 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3198 callback->awcb_ready(zio, buf, callback->awcb_private); 3199 3200 /* 3201 * If the IO is already in progress, then this is a re-write 3202 * attempt, so we need to thaw and re-compute the cksum. 3203 * It is the responsibility of the callback to handle the 3204 * accounting for any re-write attempt. 3205 */ 3206 if (HDR_IO_IN_PROGRESS(hdr)) { 3207 mutex_enter(&hdr->b_freeze_lock); 3208 if (hdr->b_freeze_cksum != NULL) { 3209 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3210 hdr->b_freeze_cksum = NULL; 3211 } 3212 mutex_exit(&hdr->b_freeze_lock); 3213 } 3214 arc_cksum_compute(buf, B_FALSE); 3215 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3216 } 3217 3218 static void 3219 arc_write_done(zio_t *zio) 3220 { 3221 arc_write_callback_t *callback = zio->io_private; 3222 arc_buf_t *buf = callback->awcb_buf; 3223 arc_buf_hdr_t *hdr = buf->b_hdr; 3224 3225 ASSERT(hdr->b_acb == NULL); 3226 3227 if (zio->io_error == 0) { 3228 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3229 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 3230 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3231 } else { 3232 ASSERT(BUF_EMPTY(hdr)); 3233 } 3234 3235 /* 3236 * If the block to be written was all-zero, we may have 3237 * compressed it away. In this case no write was performed 3238 * so there will be no dva/birth/checksum. The buffer must 3239 * therefore remain anonymous (and uncached). 3240 */ 3241 if (!BUF_EMPTY(hdr)) { 3242 arc_buf_hdr_t *exists; 3243 kmutex_t *hash_lock; 3244 3245 ASSERT(zio->io_error == 0); 3246 3247 arc_cksum_verify(buf); 3248 3249 exists = buf_hash_insert(hdr, &hash_lock); 3250 if (exists) { 3251 /* 3252 * This can only happen if we overwrite for 3253 * sync-to-convergence, because we remove 3254 * buffers from the hash table when we arc_free(). 
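* Two sub-cases are handled below: a sync-to-convergence rewrite of
* the same BP (ZIO_FLAG_IO_REWRITE), where the stale header is
* destroyed and this one takes its place in the hash table; and a
* dedup write that produced a BP identical to a block already
* cached, in which case this buffer simply remains anonymous.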
3255 */ 3256 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 3257 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 3258 panic("bad overwrite, hdr=%p exists=%p", 3259 (void *)hdr, (void *)exists); 3260 ASSERT(refcount_is_zero(&exists->b_refcnt)); 3261 arc_change_state(arc_anon, exists, hash_lock); 3262 mutex_exit(hash_lock); 3263 arc_hdr_destroy(exists); 3264 exists = buf_hash_insert(hdr, &hash_lock); 3265 ASSERT3P(exists, ==, NULL); 3266 } else { 3267 /* Dedup */ 3268 ASSERT(hdr->b_datacnt == 1); 3269 ASSERT(hdr->b_state == arc_anon); 3270 ASSERT(BP_GET_DEDUP(zio->io_bp)); 3271 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 3272 } 3273 } 3274 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3275 /* if it's not anon, we are doing a scrub */ 3276 if (!exists && hdr->b_state == arc_anon) 3277 arc_access(hdr, hash_lock); 3278 mutex_exit(hash_lock); 3279 } else { 3280 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3281 } 3282 3283 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3284 callback->awcb_done(zio, buf, callback->awcb_private); 3285 3286 kmem_free(callback, sizeof (arc_write_callback_t)); 3287 } 3288 3289 zio_t * 3290 arc_write(zio_t *pio, spa_t *spa, uint64_t txg, 3291 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp, 3292 arc_done_func_t *ready, arc_done_func_t *done, void *private, 3293 int priority, int zio_flags, const zbookmark_t *zb) 3294 { 3295 arc_buf_hdr_t *hdr = buf->b_hdr; 3296 arc_write_callback_t *callback; 3297 zio_t *zio; 3298 3299 ASSERT(ready != NULL); 3300 ASSERT(done != NULL); 3301 ASSERT(!HDR_IO_ERROR(hdr)); 3302 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3303 ASSERT(hdr->b_acb == NULL); 3304 if (l2arc) 3305 hdr->b_flags |= ARC_L2CACHE; 3306 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3307 callback->awcb_ready = ready; 3308 callback->awcb_done = done; 3309 callback->awcb_private = private; 3310 callback->awcb_buf = buf; 3311 3312 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp, 3313 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); 3314 3315 return (zio); 3316 } 3317 3318 static int 3319 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg) 3320 { 3321 #ifdef _KERNEL 3322 uint64_t available_memory = ptob(freemem); 3323 static uint64_t page_load = 0; 3324 static uint64_t last_txg = 0; 3325 3326 #if defined(__i386) 3327 available_memory = 3328 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3329 #endif 3330 if (available_memory >= zfs_write_limit_max) 3331 return (0); 3332 3333 if (txg > last_txg) { 3334 last_txg = txg; 3335 page_load = 0; 3336 } 3337 /* 3338 * If we are in pageout, we know that memory is already tight, 3339 * the arc is already going to be evicting, so we just want to 3340 * continue to let page writes occur as quickly as possible. 
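* Pageout is therefore only pushed back (ERESTART) once it has
* already charged more than a quarter of MAX(ptob(minfree),
* available_memory) against page_load; the caller's reserve is
* inflated, so only reserve / 8 is charged per call.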
3341 */ 3342 if (curproc == proc_pageout) { 3343 if (page_load > MAX(ptob(minfree), available_memory) / 4) 3344 return (ERESTART); 3345 /* Note: reserve is inflated, so we deflate */ 3346 page_load += reserve / 8; 3347 return (0); 3348 } else if (page_load > 0 && arc_reclaim_needed()) { 3349 /* memory is low, delay before restarting */ 3350 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3351 return (EAGAIN); 3352 } 3353 page_load = 0; 3354 3355 if (arc_size > arc_c_min) { 3356 uint64_t evictable_memory = 3357 arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3358 arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3359 arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3360 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3361 available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3362 } 3363 3364 if (inflight_data > available_memory / 4) { 3365 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3366 return (ERESTART); 3367 } 3368 #endif 3369 return (0); 3370 } 3371 3372 void 3373 arc_tempreserve_clear(uint64_t reserve) 3374 { 3375 atomic_add_64(&arc_tempreserve, -reserve); 3376 ASSERT((int64_t)arc_tempreserve >= 0); 3377 } 3378 3379 int 3380 arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3381 { 3382 int error; 3383 uint64_t anon_size; 3384 3385 #ifdef ZFS_DEBUG 3386 /* 3387 * Once in a while, fail for no reason. Everything should cope. 3388 */ 3389 if (spa_get_random(10000) == 0) { 3390 dprintf("forcing random failure\n"); 3391 return (ERESTART); 3392 } 3393 #endif 3394 if (reserve > arc_c/4 && !arc_no_grow) 3395 arc_c = MIN(arc_c_max, reserve * 4); 3396 if (reserve > arc_c) 3397 return (ENOMEM); 3398 3399 /* 3400 * Don't count loaned bufs as in flight dirty data to prevent long 3401 * network delays from blocking transactions that are ready to be 3402 * assigned to a txg. 3403 */ 3404 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0); 3405 3406 /* 3407 * Writes will, almost always, require additional memory allocations 3408 * in order to compress/encrypt/etc the data. We therefor need to 3409 * make sure that there is sufficient available memory for this. 3410 */ 3411 if (error = arc_memory_throttle(reserve, anon_size, txg)) 3412 return (error); 3413 3414 /* 3415 * Throttle writes when the amount of dirty data in the cache 3416 * gets too large. We try to keep the cache less than half full 3417 * of dirty blocks so that our sync times don't grow too large. 3418 * Note: if two requests come in concurrently, we might let them 3419 * both succeed, when one of them should fail. Not a huge deal. 
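* For example, with arc_c at 1GB a reservation fails with ERESTART
* once it would push reserve + tempreserve + anonymous data past
* 512MB while anonymous data alone already exceeds 256MB.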
3420 */ 3421 3422 if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 3423 anon_size > arc_c / 4) { 3424 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3425 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3426 arc_tempreserve>>10, 3427 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3428 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3429 reserve>>10, arc_c>>10); 3430 return (ERESTART); 3431 } 3432 atomic_add_64(&arc_tempreserve, reserve); 3433 return (0); 3434 } 3435 3436 void 3437 arc_init(void) 3438 { 3439 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3440 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3441 3442 /* Convert seconds to clock ticks */ 3443 arc_min_prefetch_lifespan = 1 * hz; 3444 3445 /* Start out with 1/8 of all memory */ 3446 arc_c = physmem * PAGESIZE / 8; 3447 3448 #ifdef _KERNEL 3449 /* 3450 * On architectures where the physical memory can be larger 3451 * than the addressable space (intel in 32-bit mode), we may 3452 * need to limit the cache to 1/8 of VM size. 3453 */ 3454 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3455 #endif 3456 3457 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 3458 arc_c_min = MAX(arc_c / 4, 64<<20); 3459 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 3460 if (arc_c * 8 >= 1<<30) 3461 arc_c_max = (arc_c * 8) - (1<<30); 3462 else 3463 arc_c_max = arc_c_min; 3464 arc_c_max = MAX(arc_c * 6, arc_c_max); 3465 3466 /* 3467 * Allow the tunables to override our calculations if they are 3468 * reasonable (ie. over 64MB) 3469 */ 3470 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 3471 arc_c_max = zfs_arc_max; 3472 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 3473 arc_c_min = zfs_arc_min; 3474 3475 arc_c = arc_c_max; 3476 arc_p = (arc_c >> 1); 3477 3478 /* limit meta-data to 1/4 of the arc capacity */ 3479 arc_meta_limit = arc_c_max / 4; 3480 3481 /* Allow the tunable to override if it is reasonable */ 3482 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3483 arc_meta_limit = zfs_arc_meta_limit; 3484 3485 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3486 arc_c_min = arc_meta_limit / 2; 3487 3488 if (zfs_arc_grow_retry > 0) 3489 arc_grow_retry = zfs_arc_grow_retry; 3490 3491 if (zfs_arc_shrink_shift > 0) 3492 arc_shrink_shift = zfs_arc_shrink_shift; 3493 3494 if (zfs_arc_p_min_shift > 0) 3495 arc_p_min_shift = zfs_arc_p_min_shift; 3496 3497 /* if kmem_flags are set, lets try to use less memory */ 3498 if (kmem_debugging()) 3499 arc_c = arc_c / 2; 3500 if (arc_c < arc_c_min) 3501 arc_c = arc_c_min; 3502 3503 arc_anon = &ARC_anon; 3504 arc_mru = &ARC_mru; 3505 arc_mru_ghost = &ARC_mru_ghost; 3506 arc_mfu = &ARC_mfu; 3507 arc_mfu_ghost = &ARC_mfu_ghost; 3508 arc_l2c_only = &ARC_l2c_only; 3509 arc_size = 0; 3510 3511 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3512 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3513 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3514 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3515 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3516 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3517 3518 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 3519 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3520 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 3521 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3522 
list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 3523 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3524 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 3525 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3526 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 3527 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3528 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 3529 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3530 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 3531 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3532 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 3533 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3534 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 3535 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3536 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 3537 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3538 3539 buf_init(); 3540 3541 arc_thread_exit = 0; 3542 arc_eviction_list = NULL; 3543 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3544 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3545 3546 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3547 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3548 3549 if (arc_ksp != NULL) { 3550 arc_ksp->ks_data = &arc_stats; 3551 kstat_install(arc_ksp); 3552 } 3553 3554 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3555 TS_RUN, minclsyspri); 3556 3557 arc_dead = FALSE; 3558 arc_warm = B_FALSE; 3559 3560 if (zfs_write_limit_max == 0) 3561 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; 3562 else 3563 zfs_write_limit_shift = 0; 3564 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL); 3565 } 3566 3567 void 3568 arc_fini(void) 3569 { 3570 mutex_enter(&arc_reclaim_thr_lock); 3571 arc_thread_exit = 1; 3572 while (arc_thread_exit != 0) 3573 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3574 mutex_exit(&arc_reclaim_thr_lock); 3575 3576 arc_flush(NULL); 3577 3578 arc_dead = TRUE; 3579 3580 if (arc_ksp != NULL) { 3581 kstat_delete(arc_ksp); 3582 arc_ksp = NULL; 3583 } 3584 3585 mutex_destroy(&arc_eviction_mtx); 3586 mutex_destroy(&arc_reclaim_thr_lock); 3587 cv_destroy(&arc_reclaim_thr_cv); 3588 3589 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 3590 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 3591 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 3592 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 3593 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 3594 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 3595 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 3596 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3597 3598 mutex_destroy(&arc_anon->arcs_mtx); 3599 mutex_destroy(&arc_mru->arcs_mtx); 3600 mutex_destroy(&arc_mru_ghost->arcs_mtx); 3601 mutex_destroy(&arc_mfu->arcs_mtx); 3602 mutex_destroy(&arc_mfu_ghost->arcs_mtx); 3603 mutex_destroy(&arc_l2c_only->arcs_mtx); 3604 3605 mutex_destroy(&zfs_write_limit_lock); 3606 3607 buf_fini(); 3608 3609 ASSERT(arc_loaned_bytes == 0); 3610 } 3611 3612 /* 3613 * Level 2 ARC 3614 * 3615 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3616 * It uses dedicated storage devices to hold cached data, which are populated 3617 * using large infrequent writes. The main role of this cache is to boost 3618 * the performance of random read workloads. 
The intended L2ARC devices 3619 * include short-stroked disks, solid state disks, and other media with 3620 * substantially faster read latency than disk. 3621 * 3622 * +-----------------------+ 3623 * | ARC | 3624 * +-----------------------+ 3625 * | ^ ^ 3626 * | | | 3627 * l2arc_feed_thread() arc_read() 3628 * | | | 3629 * | l2arc read | 3630 * V | | 3631 * +---------------+ | 3632 * | L2ARC | | 3633 * +---------------+ | 3634 * | ^ | 3635 * l2arc_write() | | 3636 * | | | 3637 * V | | 3638 * +-------+ +-------+ 3639 * | vdev | | vdev | 3640 * | cache | | cache | 3641 * +-------+ +-------+ 3642 * +=========+ .-----. 3643 * : L2ARC : |-_____-| 3644 * : devices : | Disks | 3645 * +=========+ `-_____-' 3646 * 3647 * Read requests are satisfied from the following sources, in order: 3648 * 3649 * 1) ARC 3650 * 2) vdev cache of L2ARC devices 3651 * 3) L2ARC devices 3652 * 4) vdev cache of disks 3653 * 5) disks 3654 * 3655 * Some L2ARC device types exhibit extremely slow write performance. 3656 * To accommodate for this there are some significant differences between 3657 * the L2ARC and traditional cache design: 3658 * 3659 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 3660 * the ARC behave as usual, freeing buffers and placing headers on ghost 3661 * lists. The ARC does not send buffers to the L2ARC during eviction as 3662 * this would add inflated write latencies for all ARC memory pressure. 3663 * 3664 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 3665 * It does this by periodically scanning buffers from the eviction-end of 3666 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 3667 * not already there. It scans until a headroom of buffers is satisfied, 3668 * which itself is a buffer for ARC eviction. The thread that does this is 3669 * l2arc_feed_thread(), illustrated below; example sizes are included to 3670 * provide a better sense of ratio than this diagram: 3671 * 3672 * head --> tail 3673 * +---------------------+----------+ 3674 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 3675 * +---------------------+----------+ | o L2ARC eligible 3676 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 3677 * +---------------------+----------+ | 3678 * 15.9 Gbytes ^ 32 Mbytes | 3679 * headroom | 3680 * l2arc_feed_thread() 3681 * | 3682 * l2arc write hand <--[oooo]--' 3683 * | 8 Mbyte 3684 * | write max 3685 * V 3686 * +==============================+ 3687 * L2ARC dev |####|#|###|###| |####| ... | 3688 * +==============================+ 3689 * 32 Gbytes 3690 * 3691 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 3692 * evicted, then the L2ARC has cached a buffer much sooner than it probably 3693 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 3694 * safe to say that this is an uncommon case, since buffers at the end of 3695 * the ARC lists have moved there due to inactivity. 3696 * 3697 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 3698 * then the L2ARC simply misses copying some buffers. This serves as a 3699 * pressure valve to prevent heavy read workloads from both stalling the ARC 3700 * with waits and clogging the L2ARC with writes. This also helps prevent 3701 * the potential for the L2ARC to churn if it attempts to cache content too 3702 * quickly, such as during backups of the entire pool. 3703 * 3704 * 5. 
After system boot and before the ARC has filled main memory, there are 3705 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 3706 * lists can remain mostly static. Instead of searching from the tail of these 3707 * lists as pictured, the l2arc_feed_thread() will search from the list heads 3708 * for eligible buffers, greatly increasing its chance of finding them. 3709 * 3710 * The L2ARC device write speed is also boosted during this time so that 3711 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 3712 * there are no L2ARC reads, and no fear of degrading read performance 3713 * through increased writes. 3714 * 3715 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 3716 * the vdev queue can aggregate them into larger and fewer writes. Each 3717 * device is written to in a rotor fashion, sweeping writes through 3718 * available space then repeating. 3719 * 3720 * 7. The L2ARC does not store dirty content. It never needs to flush 3721 * write buffers back to disk-based storage. 3722 * 3723 * 8. If an ARC buffer is written (and dirtied) which also exists in the 3724 * L2ARC, the now stale L2ARC buffer is immediately dropped. 3725 * 3726 * The performance of the L2ARC can be tweaked by a number of tunables, which 3727 * may be necessary for different workloads: 3728 * 3729 * l2arc_write_max max write bytes per interval 3730 * l2arc_write_boost extra write bytes during device warmup 3731 * l2arc_noprefetch skip caching prefetched buffers 3732 * l2arc_headroom number of max device writes to precache 3733 * l2arc_feed_secs seconds between L2ARC writing 3734 * 3735 * Tunables may be removed or added as future performance improvements are 3736 * integrated, and also may become zpool properties. 3737 * 3738 * There are three key functions that control how the L2ARC warms up: 3739 * 3740 * l2arc_write_eligible() check if a buffer is eligible to cache 3741 * l2arc_write_size() calculate how much to write 3742 * l2arc_write_interval() calculate sleep delay between writes 3743 * 3744 * These three functions determine what to write, how much, and how quickly 3745 * to send writes. 3746 */ 3747 3748 static boolean_t 3749 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab) 3750 { 3751 /* 3752 * A buffer is *not* eligible for the L2ARC if it: 3753 * 1. belongs to a different spa. 3754 * 2. is already cached on the L2ARC. 3755 * 3. has an I/O in progress (it may be an incomplete read). 3756 * 4. is flagged not eligible (zfs property). 3757 */ 3758 if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL || 3759 HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) 3760 return (B_FALSE); 3761 3762 return (B_TRUE); 3763 } 3764 3765 static uint64_t 3766 l2arc_write_size(l2arc_dev_t *dev) 3767 { 3768 uint64_t size; 3769 3770 size = dev->l2ad_write; 3771 3772 if (arc_warm == B_FALSE) 3773 size += dev->l2ad_boost; 3774 3775 return (size); 3776 3777 } 3778 3779 static clock_t 3780 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 3781 { 3782 clock_t interval, next, now; 3783 3784 /* 3785 * If the ARC lists are busy, increase our write rate; if the 3786 * lists are stale, idle back. This is achieved by checking 3787 * how much we previously wrote - if it was more than half of 3788 * what we wanted, schedule the next write much sooner.
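 * For example (a rough illustration using hypothetical settings, not
 * asserting the current defaults): with l2arc_feed_secs of 1 second and
 * l2arc_feed_min_ms of 200 milliseconds, a pass that wrote more than half
 * of its target is rescheduled after roughly hz / 5 ticks (~200ms), while
 * an idle pass waits the full hz ticks (~1 second). The MAX/MIN clamp
 * below then schedules the wakeup 'interval' ticks after the pass began,
 * but never earlier than 'now'.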
3789 */ 3790 if (l2arc_feed_again && wrote > (wanted / 2)) 3791 interval = (hz * l2arc_feed_min_ms) / 1000; 3792 else 3793 interval = hz * l2arc_feed_secs; 3794 3795 now = ddi_get_lbolt(); 3796 next = MAX(now, MIN(now + interval, began + interval)); 3797 3798 return (next); 3799 } 3800 3801 static void 3802 l2arc_hdr_stat_add(void) 3803 { 3804 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 3805 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 3806 } 3807 3808 static void 3809 l2arc_hdr_stat_remove(void) 3810 { 3811 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 3812 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 3813 } 3814 3815 /* 3816 * Cycle through L2ARC devices. This is how L2ARC load balances. 3817 * If a device is returned, this also returns holding the spa config lock. 3818 */ 3819 static l2arc_dev_t * 3820 l2arc_dev_get_next(void) 3821 { 3822 l2arc_dev_t *first, *next = NULL; 3823 3824 /* 3825 * Lock out the removal of spas (spa_namespace_lock), then removal 3826 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 3827 * both locks will be dropped and a spa config lock held instead. 3828 */ 3829 mutex_enter(&spa_namespace_lock); 3830 mutex_enter(&l2arc_dev_mtx); 3831 3832 /* if there are no vdevs, there is nothing to do */ 3833 if (l2arc_ndev == 0) 3834 goto out; 3835 3836 first = NULL; 3837 next = l2arc_dev_last; 3838 do { 3839 /* loop around the list looking for a non-faulted vdev */ 3840 if (next == NULL) { 3841 next = list_head(l2arc_dev_list); 3842 } else { 3843 next = list_next(l2arc_dev_list, next); 3844 if (next == NULL) 3845 next = list_head(l2arc_dev_list); 3846 } 3847 3848 /* if we have come back to the start, bail out */ 3849 if (first == NULL) 3850 first = next; 3851 else if (next == first) 3852 break; 3853 3854 } while (vdev_is_dead(next->l2ad_vdev)); 3855 3856 /* if we were unable to find any usable vdevs, return NULL */ 3857 if (vdev_is_dead(next->l2ad_vdev)) 3858 next = NULL; 3859 3860 l2arc_dev_last = next; 3861 3862 out: 3863 mutex_exit(&l2arc_dev_mtx); 3864 3865 /* 3866 * Grab the config lock to prevent the 'next' device from being 3867 * removed while we are writing to it. 3868 */ 3869 if (next != NULL) 3870 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 3871 mutex_exit(&spa_namespace_lock); 3872 3873 return (next); 3874 } 3875 3876 /* 3877 * Free buffers that were tagged for destruction. 3878 */ 3879 static void 3880 l2arc_do_free_on_write() 3881 { 3882 list_t *buflist; 3883 l2arc_data_free_t *df, *df_prev; 3884 3885 mutex_enter(&l2arc_free_on_write_mtx); 3886 buflist = l2arc_free_on_write; 3887 3888 for (df = list_tail(buflist); df; df = df_prev) { 3889 df_prev = list_prev(buflist, df); 3890 ASSERT(df->l2df_data != NULL); 3891 ASSERT(df->l2df_func != NULL); 3892 df->l2df_func(df->l2df_data, df->l2df_size); 3893 list_remove(buflist, df); 3894 kmem_free(df, sizeof (l2arc_data_free_t)); 3895 } 3896 3897 mutex_exit(&l2arc_free_on_write_mtx); 3898 } 3899 3900 /* 3901 * A write to a cache device has completed. Update all headers to allow 3902 * reads from these buffers to begin. 
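 * Until that happens, the ARC_L2_WRITING flag (set by l2arc_write_buffers()
 * when the buffer was queued) denies reads of the L2ARC copy, so a
 * partially written buffer is never handed back to a caller. Headers whose
 * hash lock cannot be taken in the loop below are skipped and keep the
 * flag set.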
3903 */ 3904 static void 3905 l2arc_write_done(zio_t *zio) 3906 { 3907 l2arc_write_callback_t *cb; 3908 l2arc_dev_t *dev; 3909 list_t *buflist; 3910 arc_buf_hdr_t *head, *ab, *ab_prev; 3911 l2arc_buf_hdr_t *abl2; 3912 kmutex_t *hash_lock; 3913 3914 cb = zio->io_private; 3915 ASSERT(cb != NULL); 3916 dev = cb->l2wcb_dev; 3917 ASSERT(dev != NULL); 3918 head = cb->l2wcb_head; 3919 ASSERT(head != NULL); 3920 buflist = dev->l2ad_buflist; 3921 ASSERT(buflist != NULL); 3922 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 3923 l2arc_write_callback_t *, cb); 3924 3925 if (zio->io_error != 0) 3926 ARCSTAT_BUMP(arcstat_l2_writes_error); 3927 3928 mutex_enter(&l2arc_buflist_mtx); 3929 3930 /* 3931 * All writes completed, or an error was hit. 3932 */ 3933 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 3934 ab_prev = list_prev(buflist, ab); 3935 3936 hash_lock = HDR_LOCK(ab); 3937 if (!mutex_tryenter(hash_lock)) { 3938 /* 3939 * This buffer misses out. It may be in a stage 3940 * of eviction. Its ARC_L2_WRITING flag will be 3941 * left set, denying reads to this buffer. 3942 */ 3943 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 3944 continue; 3945 } 3946 3947 if (zio->io_error != 0) { 3948 /* 3949 * Error - drop L2ARC entry. 3950 */ 3951 list_remove(buflist, ab); 3952 abl2 = ab->b_l2hdr; 3953 ab->b_l2hdr = NULL; 3954 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 3955 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 3956 } 3957 3958 /* 3959 * Allow ARC to begin reads to this L2ARC entry. 3960 */ 3961 ab->b_flags &= ~ARC_L2_WRITING; 3962 3963 mutex_exit(hash_lock); 3964 } 3965 3966 atomic_inc_64(&l2arc_writes_done); 3967 list_remove(buflist, head); 3968 kmem_cache_free(hdr_cache, head); 3969 mutex_exit(&l2arc_buflist_mtx); 3970 3971 l2arc_do_free_on_write(); 3972 3973 kmem_free(cb, sizeof (l2arc_write_callback_t)); 3974 } 3975 3976 /* 3977 * A read to a cache device completed. Validate buffer contents before 3978 * handing over to the regular ARC routines. 3979 */ 3980 static void 3981 l2arc_read_done(zio_t *zio) 3982 { 3983 l2arc_read_callback_t *cb; 3984 arc_buf_hdr_t *hdr; 3985 arc_buf_t *buf; 3986 kmutex_t *hash_lock; 3987 int equal; 3988 3989 ASSERT(zio->io_vd != NULL); 3990 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 3991 3992 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 3993 3994 cb = zio->io_private; 3995 ASSERT(cb != NULL); 3996 buf = cb->l2rcb_buf; 3997 ASSERT(buf != NULL); 3998 3999 hash_lock = HDR_LOCK(buf->b_hdr); 4000 mutex_enter(hash_lock); 4001 hdr = buf->b_hdr; 4002 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 4003 4004 /* 4005 * Check this survived the L2ARC journey. 4006 */ 4007 equal = arc_cksum_equal(buf); 4008 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 4009 mutex_exit(hash_lock); 4010 zio->io_private = buf; 4011 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 4012 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 4013 arc_read_done(zio); 4014 } else { 4015 mutex_exit(hash_lock); 4016 /* 4017 * Buffer didn't survive caching. Increment stats and 4018 * reissue to the original storage device. 4019 */ 4020 if (zio->io_error != 0) { 4021 ARCSTAT_BUMP(arcstat_l2_io_error); 4022 } else { 4023 zio->io_error = EIO; 4024 } 4025 if (!equal) 4026 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 4027 4028 /* 4029 * If there's no waiter, issue an async i/o to the primary 4030 * storage now. If there *is* a waiter, the caller must 4031 * issue the i/o in a context where it's OK to block. 
4032 */ 4033 if (zio->io_waiter == NULL) { 4034 zio_t *pio = zio_unique_parent(zio); 4035 4036 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 4037 4038 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, 4039 buf->b_data, zio->io_size, arc_read_done, buf, 4040 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4041 } 4042 } 4043 4044 kmem_free(cb, sizeof (l2arc_read_callback_t)); 4045 } 4046 4047 /* 4048 * This is the list priority from which the L2ARC will search for pages to 4049 * cache. This is used within loops (0..3) to cycle through lists in the 4050 * desired order. This order can have a significant effect on cache 4051 * performance. 4052 * 4053 * Currently the metadata lists are hit first, MFU then MRU, followed by 4054 * the data lists. This function returns a locked list, and also returns 4055 * the lock pointer. 4056 */ 4057 static list_t * 4058 l2arc_list_locked(int list_num, kmutex_t **lock) 4059 { 4060 list_t *list; 4061 4062 ASSERT(list_num >= 0 && list_num <= 3); 4063 4064 switch (list_num) { 4065 case 0: 4066 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 4067 *lock = &arc_mfu->arcs_mtx; 4068 break; 4069 case 1: 4070 list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 4071 *lock = &arc_mru->arcs_mtx; 4072 break; 4073 case 2: 4074 list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 4075 *lock = &arc_mfu->arcs_mtx; 4076 break; 4077 case 3: 4078 list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 4079 *lock = &arc_mru->arcs_mtx; 4080 break; 4081 } 4082 4083 ASSERT(!(MUTEX_HELD(*lock))); 4084 mutex_enter(*lock); 4085 return (list); 4086 } 4087 4088 /* 4089 * Evict buffers from the device write hand to the distance specified in 4090 * bytes. This distance may span populated buffers, it may span nothing. 4091 * This is clearing a region on the L2ARC device ready for writing. 4092 * If the 'all' boolean is set, every buffer is evicted. 4093 */ 4094 static void 4095 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4096 { 4097 list_t *buflist; 4098 l2arc_buf_hdr_t *abl2; 4099 arc_buf_hdr_t *ab, *ab_prev; 4100 kmutex_t *hash_lock; 4101 uint64_t taddr; 4102 4103 buflist = dev->l2ad_buflist; 4104 4105 if (buflist == NULL) 4106 return; 4107 4108 if (!all && dev->l2ad_first) { 4109 /* 4110 * This is the first sweep through the device. There is 4111 * nothing to evict. 4112 */ 4113 return; 4114 } 4115 4116 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4117 /* 4118 * When nearing the end of the device, evict to the end 4119 * before the device write hand jumps to the start. 4120 */ 4121 taddr = dev->l2ad_end; 4122 } else { 4123 taddr = dev->l2ad_hand + distance; 4124 } 4125 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4126 uint64_t, taddr, boolean_t, all); 4127 4128 top: 4129 mutex_enter(&l2arc_buflist_mtx); 4130 for (ab = list_tail(buflist); ab; ab = ab_prev) { 4131 ab_prev = list_prev(buflist, ab); 4132 4133 hash_lock = HDR_LOCK(ab); 4134 if (!mutex_tryenter(hash_lock)) { 4135 /* 4136 * Missed the hash lock. Retry. 4137 */ 4138 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4139 mutex_exit(&l2arc_buflist_mtx); 4140 mutex_enter(hash_lock); 4141 mutex_exit(hash_lock); 4142 goto top; 4143 } 4144 4145 if (HDR_L2_WRITE_HEAD(ab)) { 4146 /* 4147 * We hit a write head node. Leave it for 4148 * l2arc_write_done(). 
4149 */ 4150 list_remove(buflist, ab); 4151 mutex_exit(hash_lock); 4152 continue; 4153 } 4154 4155 if (!all && ab->b_l2hdr != NULL && 4156 (ab->b_l2hdr->b_daddr > taddr || 4157 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4158 /* 4159 * We've evicted to the target address, 4160 * or the end of the device. 4161 */ 4162 mutex_exit(hash_lock); 4163 break; 4164 } 4165 4166 if (HDR_FREE_IN_PROGRESS(ab)) { 4167 /* 4168 * Already on the path to destruction. 4169 */ 4170 mutex_exit(hash_lock); 4171 continue; 4172 } 4173 4174 if (ab->b_state == arc_l2c_only) { 4175 ASSERT(!HDR_L2_READING(ab)); 4176 /* 4177 * This doesn't exist in the ARC. Destroy. 4178 * arc_hdr_destroy() will call list_remove() 4179 * and decrement arcstat_l2_size. 4180 */ 4181 arc_change_state(arc_anon, ab, hash_lock); 4182 arc_hdr_destroy(ab); 4183 } else { 4184 /* 4185 * Invalidate issued or about to be issued 4186 * reads, since we may be about to write 4187 * over this location. 4188 */ 4189 if (HDR_L2_READING(ab)) { 4190 ARCSTAT_BUMP(arcstat_l2_evict_reading); 4191 ab->b_flags |= ARC_L2_EVICTED; 4192 } 4193 4194 /* 4195 * Tell ARC this no longer exists in L2ARC. 4196 */ 4197 if (ab->b_l2hdr != NULL) { 4198 abl2 = ab->b_l2hdr; 4199 ab->b_l2hdr = NULL; 4200 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4201 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4202 } 4203 list_remove(buflist, ab); 4204 4205 /* 4206 * This may have been leftover after a 4207 * failed write. 4208 */ 4209 ab->b_flags &= ~ARC_L2_WRITING; 4210 } 4211 mutex_exit(hash_lock); 4212 } 4213 mutex_exit(&l2arc_buflist_mtx); 4214 4215 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0); 4216 dev->l2ad_evict = taddr; 4217 } 4218 4219 /* 4220 * Find and write ARC buffers to the L2ARC device. 4221 * 4222 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4223 * for reading until they have completed writing. 4224 */ 4225 static uint64_t 4226 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 4227 { 4228 arc_buf_hdr_t *ab, *ab_prev, *head; 4229 l2arc_buf_hdr_t *hdrl2; 4230 list_t *list; 4231 uint64_t passed_sz, write_sz, buf_sz, headroom; 4232 void *buf_data; 4233 kmutex_t *hash_lock, *list_lock; 4234 boolean_t have_lock, full; 4235 l2arc_write_callback_t *cb; 4236 zio_t *pio, *wzio; 4237 uint64_t guid = spa_guid(spa); 4238 4239 ASSERT(dev->l2ad_vdev != NULL); 4240 4241 pio = NULL; 4242 write_sz = 0; 4243 full = B_FALSE; 4244 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4245 head->b_flags |= ARC_L2_WRITE_HEAD; 4246 4247 /* 4248 * Copy buffers for L2ARC writing. 4249 */ 4250 mutex_enter(&l2arc_buflist_mtx); 4251 for (int try = 0; try <= 3; try++) { 4252 list = l2arc_list_locked(try, &list_lock); 4253 passed_sz = 0; 4254 4255 /* 4256 * L2ARC fast warmup. 4257 * 4258 * Until the ARC is warm and starts to evict, read from the 4259 * head of the ARC lists rather than the tail. 4260 */ 4261 headroom = target_sz * l2arc_headroom; 4262 if (arc_warm == B_FALSE) 4263 ab = list_head(list); 4264 else 4265 ab = list_tail(list); 4266 4267 for (; ab; ab = ab_prev) { 4268 if (arc_warm == B_FALSE) 4269 ab_prev = list_next(list, ab); 4270 else 4271 ab_prev = list_prev(list, ab); 4272 4273 hash_lock = HDR_LOCK(ab); 4274 have_lock = MUTEX_HELD(hash_lock); 4275 if (!have_lock && !mutex_tryenter(hash_lock)) { 4276 /* 4277 * Skip this buffer rather than waiting. 4278 */ 4279 continue; 4280 } 4281 4282 passed_sz += ab->b_size; 4283 if (passed_sz > headroom) { 4284 /* 4285 * Searched too far. 
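 * The limit is the headroom computed above (target_sz * l2arc_headroom).
 * As a rough, hypothetical example: an 8 Mbyte write target with a
 * headroom multiplier of 2 stops the scan of this list once roughly
 * 16 Mbytes of buffers have been passed over.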
4286 */ 4287 mutex_exit(hash_lock); 4288 break; 4289 } 4290 4291 if (!l2arc_write_eligible(guid, ab)) { 4292 mutex_exit(hash_lock); 4293 continue; 4294 } 4295 4296 if ((write_sz + ab->b_size) > target_sz) { 4297 full = B_TRUE; 4298 mutex_exit(hash_lock); 4299 break; 4300 } 4301 4302 if (pio == NULL) { 4303 /* 4304 * Insert a dummy header on the buflist so 4305 * l2arc_write_done() can find where the 4306 * write buffers begin without searching. 4307 */ 4308 list_insert_head(dev->l2ad_buflist, head); 4309 4310 cb = kmem_alloc( 4311 sizeof (l2arc_write_callback_t), KM_SLEEP); 4312 cb->l2wcb_dev = dev; 4313 cb->l2wcb_head = head; 4314 pio = zio_root(spa, l2arc_write_done, cb, 4315 ZIO_FLAG_CANFAIL); 4316 } 4317 4318 /* 4319 * Create and add a new L2ARC header. 4320 */ 4321 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 4322 hdrl2->b_dev = dev; 4323 hdrl2->b_daddr = dev->l2ad_hand; 4324 4325 ab->b_flags |= ARC_L2_WRITING; 4326 ab->b_l2hdr = hdrl2; 4327 list_insert_head(dev->l2ad_buflist, ab); 4328 buf_data = ab->b_buf->b_data; 4329 buf_sz = ab->b_size; 4330 4331 /* 4332 * Compute and store the buffer cksum before 4333 * writing. On debug the cksum is verified first. 4334 */ 4335 arc_cksum_verify(ab->b_buf); 4336 arc_cksum_compute(ab->b_buf, B_TRUE); 4337 4338 mutex_exit(hash_lock); 4339 4340 wzio = zio_write_phys(pio, dev->l2ad_vdev, 4341 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4342 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4343 ZIO_FLAG_CANFAIL, B_FALSE); 4344 4345 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4346 zio_t *, wzio); 4347 (void) zio_nowait(wzio); 4348 4349 /* 4350 * Keep the clock hand suitably device-aligned. 4351 */ 4352 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 4353 4354 write_sz += buf_sz; 4355 dev->l2ad_hand += buf_sz; 4356 } 4357 4358 mutex_exit(list_lock); 4359 4360 if (full == B_TRUE) 4361 break; 4362 } 4363 mutex_exit(&l2arc_buflist_mtx); 4364 4365 if (pio == NULL) { 4366 ASSERT3U(write_sz, ==, 0); 4367 kmem_cache_free(hdr_cache, head); 4368 return (0); 4369 } 4370 4371 ASSERT3U(write_sz, <=, target_sz); 4372 ARCSTAT_BUMP(arcstat_l2_writes_sent); 4373 ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz); 4374 ARCSTAT_INCR(arcstat_l2_size, write_sz); 4375 vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0); 4376 4377 /* 4378 * Bump device hand to the device start if it is approaching the end. 4379 * l2arc_evict() will already have evicted ahead for this case. 4380 */ 4381 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 4382 vdev_space_update(dev->l2ad_vdev, 4383 dev->l2ad_end - dev->l2ad_hand, 0, 0); 4384 dev->l2ad_hand = dev->l2ad_start; 4385 dev->l2ad_evict = dev->l2ad_start; 4386 dev->l2ad_first = B_FALSE; 4387 } 4388 4389 dev->l2ad_writing = B_TRUE; 4390 (void) zio_wait(pio); 4391 dev->l2ad_writing = B_FALSE; 4392 4393 return (write_sz); 4394 } 4395 4396 /* 4397 * This thread feeds the L2ARC at regular intervals. This is the beating 4398 * heart of the L2ARC. 
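 * In outline, each iteration of the loop below:
 *
 *	1. sleeps until the next scheduled wakeup (or a signal),
 *	2. selects the next cache device via l2arc_dev_get_next(),
 *	3. computes a write size with l2arc_write_size(),
 *	4. clears space ahead of the write hand with l2arc_evict(),
 *	5. copies eligible buffers out with l2arc_write_buffers(), and
 *	6. asks l2arc_write_interval() when to run again.
 *
 * A pass is skipped when there are no cache devices, the pool is not
 * writeable, or the system is under memory pressure (arc_reclaim_needed()).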
4399 */ 4400 static void 4401 l2arc_feed_thread(void) 4402 { 4403 callb_cpr_t cpr; 4404 l2arc_dev_t *dev; 4405 spa_t *spa; 4406 uint64_t size, wrote; 4407 clock_t begin, next = ddi_get_lbolt(); 4408 4409 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 4410 4411 mutex_enter(&l2arc_feed_thr_lock); 4412 4413 while (l2arc_thread_exit == 0) { 4414 CALLB_CPR_SAFE_BEGIN(&cpr); 4415 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 4416 next); 4417 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 4418 next = ddi_get_lbolt() + hz; 4419 4420 /* 4421 * Quick check for L2ARC devices. 4422 */ 4423 mutex_enter(&l2arc_dev_mtx); 4424 if (l2arc_ndev == 0) { 4425 mutex_exit(&l2arc_dev_mtx); 4426 continue; 4427 } 4428 mutex_exit(&l2arc_dev_mtx); 4429 begin = ddi_get_lbolt(); 4430 4431 /* 4432 * This selects the next l2arc device to write to, and in 4433 * doing so the next spa to feed from: dev->l2ad_spa. This 4434 * will return NULL if there are now no l2arc devices or if 4435 * they are all faulted. 4436 * 4437 * If a device is returned, its spa's config lock is also 4438 * held to prevent device removal. l2arc_dev_get_next() 4439 * will grab and release l2arc_dev_mtx. 4440 */ 4441 if ((dev = l2arc_dev_get_next()) == NULL) 4442 continue; 4443 4444 spa = dev->l2ad_spa; 4445 ASSERT(spa != NULL); 4446 4447 /* 4448 * If the pool is read-only then force the feed thread to 4449 * sleep a little longer. 4450 */ 4451 if (!spa_writeable(spa)) { 4452 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; 4453 spa_config_exit(spa, SCL_L2ARC, dev); 4454 continue; 4455 } 4456 4457 /* 4458 * Avoid contributing to memory pressure. 4459 */ 4460 if (arc_reclaim_needed()) { 4461 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 4462 spa_config_exit(spa, SCL_L2ARC, dev); 4463 continue; 4464 } 4465 4466 ARCSTAT_BUMP(arcstat_l2_feeds); 4467 4468 size = l2arc_write_size(dev); 4469 4470 /* 4471 * Evict L2ARC buffers that will be overwritten. 4472 */ 4473 l2arc_evict(dev, size, B_FALSE); 4474 4475 /* 4476 * Write ARC buffers. 4477 */ 4478 wrote = l2arc_write_buffers(spa, dev, size); 4479 4480 /* 4481 * Calculate interval between writes. 4482 */ 4483 next = l2arc_write_interval(begin, size, wrote); 4484 spa_config_exit(spa, SCL_L2ARC, dev); 4485 } 4486 4487 l2arc_thread_exit = 0; 4488 cv_broadcast(&l2arc_feed_thr_cv); 4489 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 4490 thread_exit(); 4491 } 4492 4493 boolean_t 4494 l2arc_vdev_present(vdev_t *vd) 4495 { 4496 l2arc_dev_t *dev; 4497 4498 mutex_enter(&l2arc_dev_mtx); 4499 for (dev = list_head(l2arc_dev_list); dev != NULL; 4500 dev = list_next(l2arc_dev_list, dev)) { 4501 if (dev->l2ad_vdev == vd) 4502 break; 4503 } 4504 mutex_exit(&l2arc_dev_mtx); 4505 4506 return (dev != NULL); 4507 } 4508 4509 /* 4510 * Add a vdev for use by the L2ARC. By this point the spa has already 4511 * validated the vdev and opened it. 4512 */ 4513 void 4514 l2arc_add_vdev(spa_t *spa, vdev_t *vd) 4515 { 4516 l2arc_dev_t *adddev; 4517 4518 ASSERT(!l2arc_vdev_present(vd)); 4519 4520 /* 4521 * Create a new l2arc device entry. 
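 * The device's usable region is [l2ad_start, l2ad_end): the start is offset
 * by VDEV_LABEL_START_SIZE so that L2ARC writes stay clear of the leading
 * vdev labels, and both the write hand and the evict pointer begin at
 * l2ad_start on an empty device.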
4522 */ 4523 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 4524 adddev->l2ad_spa = spa; 4525 adddev->l2ad_vdev = vd; 4526 adddev->l2ad_write = l2arc_write_max; 4527 adddev->l2ad_boost = l2arc_write_boost; 4528 adddev->l2ad_start = VDEV_LABEL_START_SIZE; 4529 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); 4530 adddev->l2ad_hand = adddev->l2ad_start; 4531 adddev->l2ad_evict = adddev->l2ad_start; 4532 adddev->l2ad_first = B_TRUE; 4533 adddev->l2ad_writing = B_FALSE; 4534 ASSERT3U(adddev->l2ad_write, >, 0); 4535 4536 /* 4537 * This is a list of all ARC buffers that are still valid on the 4538 * device. 4539 */ 4540 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 4541 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 4542 offsetof(arc_buf_hdr_t, b_l2node)); 4543 4544 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); 4545 4546 /* 4547 * Add device to global list 4548 */ 4549 mutex_enter(&l2arc_dev_mtx); 4550 list_insert_head(l2arc_dev_list, adddev); 4551 atomic_inc_64(&l2arc_ndev); 4552 mutex_exit(&l2arc_dev_mtx); 4553 } 4554 4555 /* 4556 * Remove a vdev from the L2ARC. 4557 */ 4558 void 4559 l2arc_remove_vdev(vdev_t *vd) 4560 { 4561 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 4562 4563 /* 4564 * Find the device by vdev 4565 */ 4566 mutex_enter(&l2arc_dev_mtx); 4567 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 4568 nextdev = list_next(l2arc_dev_list, dev); 4569 if (vd == dev->l2ad_vdev) { 4570 remdev = dev; 4571 break; 4572 } 4573 } 4574 ASSERT(remdev != NULL); 4575 4576 /* 4577 * Remove device from global list 4578 */ 4579 list_remove(l2arc_dev_list, remdev); 4580 l2arc_dev_last = NULL; /* may have been invalidated */ 4581 atomic_dec_64(&l2arc_ndev); 4582 mutex_exit(&l2arc_dev_mtx); 4583 4584 /* 4585 * Clear all buflists and ARC references. L2ARC device flush. 4586 */ 4587 l2arc_evict(remdev, 0, B_TRUE); 4588 list_destroy(remdev->l2ad_buflist); 4589 kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 4590 kmem_free(remdev, sizeof (l2arc_dev_t)); 4591 } 4592 4593 void 4594 l2arc_init(void) 4595 { 4596 l2arc_thread_exit = 0; 4597 l2arc_ndev = 0; 4598 l2arc_writes_sent = 0; 4599 l2arc_writes_done = 0; 4600 4601 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 4602 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 4603 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 4604 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 4605 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 4606 4607 l2arc_dev_list = &L2ARC_dev_list; 4608 l2arc_free_on_write = &L2ARC_free_on_write; 4609 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 4610 offsetof(l2arc_dev_t, l2ad_node)); 4611 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 4612 offsetof(l2arc_data_free_t, l2df_list_node)); 4613 } 4614 4615 void 4616 l2arc_fini(void) 4617 { 4618 /* 4619 * This is called from dmu_fini(), which is called from spa_fini(); 4620 * Because of this, we can assume that all l2arc devices have 4621 * already been removed when the pools themselves were removed. 
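 * That leaves only global state to tear down here: any data buffers still
 * queued on the free-on-write list are released by the
 * l2arc_do_free_on_write() call below, before the locks and lists are
 * destroyed.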
4622 */ 4623 4624 l2arc_do_free_on_write(); 4625 4626 mutex_destroy(&l2arc_feed_thr_lock); 4627 cv_destroy(&l2arc_feed_thr_cv); 4628 mutex_destroy(&l2arc_dev_mtx); 4629 mutex_destroy(&l2arc_buflist_mtx); 4630 mutex_destroy(&l2arc_free_on_write_mtx); 4631 4632 list_destroy(l2arc_dev_list); 4633 list_destroy(l2arc_free_on_write); 4634 } 4635 4636 void 4637 l2arc_start(void) 4638 { 4639 if (!(spa_mode_global & FWRITE)) 4640 return; 4641 4642 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 4643 TS_RUN, minclsyspri); 4644 } 4645 4646 void 4647 l2arc_stop(void) 4648 { 4649 if (!(spa_mode_global & FWRITE)) 4650 return; 4651 4652 mutex_enter(&l2arc_feed_thr_lock); 4653 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 4654 l2arc_thread_exit = 1; 4655 while (l2arc_thread_exit != 0) 4656 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 4657 mutex_exit(&l2arc_feed_thr_lock); 4658 } 4659
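/*
 * For reference, a minimal sketch of the expected setup/teardown ordering
 * around the interfaces above. The wrapper function is hypothetical and
 * purely illustrative; in practice these calls are made from the pool/ARC
 * initialization and teardown paths (e.g. l2arc_fini() from dmu_fini(),
 * per the comment above l2arc_fini()).
 *
 *	void
 *	example_l2arc_lifecycle(void)
 *	{
 *		l2arc_init();
 *		l2arc_start();
 *
 *		... pools with cache vdevs add and remove devices via
 *		... l2arc_add_vdev() and l2arc_remove_vdev() ...
 *
 *		l2arc_stop();
 *		l2arc_fini();
 *	}
 *
 * l2arc_init() must precede l2arc_start(), which spawns l2arc_feed_thread()
 * only when the pool mode allows writes; l2arc_stop() signals that thread
 * and waits for it to exit, and must run before l2arc_fini() tears down the
 * locks and lists the thread uses.
 */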