/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache from growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
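
/*
 * For illustration only (not part of the implementation): the
 * tryenter rule above in practice.  A thread that already holds an
 * arc state list lock may take a hash table lock only
 * opportunistically; on failure it skips the buffer rather than
 * blocking, since another thread holding that hash lock may be about
 * to take the list lock:
 *
 *	mutex_enter(&state->arcs_mtx);
 *	hash_lock = HDR_LOCK(ab);
 *	if (mutex_tryenter(hash_lock)) {
 *		...evict or move the buffer...
 *		mutex_exit(hash_lock);
 *	} else {
 *		missed++;	// revisit this buffer later
 *	}
 *	mutex_exit(&state->arcs_mtx);
 *
 * arc_evict() below follows this pattern and counts the failures in
 * arcstat_mutex_miss.
 */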

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern uint64_t zfs_write_limit_inflated;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to a buffer, it is
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;
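
/*
 * A note on the two size fields above: arcs_size counts all bytes in
 * the state, while arcs_lsize[] counts only the evictable (i.e.
 * unreferenced) bytes, broken down by buffer type.  For example, a
 * state holding 100MB of data of which 30MB of metadata is currently
 * referenced would show arcs_size == 100MB but have that 30MB
 * excluded from arcs_lsize[ARC_BUFC_METADATA].
 */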

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat) 	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
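
/*
 * A note on ARCSTAT_MAX() above: it is a lock-free "store maximum".
 * The loop re-reads the current value and retries the compare-and-swap
 * until either the stored value is already >= val or the CAS succeeds.
 * For example, with val == 12 racing against another writer:
 *
 *	m = stat;		// reads 10
 *	atomic_cas_64(...);	// fails: another CPU stored 11 first
 *	m = stat;		// re-reads 11, still < 12
 *	atomic_cas_64(...);	// succeeds: stat is now 12
 */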

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
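
/*
 * For example, arc_buf_add_ref() below classifies a cache hit along
 * the demand/prefetch and data/metadata axes with a single invocation:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * which bumps exactly one of the four arcstat_*_*_hits counters.
 */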
These flags are private ARC only flags that will show up 460 * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can 461 * be passed in as arc_flags in things like arc_read. However, these flags 462 * should never be passed and should only be set by ARC code. When adding new 463 * public flags, make sure not to smash the private ones. 464 */ 465 466 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 467 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 468 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 469 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 470 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 471 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 472 #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ 473 #define ARC_DONT_L2CACHE (1 << 16) /* originated by prefetch */ 474 #define ARC_L2_WRITING (1 << 17) /* L2ARC write in progress */ 475 #define ARC_L2_EVICTED (1 << 18) /* evicted during I/O */ 476 #define ARC_L2_WRITE_HEAD (1 << 19) /* head of write list */ 477 478 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 479 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 480 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 481 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 482 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 483 #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS) 484 #define HDR_DONT_L2CACHE(hdr) ((hdr)->b_flags & ARC_DONT_L2CACHE) 485 #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \ 486 (hdr)->b_l2hdr != NULL) 487 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING) 488 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED) 489 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD) 490 491 /* 492 * Other sizes 493 */ 494 495 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 496 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) 497 498 /* 499 * Hash table routines 500 */ 501 502 #define HT_LOCK_PAD 64 503 504 struct ht_lock { 505 kmutex_t ht_lock; 506 #ifdef _KERNEL 507 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 508 #endif 509 }; 510 511 #define BUF_LOCKS 256 512 typedef struct buf_hash_table { 513 uint64_t ht_mask; 514 arc_buf_hdr_t **ht_table; 515 struct ht_lock ht_locks[BUF_LOCKS]; 516 } buf_hash_table_t; 517 518 static buf_hash_table_t buf_hash_table; 519 520 #define BUF_HASH_INDEX(spa, dva, birth) \ 521 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 522 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 523 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 524 #define HDR_LOCK(buf) \ 525 (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) 526 527 uint64_t zfs_crc64_table[256]; 528 529 /* 530 * Level 2 ARC 531 */ 532 533 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 534 #define L2ARC_HEADROOM 4 /* num of writes */ 535 #define L2ARC_FEED_SECS 1 /* caching interval */ 536 537 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 538 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 539 540 /* 541 * L2ARC Performance Tunables 542 */ 543 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 544 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 545 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 546 
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 547 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 548 549 /* 550 * L2ARC Internals 551 */ 552 typedef struct l2arc_dev { 553 vdev_t *l2ad_vdev; /* vdev */ 554 spa_t *l2ad_spa; /* spa */ 555 uint64_t l2ad_hand; /* next write location */ 556 uint64_t l2ad_write; /* desired write size, bytes */ 557 uint64_t l2ad_boost; /* warmup write boost, bytes */ 558 uint64_t l2ad_start; /* first addr on device */ 559 uint64_t l2ad_end; /* last addr on device */ 560 uint64_t l2ad_evict; /* last addr eviction reached */ 561 boolean_t l2ad_first; /* first sweep through */ 562 list_t *l2ad_buflist; /* buffer list */ 563 list_node_t l2ad_node; /* device list node */ 564 } l2arc_dev_t; 565 566 static list_t L2ARC_dev_list; /* device list */ 567 static list_t *l2arc_dev_list; /* device list pointer */ 568 static kmutex_t l2arc_dev_mtx; /* device list mutex */ 569 static l2arc_dev_t *l2arc_dev_last; /* last device used */ 570 static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 571 static list_t L2ARC_free_on_write; /* free after write buf list */ 572 static list_t *l2arc_free_on_write; /* free after write list ptr */ 573 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 574 static uint64_t l2arc_ndev; /* number of devices */ 575 576 typedef struct l2arc_read_callback { 577 arc_buf_t *l2rcb_buf; /* read buffer */ 578 spa_t *l2rcb_spa; /* spa */ 579 blkptr_t l2rcb_bp; /* original blkptr */ 580 zbookmark_t l2rcb_zb; /* original bookmark */ 581 int l2rcb_flags; /* original flags */ 582 } l2arc_read_callback_t; 583 584 typedef struct l2arc_write_callback { 585 l2arc_dev_t *l2wcb_dev; /* device info */ 586 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 587 } l2arc_write_callback_t; 588 589 struct l2arc_buf_hdr { 590 /* protected by arc_buf_hdr mutex */ 591 l2arc_dev_t *b_dev; /* L2ARC device */ 592 daddr_t b_daddr; /* disk address, offset byte */ 593 }; 594 595 typedef struct l2arc_data_free { 596 /* protected by l2arc_free_on_write_mtx */ 597 void *l2df_data; 598 size_t l2df_size; 599 void (*l2df_func)(void *, size_t); 600 list_node_t l2df_list_node; 601 } l2arc_data_free_t; 602 603 static kmutex_t l2arc_feed_thr_lock; 604 static kcondvar_t l2arc_feed_thr_cv; 605 static uint8_t l2arc_thread_exit; 606 607 static void l2arc_read_done(zio_t *zio); 608 static void l2arc_hdr_stat_add(void); 609 static void l2arc_hdr_stat_remove(void); 610 611 static uint64_t 612 buf_hash(spa_t *spa, dva_t *dva, uint64_t birth) 613 { 614 uintptr_t spav = (uintptr_t)spa; 615 uint8_t *vdva = (uint8_t *)dva; 616 uint64_t crc = -1ULL; 617 int i; 618 619 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 620 621 for (i = 0; i < sizeof (dva_t); i++) 622 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 623 624 crc ^= (spav>>8) ^ birth; 625 626 return (crc); 627 } 628 629 #define BUF_EMPTY(buf) \ 630 ((buf)->b_dva.dva_word[0] == 0 && \ 631 (buf)->b_dva.dva_word[1] == 0 && \ 632 (buf)->b_birth == 0) 633 634 #define BUF_EQUAL(spa, dva, birth, buf) \ 635 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 636 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 637 ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 638 639 static arc_buf_hdr_t * 640 buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp) 641 { 642 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 643 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 644 arc_buf_hdr_t *buf; 645 646 mutex_enter(hash_lock); 647 for (buf = 
buf_hash_table.ht_table[idx]; buf != NULL; 648 buf = buf->b_hash_next) { 649 if (BUF_EQUAL(spa, dva, birth, buf)) { 650 *lockp = hash_lock; 651 return (buf); 652 } 653 } 654 mutex_exit(hash_lock); 655 *lockp = NULL; 656 return (NULL); 657 } 658 659 /* 660 * Insert an entry into the hash table. If there is already an element 661 * equal to elem in the hash table, then the already existing element 662 * will be returned and the new element will not be inserted. 663 * Otherwise returns NULL. 664 */ 665 static arc_buf_hdr_t * 666 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 667 { 668 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 669 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 670 arc_buf_hdr_t *fbuf; 671 uint32_t i; 672 673 ASSERT(!HDR_IN_HASH_TABLE(buf)); 674 *lockp = hash_lock; 675 mutex_enter(hash_lock); 676 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 677 fbuf = fbuf->b_hash_next, i++) { 678 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 679 return (fbuf); 680 } 681 682 buf->b_hash_next = buf_hash_table.ht_table[idx]; 683 buf_hash_table.ht_table[idx] = buf; 684 buf->b_flags |= ARC_IN_HASH_TABLE; 685 686 /* collect some hash table performance data */ 687 if (i > 0) { 688 ARCSTAT_BUMP(arcstat_hash_collisions); 689 if (i == 1) 690 ARCSTAT_BUMP(arcstat_hash_chains); 691 692 ARCSTAT_MAX(arcstat_hash_chain_max, i); 693 } 694 695 ARCSTAT_BUMP(arcstat_hash_elements); 696 ARCSTAT_MAXSTAT(arcstat_hash_elements); 697 698 return (NULL); 699 } 700 701 static void 702 buf_hash_remove(arc_buf_hdr_t *buf) 703 { 704 arc_buf_hdr_t *fbuf, **bufp; 705 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 706 707 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 708 ASSERT(HDR_IN_HASH_TABLE(buf)); 709 710 bufp = &buf_hash_table.ht_table[idx]; 711 while ((fbuf = *bufp) != buf) { 712 ASSERT(fbuf != NULL); 713 bufp = &fbuf->b_hash_next; 714 } 715 *bufp = buf->b_hash_next; 716 buf->b_hash_next = NULL; 717 buf->b_flags &= ~ARC_IN_HASH_TABLE; 718 719 /* collect some hash table performance data */ 720 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 721 722 if (buf_hash_table.ht_table[idx] && 723 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 724 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 725 } 726 727 /* 728 * Global data structures and functions for the buf kmem cache. 729 */ 730 static kmem_cache_t *hdr_cache; 731 static kmem_cache_t *buf_cache; 732 733 static void 734 buf_fini(void) 735 { 736 int i; 737 738 kmem_free(buf_hash_table.ht_table, 739 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 740 for (i = 0; i < BUF_LOCKS; i++) 741 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 742 kmem_cache_destroy(hdr_cache); 743 kmem_cache_destroy(buf_cache); 744 } 745 746 /* 747 * Constructor callback - called when the cache is empty 748 * and a new buf is requested. 749 */ 750 /* ARGSUSED */ 751 static int 752 hdr_cons(void *vbuf, void *unused, int kmflag) 753 { 754 arc_buf_hdr_t *buf = vbuf; 755 756 bzero(buf, sizeof (arc_buf_hdr_t)); 757 refcount_create(&buf->b_refcnt); 758 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 759 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 760 761 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 762 return (0); 763 } 764 765 /* 766 * Destructor callback - called when a cached buf is 767 * no longer required. 
768 */ 769 /* ARGSUSED */ 770 static void 771 hdr_dest(void *vbuf, void *unused) 772 { 773 arc_buf_hdr_t *buf = vbuf; 774 775 refcount_destroy(&buf->b_refcnt); 776 cv_destroy(&buf->b_cv); 777 mutex_destroy(&buf->b_freeze_lock); 778 779 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 780 } 781 782 /* 783 * Reclaim callback -- invoked when memory is low. 784 */ 785 /* ARGSUSED */ 786 static void 787 hdr_recl(void *unused) 788 { 789 dprintf("hdr_recl called\n"); 790 /* 791 * umem calls the reclaim func when we destroy the buf cache, 792 * which is after we do arc_fini(). 793 */ 794 if (!arc_dead) 795 cv_signal(&arc_reclaim_thr_cv); 796 } 797 798 static void 799 buf_init(void) 800 { 801 uint64_t *ct; 802 uint64_t hsize = 1ULL << 12; 803 int i, j; 804 805 /* 806 * The hash table is big enough to fill all of physical memory 807 * with an average 64K block size. The table will take up 808 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 809 */ 810 while (hsize * 65536 < physmem * PAGESIZE) 811 hsize <<= 1; 812 retry: 813 buf_hash_table.ht_mask = hsize - 1; 814 buf_hash_table.ht_table = 815 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 816 if (buf_hash_table.ht_table == NULL) { 817 ASSERT(hsize > (1ULL << 8)); 818 hsize >>= 1; 819 goto retry; 820 } 821 822 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 823 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 824 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 825 0, NULL, NULL, NULL, NULL, NULL, 0); 826 827 for (i = 0; i < 256; i++) 828 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 829 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 830 831 for (i = 0; i < BUF_LOCKS; i++) { 832 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 833 NULL, MUTEX_DEFAULT, NULL); 834 } 835 } 836 837 #define ARC_MINTIME (hz>>4) /* 62 ms */ 838 839 static void 840 arc_cksum_verify(arc_buf_t *buf) 841 { 842 zio_cksum_t zc; 843 844 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 845 return; 846 847 mutex_enter(&buf->b_hdr->b_freeze_lock); 848 if (buf->b_hdr->b_freeze_cksum == NULL || 849 (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 850 mutex_exit(&buf->b_hdr->b_freeze_lock); 851 return; 852 } 853 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 854 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 855 panic("buffer modified while frozen!"); 856 mutex_exit(&buf->b_hdr->b_freeze_lock); 857 } 858 859 static int 860 arc_cksum_equal(arc_buf_t *buf) 861 { 862 zio_cksum_t zc; 863 int equal; 864 865 mutex_enter(&buf->b_hdr->b_freeze_lock); 866 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 867 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 868 mutex_exit(&buf->b_hdr->b_freeze_lock); 869 870 return (equal); 871 } 872 873 static void 874 arc_cksum_compute(arc_buf_t *buf, boolean_t force) 875 { 876 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 877 return; 878 879 mutex_enter(&buf->b_hdr->b_freeze_lock); 880 if (buf->b_hdr->b_freeze_cksum != NULL) { 881 mutex_exit(&buf->b_hdr->b_freeze_lock); 882 return; 883 } 884 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 885 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 886 buf->b_hdr->b_freeze_cksum); 887 mutex_exit(&buf->b_hdr->b_freeze_lock); 888 } 889 890 void 891 arc_buf_thaw(arc_buf_t *buf) 892 { 893 if (zfs_flags & ZFS_DEBUG_MODIFY) { 894 if (buf->b_hdr->b_state != arc_anon) 895 panic("modifying non-anon buffer!"); 896 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) 897 panic("modifying buffer while i/o in progress!"); 898 
arc_cksum_verify(buf); 899 } 900 901 mutex_enter(&buf->b_hdr->b_freeze_lock); 902 if (buf->b_hdr->b_freeze_cksum != NULL) { 903 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 904 buf->b_hdr->b_freeze_cksum = NULL; 905 } 906 mutex_exit(&buf->b_hdr->b_freeze_lock); 907 } 908 909 void 910 arc_buf_freeze(arc_buf_t *buf) 911 { 912 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 913 return; 914 915 ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 916 buf->b_hdr->b_state == arc_anon); 917 arc_cksum_compute(buf, B_FALSE); 918 } 919 920 static void 921 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 922 { 923 ASSERT(MUTEX_HELD(hash_lock)); 924 925 if ((refcount_add(&ab->b_refcnt, tag) == 1) && 926 (ab->b_state != arc_anon)) { 927 uint64_t delta = ab->b_size * ab->b_datacnt; 928 list_t *list = &ab->b_state->arcs_list[ab->b_type]; 929 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; 930 931 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 932 mutex_enter(&ab->b_state->arcs_mtx); 933 ASSERT(list_link_active(&ab->b_arc_node)); 934 list_remove(list, ab); 935 if (GHOST_STATE(ab->b_state)) { 936 ASSERT3U(ab->b_datacnt, ==, 0); 937 ASSERT3P(ab->b_buf, ==, NULL); 938 delta = ab->b_size; 939 } 940 ASSERT(delta > 0); 941 ASSERT3U(*size, >=, delta); 942 atomic_add_64(size, -delta); 943 mutex_exit(&ab->b_state->arcs_mtx); 944 /* remove the prefetch flag is we get a reference */ 945 if (ab->b_flags & ARC_PREFETCH) 946 ab->b_flags &= ~ARC_PREFETCH; 947 } 948 } 949 950 static int 951 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 952 { 953 int cnt; 954 arc_state_t *state = ab->b_state; 955 956 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 957 ASSERT(!GHOST_STATE(state)); 958 959 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 960 (state != arc_anon)) { 961 uint64_t *size = &state->arcs_lsize[ab->b_type]; 962 963 ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 964 mutex_enter(&state->arcs_mtx); 965 ASSERT(!list_link_active(&ab->b_arc_node)); 966 list_insert_head(&state->arcs_list[ab->b_type], ab); 967 ASSERT(ab->b_datacnt > 0); 968 atomic_add_64(size, ab->b_size * ab->b_datacnt); 969 mutex_exit(&state->arcs_mtx); 970 } 971 return (cnt); 972 } 973 974 /* 975 * Move the supplied buffer to the indicated state. The mutex 976 * for the buffer must be held by the caller. 977 */ 978 static void 979 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 980 { 981 arc_state_t *old_state = ab->b_state; 982 int64_t refcnt = refcount_count(&ab->b_refcnt); 983 uint64_t from_delta, to_delta; 984 985 ASSERT(MUTEX_HELD(hash_lock)); 986 ASSERT(new_state != old_state); 987 ASSERT(refcnt == 0 || ab->b_datacnt > 0); 988 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 989 990 from_delta = to_delta = ab->b_datacnt * ab->b_size; 991 992 /* 993 * If this buffer is evictable, transfer it from the 994 * old state list to the new state list. 995 */ 996 if (refcnt == 0) { 997 if (old_state != arc_anon) { 998 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx); 999 uint64_t *size = &old_state->arcs_lsize[ab->b_type]; 1000 1001 if (use_mutex) 1002 mutex_enter(&old_state->arcs_mtx); 1003 1004 ASSERT(list_link_active(&ab->b_arc_node)); 1005 list_remove(&old_state->arcs_list[ab->b_type], ab); 1006 1007 /* 1008 * If prefetching out of the ghost cache, 1009 * we will have a non-null datacnt. 
1010 */ 1011 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 1012 /* ghost elements have a ghost size */ 1013 ASSERT(ab->b_buf == NULL); 1014 from_delta = ab->b_size; 1015 } 1016 ASSERT3U(*size, >=, from_delta); 1017 atomic_add_64(size, -from_delta); 1018 1019 if (use_mutex) 1020 mutex_exit(&old_state->arcs_mtx); 1021 } 1022 if (new_state != arc_anon) { 1023 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); 1024 uint64_t *size = &new_state->arcs_lsize[ab->b_type]; 1025 1026 if (use_mutex) 1027 mutex_enter(&new_state->arcs_mtx); 1028 1029 list_insert_head(&new_state->arcs_list[ab->b_type], ab); 1030 1031 /* ghost elements have a ghost size */ 1032 if (GHOST_STATE(new_state)) { 1033 ASSERT(ab->b_datacnt == 0); 1034 ASSERT(ab->b_buf == NULL); 1035 to_delta = ab->b_size; 1036 } 1037 atomic_add_64(size, to_delta); 1038 1039 if (use_mutex) 1040 mutex_exit(&new_state->arcs_mtx); 1041 } 1042 } 1043 1044 ASSERT(!BUF_EMPTY(ab)); 1045 if (new_state == arc_anon) { 1046 buf_hash_remove(ab); 1047 } 1048 1049 /* adjust state sizes */ 1050 if (to_delta) 1051 atomic_add_64(&new_state->arcs_size, to_delta); 1052 if (from_delta) { 1053 ASSERT3U(old_state->arcs_size, >=, from_delta); 1054 atomic_add_64(&old_state->arcs_size, -from_delta); 1055 } 1056 ab->b_state = new_state; 1057 1058 /* adjust l2arc hdr stats */ 1059 if (new_state == arc_l2c_only) 1060 l2arc_hdr_stat_add(); 1061 else if (old_state == arc_l2c_only) 1062 l2arc_hdr_stat_remove(); 1063 } 1064 1065 void 1066 arc_space_consume(uint64_t space) 1067 { 1068 atomic_add_64(&arc_meta_used, space); 1069 atomic_add_64(&arc_size, space); 1070 } 1071 1072 void 1073 arc_space_return(uint64_t space) 1074 { 1075 ASSERT(arc_meta_used >= space); 1076 if (arc_meta_max < arc_meta_used) 1077 arc_meta_max = arc_meta_used; 1078 atomic_add_64(&arc_meta_used, -space); 1079 ASSERT(arc_size >= space); 1080 atomic_add_64(&arc_size, -space); 1081 } 1082 1083 void * 1084 arc_data_buf_alloc(uint64_t size) 1085 { 1086 if (arc_evict_needed(ARC_BUFC_DATA)) 1087 cv_signal(&arc_reclaim_thr_cv); 1088 atomic_add_64(&arc_size, size); 1089 return (zio_data_buf_alloc(size)); 1090 } 1091 1092 void 1093 arc_data_buf_free(void *buf, uint64_t size) 1094 { 1095 zio_data_buf_free(buf, size); 1096 ASSERT(arc_size >= size); 1097 atomic_add_64(&arc_size, -size); 1098 } 1099 1100 arc_buf_t * 1101 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 1102 { 1103 arc_buf_hdr_t *hdr; 1104 arc_buf_t *buf; 1105 1106 ASSERT3U(size, >, 0); 1107 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 1108 ASSERT(BUF_EMPTY(hdr)); 1109 hdr->b_size = size; 1110 hdr->b_type = type; 1111 hdr->b_spa = spa; 1112 hdr->b_state = arc_anon; 1113 hdr->b_arc_access = 0; 1114 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1115 buf->b_hdr = hdr; 1116 buf->b_data = NULL; 1117 buf->b_efunc = NULL; 1118 buf->b_private = NULL; 1119 buf->b_next = NULL; 1120 hdr->b_buf = buf; 1121 arc_get_data_buf(buf); 1122 hdr->b_datacnt = 1; 1123 hdr->b_flags = 0; 1124 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1125 (void) refcount_add(&hdr->b_refcnt, tag); 1126 1127 return (buf); 1128 } 1129 1130 static arc_buf_t * 1131 arc_buf_clone(arc_buf_t *from) 1132 { 1133 arc_buf_t *buf; 1134 arc_buf_hdr_t *hdr = from->b_hdr; 1135 uint64_t size = hdr->b_size; 1136 1137 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1138 buf->b_hdr = hdr; 1139 buf->b_data = NULL; 1140 buf->b_efunc = NULL; 1141 buf->b_private = NULL; 1142 buf->b_next = hdr->b_buf; 1143 hdr->b_buf = buf; 1144 arc_get_data_buf(buf); 1145 bcopy(from->b_data, 
buf->b_data, size); 1146 hdr->b_datacnt += 1; 1147 return (buf); 1148 } 1149 1150 void 1151 arc_buf_add_ref(arc_buf_t *buf, void* tag) 1152 { 1153 arc_buf_hdr_t *hdr; 1154 kmutex_t *hash_lock; 1155 1156 /* 1157 * Check to see if this buffer is currently being evicted via 1158 * arc_do_user_evicts(). 1159 */ 1160 mutex_enter(&arc_eviction_mtx); 1161 hdr = buf->b_hdr; 1162 if (hdr == NULL) { 1163 mutex_exit(&arc_eviction_mtx); 1164 return; 1165 } 1166 hash_lock = HDR_LOCK(hdr); 1167 mutex_exit(&arc_eviction_mtx); 1168 1169 mutex_enter(hash_lock); 1170 if (buf->b_data == NULL) { 1171 /* 1172 * This buffer is evicted. 1173 */ 1174 mutex_exit(hash_lock); 1175 return; 1176 } 1177 1178 ASSERT(buf->b_hdr == hdr); 1179 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 1180 add_reference(hdr, hash_lock, tag); 1181 arc_access(hdr, hash_lock); 1182 mutex_exit(hash_lock); 1183 ARCSTAT_BUMP(arcstat_hits); 1184 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 1185 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 1186 data, metadata, hits); 1187 } 1188 1189 /* 1190 * Free the arc data buffer. If it is an l2arc write in progress, 1191 * the buffer is placed on l2arc_free_on_write to be freed later. 1192 */ 1193 static void 1194 arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t), 1195 void *data, size_t size) 1196 { 1197 if (HDR_L2_WRITING(hdr)) { 1198 l2arc_data_free_t *df; 1199 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); 1200 df->l2df_data = data; 1201 df->l2df_size = size; 1202 df->l2df_func = free_func; 1203 mutex_enter(&l2arc_free_on_write_mtx); 1204 list_insert_head(l2arc_free_on_write, df); 1205 mutex_exit(&l2arc_free_on_write_mtx); 1206 ARCSTAT_BUMP(arcstat_l2_free_on_write); 1207 } else { 1208 free_func(data, size); 1209 } 1210 } 1211 1212 static void 1213 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 1214 { 1215 arc_buf_t **bufp; 1216 1217 /* free up data associated with the buf */ 1218 if (buf->b_data) { 1219 arc_state_t *state = buf->b_hdr->b_state; 1220 uint64_t size = buf->b_hdr->b_size; 1221 arc_buf_contents_t type = buf->b_hdr->b_type; 1222 1223 arc_cksum_verify(buf); 1224 if (!recycle) { 1225 if (type == ARC_BUFC_METADATA) { 1226 arc_buf_data_free(buf->b_hdr, zio_buf_free, 1227 buf->b_data, size); 1228 arc_space_return(size); 1229 } else { 1230 ASSERT(type == ARC_BUFC_DATA); 1231 arc_buf_data_free(buf->b_hdr, 1232 zio_data_buf_free, buf->b_data, size); 1233 atomic_add_64(&arc_size, -size); 1234 } 1235 } 1236 if (list_link_active(&buf->b_hdr->b_arc_node)) { 1237 uint64_t *cnt = &state->arcs_lsize[type]; 1238 1239 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 1240 ASSERT(state != arc_anon); 1241 1242 ASSERT3U(*cnt, >=, size); 1243 atomic_add_64(cnt, -size); 1244 } 1245 ASSERT3U(state->arcs_size, >=, size); 1246 atomic_add_64(&state->arcs_size, -size); 1247 buf->b_data = NULL; 1248 ASSERT(buf->b_hdr->b_datacnt > 0); 1249 buf->b_hdr->b_datacnt -= 1; 1250 } 1251 1252 /* only remove the buf if requested */ 1253 if (!all) 1254 return; 1255 1256 /* remove the buf from the hdr list */ 1257 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1258 continue; 1259 *bufp = buf->b_next; 1260 1261 ASSERT(buf->b_efunc == NULL); 1262 1263 /* clean up the buf */ 1264 buf->b_hdr = NULL; 1265 kmem_cache_free(buf_cache, buf); 1266 } 1267 1268 static void 1269 arc_hdr_destroy(arc_buf_hdr_t *hdr) 1270 { 1271 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1272 ASSERT3P(hdr->b_state, ==, arc_anon); 1273 
ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1274 1275 if (hdr->b_l2hdr != NULL) { 1276 if (!MUTEX_HELD(&l2arc_buflist_mtx)) { 1277 /* 1278 * To prevent arc_free() and l2arc_evict() from 1279 * attempting to free the same buffer at the same time, 1280 * a FREE_IN_PROGRESS flag is given to arc_free() to 1281 * give it priority. l2arc_evict() can't destroy this 1282 * header while we are waiting on l2arc_buflist_mtx. 1283 */ 1284 mutex_enter(&l2arc_buflist_mtx); 1285 ASSERT(hdr->b_l2hdr != NULL); 1286 1287 list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); 1288 mutex_exit(&l2arc_buflist_mtx); 1289 } else { 1290 list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); 1291 } 1292 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1293 kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t)); 1294 if (hdr->b_state == arc_l2c_only) 1295 l2arc_hdr_stat_remove(); 1296 hdr->b_l2hdr = NULL; 1297 } 1298 1299 if (!BUF_EMPTY(hdr)) { 1300 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1301 bzero(&hdr->b_dva, sizeof (dva_t)); 1302 hdr->b_birth = 0; 1303 hdr->b_cksum0 = 0; 1304 } 1305 while (hdr->b_buf) { 1306 arc_buf_t *buf = hdr->b_buf; 1307 1308 if (buf->b_efunc) { 1309 mutex_enter(&arc_eviction_mtx); 1310 ASSERT(buf->b_hdr != NULL); 1311 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1312 hdr->b_buf = buf->b_next; 1313 buf->b_hdr = &arc_eviction_hdr; 1314 buf->b_next = arc_eviction_list; 1315 arc_eviction_list = buf; 1316 mutex_exit(&arc_eviction_mtx); 1317 } else { 1318 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1319 } 1320 } 1321 if (hdr->b_freeze_cksum != NULL) { 1322 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1323 hdr->b_freeze_cksum = NULL; 1324 } 1325 1326 ASSERT(!list_link_active(&hdr->b_arc_node)); 1327 ASSERT3P(hdr->b_hash_next, ==, NULL); 1328 ASSERT3P(hdr->b_acb, ==, NULL); 1329 kmem_cache_free(hdr_cache, hdr); 1330 } 1331 1332 void 1333 arc_buf_free(arc_buf_t *buf, void *tag) 1334 { 1335 arc_buf_hdr_t *hdr = buf->b_hdr; 1336 int hashed = hdr->b_state != arc_anon; 1337 1338 ASSERT(buf->b_efunc == NULL); 1339 ASSERT(buf->b_data != NULL); 1340 1341 if (hashed) { 1342 kmutex_t *hash_lock = HDR_LOCK(hdr); 1343 1344 mutex_enter(hash_lock); 1345 (void) remove_reference(hdr, hash_lock, tag); 1346 if (hdr->b_datacnt > 1) 1347 arc_buf_destroy(buf, FALSE, TRUE); 1348 else 1349 hdr->b_flags |= ARC_BUF_AVAILABLE; 1350 mutex_exit(hash_lock); 1351 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1352 int destroy_hdr; 1353 /* 1354 * We are in the middle of an async write. Don't destroy 1355 * this buffer unless the write completes before we finish 1356 * decrementing the reference count. 
1357 */ 1358 mutex_enter(&arc_eviction_mtx); 1359 (void) remove_reference(hdr, NULL, tag); 1360 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1361 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1362 mutex_exit(&arc_eviction_mtx); 1363 if (destroy_hdr) 1364 arc_hdr_destroy(hdr); 1365 } else { 1366 if (remove_reference(hdr, NULL, tag) > 0) { 1367 ASSERT(HDR_IO_ERROR(hdr)); 1368 arc_buf_destroy(buf, FALSE, TRUE); 1369 } else { 1370 arc_hdr_destroy(hdr); 1371 } 1372 } 1373 } 1374 1375 int 1376 arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1377 { 1378 arc_buf_hdr_t *hdr = buf->b_hdr; 1379 kmutex_t *hash_lock = HDR_LOCK(hdr); 1380 int no_callback = (buf->b_efunc == NULL); 1381 1382 if (hdr->b_state == arc_anon) { 1383 arc_buf_free(buf, tag); 1384 return (no_callback); 1385 } 1386 1387 mutex_enter(hash_lock); 1388 ASSERT(hdr->b_state != arc_anon); 1389 ASSERT(buf->b_data != NULL); 1390 1391 (void) remove_reference(hdr, hash_lock, tag); 1392 if (hdr->b_datacnt > 1) { 1393 if (no_callback) 1394 arc_buf_destroy(buf, FALSE, TRUE); 1395 } else if (no_callback) { 1396 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1397 hdr->b_flags |= ARC_BUF_AVAILABLE; 1398 } 1399 ASSERT(no_callback || hdr->b_datacnt > 1 || 1400 refcount_is_zero(&hdr->b_refcnt)); 1401 mutex_exit(hash_lock); 1402 return (no_callback); 1403 } 1404 1405 int 1406 arc_buf_size(arc_buf_t *buf) 1407 { 1408 return (buf->b_hdr->b_size); 1409 } 1410 1411 /* 1412 * Evict buffers from list until we've removed the specified number of 1413 * bytes. Move the removed buffers to the appropriate evict state. 1414 * If the recycle flag is set, then attempt to "recycle" a buffer: 1415 * - look for a buffer to evict that is `bytes' long. 1416 * - return the data block from this buffer rather than freeing it. 1417 * This flag is used by callers that are trying to make space for a 1418 * new buffer in a full arc cache. 1419 * 1420 * This function makes a "best effort". It skips over any buffers 1421 * it can't get a hash_lock on, and so may not catch all candidates. 1422 * It may also return without evicting as much space as requested. 1423 */ 1424 static void * 1425 arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle, 1426 arc_buf_contents_t type) 1427 { 1428 arc_state_t *evicted_state; 1429 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1430 arc_buf_hdr_t *ab, *ab_prev = NULL; 1431 list_t *list = &state->arcs_list[type]; 1432 kmutex_t *hash_lock; 1433 boolean_t have_lock; 1434 void *stolen = NULL; 1435 1436 ASSERT(state == arc_mru || state == arc_mfu); 1437 1438 evicted_state = (state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 1439 1440 mutex_enter(&state->arcs_mtx); 1441 mutex_enter(&evicted_state->arcs_mtx); 1442 1443 for (ab = list_tail(list); ab; ab = ab_prev) { 1444 ab_prev = list_prev(list, ab); 1445 /* prefetch buffers have a minimum lifespan */ 1446 if (HDR_IO_IN_PROGRESS(ab) || 1447 (spa && ab->b_spa != spa) || 1448 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1449 lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { 1450 skipped++; 1451 continue; 1452 } 1453 /* "lookahead" for better eviction candidate */ 1454 if (recycle && ab->b_size != bytes && 1455 ab_prev && ab_prev->b_size == bytes) 1456 continue; 1457 hash_lock = HDR_LOCK(ab); 1458 have_lock = MUTEX_HELD(hash_lock); 1459 if (have_lock || mutex_tryenter(hash_lock)) { 1460 ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 1461 ASSERT(ab->b_datacnt > 0); 1462 while (ab->b_buf) { 1463 arc_buf_t *buf = ab->b_buf; 1464 if (buf->b_data) { 1465 bytes_evicted += ab->b_size; 1466 if (recycle && ab->b_type == type && 1467 ab->b_size == bytes && 1468 !HDR_L2_WRITING(ab)) { 1469 stolen = buf->b_data; 1470 recycle = FALSE; 1471 } 1472 } 1473 if (buf->b_efunc) { 1474 mutex_enter(&arc_eviction_mtx); 1475 arc_buf_destroy(buf, 1476 buf->b_data == stolen, FALSE); 1477 ab->b_buf = buf->b_next; 1478 buf->b_hdr = &arc_eviction_hdr; 1479 buf->b_next = arc_eviction_list; 1480 arc_eviction_list = buf; 1481 mutex_exit(&arc_eviction_mtx); 1482 } else { 1483 arc_buf_destroy(buf, 1484 buf->b_data == stolen, TRUE); 1485 } 1486 } 1487 ASSERT(ab->b_datacnt == 0); 1488 arc_change_state(evicted_state, ab, hash_lock); 1489 ASSERT(HDR_IN_HASH_TABLE(ab)); 1490 ab->b_flags |= ARC_IN_HASH_TABLE; 1491 ab->b_flags &= ~ARC_BUF_AVAILABLE; 1492 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 1493 if (!have_lock) 1494 mutex_exit(hash_lock); 1495 if (bytes >= 0 && bytes_evicted >= bytes) 1496 break; 1497 } else { 1498 missed += 1; 1499 } 1500 } 1501 1502 mutex_exit(&evicted_state->arcs_mtx); 1503 mutex_exit(&state->arcs_mtx); 1504 1505 if (bytes_evicted < bytes) 1506 dprintf("only evicted %lld bytes from %x", 1507 (longlong_t)bytes_evicted, state); 1508 1509 if (skipped) 1510 ARCSTAT_INCR(arcstat_evict_skip, skipped); 1511 1512 if (missed) 1513 ARCSTAT_INCR(arcstat_mutex_miss, missed); 1514 1515 /* 1516 * We have just evicted some date into the ghost state, make 1517 * sure we also adjust the ghost state size if necessary. 1518 */ 1519 if (arc_no_grow && 1520 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 1521 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 1522 arc_mru_ghost->arcs_size - arc_c; 1523 1524 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 1525 int64_t todelete = 1526 MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 1527 arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1528 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 1529 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 1530 arc_mru_ghost->arcs_size + 1531 arc_mfu_ghost->arcs_size - arc_c); 1532 arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1533 } 1534 } 1535 1536 return (stolen); 1537 } 1538 1539 /* 1540 * Remove buffers from list until we've removed the specified number of 1541 * bytes. Destroy the buffers that are removed. 
1542 */ 1543 static void 1544 arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes) 1545 { 1546 arc_buf_hdr_t *ab, *ab_prev; 1547 list_t *list = &state->arcs_list[ARC_BUFC_DATA]; 1548 kmutex_t *hash_lock; 1549 uint64_t bytes_deleted = 0; 1550 uint64_t bufs_skipped = 0; 1551 1552 ASSERT(GHOST_STATE(state)); 1553 top: 1554 mutex_enter(&state->arcs_mtx); 1555 for (ab = list_tail(list); ab; ab = ab_prev) { 1556 ab_prev = list_prev(list, ab); 1557 if (spa && ab->b_spa != spa) 1558 continue; 1559 hash_lock = HDR_LOCK(ab); 1560 if (mutex_tryenter(hash_lock)) { 1561 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1562 ASSERT(ab->b_buf == NULL); 1563 ARCSTAT_BUMP(arcstat_deleted); 1564 bytes_deleted += ab->b_size; 1565 1566 if (ab->b_l2hdr != NULL) { 1567 /* 1568 * This buffer is cached on the 2nd Level ARC; 1569 * don't destroy the header. 1570 */ 1571 arc_change_state(arc_l2c_only, ab, hash_lock); 1572 mutex_exit(hash_lock); 1573 } else { 1574 arc_change_state(arc_anon, ab, hash_lock); 1575 mutex_exit(hash_lock); 1576 arc_hdr_destroy(ab); 1577 } 1578 1579 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1580 if (bytes >= 0 && bytes_deleted >= bytes) 1581 break; 1582 } else { 1583 if (bytes < 0) { 1584 mutex_exit(&state->arcs_mtx); 1585 mutex_enter(hash_lock); 1586 mutex_exit(hash_lock); 1587 goto top; 1588 } 1589 bufs_skipped += 1; 1590 } 1591 } 1592 mutex_exit(&state->arcs_mtx); 1593 1594 if (list == &state->arcs_list[ARC_BUFC_DATA] && 1595 (bytes < 0 || bytes_deleted < bytes)) { 1596 list = &state->arcs_list[ARC_BUFC_METADATA]; 1597 goto top; 1598 } 1599 1600 if (bufs_skipped) { 1601 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1602 ASSERT(bytes >= 0); 1603 } 1604 1605 if (bytes_deleted < bytes) 1606 dprintf("only deleted %lld bytes from %p", 1607 (longlong_t)bytes_deleted, state); 1608 } 1609 1610 static void 1611 arc_adjust(void) 1612 { 1613 int64_t top_sz, mru_over, arc_over, todelete; 1614 1615 top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used; 1616 1617 if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 1618 int64_t toevict = 1619 MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p); 1620 (void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA); 1621 top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1622 } 1623 1624 if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1625 int64_t toevict = 1626 MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p); 1627 (void) arc_evict(arc_mru, NULL, toevict, FALSE, 1628 ARC_BUFC_METADATA); 1629 top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1630 } 1631 1632 mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; 1633 1634 if (mru_over > 0) { 1635 if (arc_mru_ghost->arcs_size > 0) { 1636 todelete = MIN(arc_mru_ghost->arcs_size, mru_over); 1637 arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1638 } 1639 } 1640 1641 if ((arc_over = arc_size - arc_c) > 0) { 1642 int64_t tbl_over; 1643 1644 if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 1645 int64_t toevict = 1646 MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over); 1647 (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 1648 ARC_BUFC_DATA); 1649 arc_over = arc_size - arc_c; 1650 } 1651 1652 if (arc_over > 0 && 1653 arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1654 int64_t toevict = 1655 MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], 1656 arc_over); 1657 (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 1658 ARC_BUFC_METADATA); 1659 } 1660 1661 tbl_over = arc_size + arc_mru_ghost->arcs_size + 1662 arc_mfu_ghost->arcs_size - arc_c * 2; 1663 1664 if (tbl_over > 
0 && arc_mfu_ghost->arcs_size > 0) { 1665 todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over); 1666 arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1667 } 1668 } 1669 } 1670 1671 static void 1672 arc_do_user_evicts(void) 1673 { 1674 mutex_enter(&arc_eviction_mtx); 1675 while (arc_eviction_list != NULL) { 1676 arc_buf_t *buf = arc_eviction_list; 1677 arc_eviction_list = buf->b_next; 1678 buf->b_hdr = NULL; 1679 mutex_exit(&arc_eviction_mtx); 1680 1681 if (buf->b_efunc != NULL) 1682 VERIFY(buf->b_efunc(buf) == 0); 1683 1684 buf->b_efunc = NULL; 1685 buf->b_private = NULL; 1686 kmem_cache_free(buf_cache, buf); 1687 mutex_enter(&arc_eviction_mtx); 1688 } 1689 mutex_exit(&arc_eviction_mtx); 1690 } 1691 1692 /* 1693 * Flush all *evictable* data from the cache for the given spa. 1694 * NOTE: this will not touch "active" (i.e. referenced) data. 1695 */ 1696 void 1697 arc_flush(spa_t *spa) 1698 { 1699 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { 1700 (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA); 1701 if (spa) 1702 break; 1703 } 1704 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { 1705 (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA); 1706 if (spa) 1707 break; 1708 } 1709 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { 1710 (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA); 1711 if (spa) 1712 break; 1713 } 1714 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { 1715 (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA); 1716 if (spa) 1717 break; 1718 } 1719 1720 arc_evict_ghost(arc_mru_ghost, spa, -1); 1721 arc_evict_ghost(arc_mfu_ghost, spa, -1); 1722 1723 mutex_enter(&arc_reclaim_thr_lock); 1724 arc_do_user_evicts(); 1725 mutex_exit(&arc_reclaim_thr_lock); 1726 ASSERT(spa || arc_eviction_list == NULL); 1727 } 1728 1729 int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ 1730 1731 void 1732 arc_shrink(void) 1733 { 1734 if (arc_c > arc_c_min) { 1735 uint64_t to_free; 1736 1737 #ifdef _KERNEL 1738 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 1739 #else 1740 to_free = arc_c >> arc_shrink_shift; 1741 #endif 1742 if (arc_c > arc_c_min + to_free) 1743 atomic_add_64(&arc_c, -to_free); 1744 else 1745 arc_c = arc_c_min; 1746 1747 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 1748 if (arc_c > arc_size) 1749 arc_c = MAX(arc_size, arc_c_min); 1750 if (arc_p > arc_c) 1751 arc_p = (arc_c >> 1); 1752 ASSERT(arc_c >= arc_c_min); 1753 ASSERT((int64_t)arc_p >= 0); 1754 } 1755 1756 if (arc_size > arc_c) 1757 arc_adjust(); 1758 } 1759 1760 static int 1761 arc_reclaim_needed(void) 1762 { 1763 uint64_t extra; 1764 1765 #ifdef _KERNEL 1766 1767 if (needfree) 1768 return (1); 1769 1770 /* 1771 * take 'desfree' extra pages, so we reclaim sooner, rather than later 1772 */ 1773 extra = desfree; 1774 1775 /* 1776 * check that we're out of range of the pageout scanner. It starts to 1777 * schedule paging if freemem is less than lotsfree and needfree. 1778 * lotsfree is the high-water mark for pageout, and needfree is the 1779 * number of needed free pages. We add extra pages here to make sure 1780 * the scanner doesn't start up while we're freeing memory. 1781 */ 1782 if (freemem < lotsfree + needfree + extra) 1783 return (1); 1784 1785 /* 1786 * check to make sure that swapfs has enough space so that anon 1787 * reservations can still succeed. anon_resvmem() checks that the 1788 * availrmem is greater than swapfs_minfree, and the number of reserved 1789 * swap pages. 
We also add a bit of extra here just to prevent 1790 * circumstances from getting really dire. 1791 */ 1792 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 1793 return (1); 1794 1795 #if defined(__i386) 1796 /* 1797 * If we're on an i386 platform, it's possible that we'll exhaust the 1798 * kernel heap space before we ever run out of available physical 1799 * memory. Most checks of the size of the heap_area compare against 1800 * tune.t_minarmem, which is the minimum available real memory that we 1801 * can have in the system. However, this is generally fixed at 25 pages 1802 * which is so low that it's useless. In this comparison, we seek to 1803 * calculate the total heap-size, and reclaim if more than 3/4ths of the 1804 * heap is allocated. (Or, in the calculation, if less than 1/4th is 1805 * free) 1806 */ 1807 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 1808 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 1809 return (1); 1810 #endif 1811 1812 #else 1813 if (spa_get_random(100) == 0) 1814 return (1); 1815 #endif 1816 return (0); 1817 } 1818 1819 static void 1820 arc_kmem_reap_now(arc_reclaim_strategy_t strat) 1821 { 1822 size_t i; 1823 kmem_cache_t *prev_cache = NULL; 1824 kmem_cache_t *prev_data_cache = NULL; 1825 extern kmem_cache_t *zio_buf_cache[]; 1826 extern kmem_cache_t *zio_data_buf_cache[]; 1827 1828 #ifdef _KERNEL 1829 if (arc_meta_used >= arc_meta_limit) { 1830 /* 1831 * We are exceeding our meta-data cache limit. 1832 * Purge some DNLC entries to release holds on meta-data. 1833 */ 1834 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 1835 } 1836 #if defined(__i386) 1837 /* 1838 * Reclaim unused memory from all kmem caches. 1839 */ 1840 kmem_reap(); 1841 #endif 1842 #endif 1843 1844 /* 1845 * An aggressive reclamation will shrink the cache size as well as 1846 * reap free buffers from the arc kmem caches. 
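 * A conservative pass (ARC_RECLAIM_CONS), by contrast, only reaps the
 * kmem caches and leaves the cache target size alone.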
 */
	if (strat == ARC_RECLAIM_AGGR)
		arc_shrink();

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
		if (zio_data_buf_cache[i] != prev_data_cache) {
			prev_data_cache = zio_data_buf_cache[i];
			kmem_cache_reap_now(zio_data_buf_cache[i]);
		}
	}
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_cache);
}

static void
arc_reclaim_thread(void)
{
	clock_t growtime = 0;
	arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_thr_lock);
	while (arc_thread_exit == 0) {
		if (arc_reclaim_needed()) {

			if (arc_no_grow) {
				if (last_reclaim == ARC_RECLAIM_CONS) {
					last_reclaim = ARC_RECLAIM_AGGR;
				} else {
					last_reclaim = ARC_RECLAIM_CONS;
				}
			} else {
				arc_no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
				membar_producer();
			}

			/* reset the growth delay for every reclaim */
			growtime = lbolt + (arc_grow_retry * hz);

			arc_kmem_reap_now(last_reclaim);
			arc_warm = B_TRUE;

		} else if (arc_no_grow && lbolt >= growtime) {
			arc_no_grow = FALSE;
		}

		if (2 * arc_c < arc_size +
		    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)
			arc_adjust();

		if (arc_eviction_list != NULL)
			arc_do_user_evicts();

		/* block until needed, or one second, whichever is shorter */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&arc_reclaim_thr_cv,
		    &arc_reclaim_thr_lock, (lbolt + hz));
		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
	}

	arc_thread_exit = 0;
	cv_broadcast(&arc_reclaim_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
	thread_exit();
}

/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from.  This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
	int mult;

	if (state == arc_l2c_only)
		return;

	ASSERT(bytes > 0);
	/*
	 * Adapt the target size of the MRU list:
	 *	- if we just hit in the MRU ghost list, then increase
	 *	  the target size of the MRU list.
	 *	- if we just hit in the MFU ghost list, then increase
	 *	  the target size of the MFU list by decreasing the
	 *	  target size of the MRU list.
	 */
	if (state == arc_mru_ghost) {
		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));

		arc_p = MIN(arc_c, arc_p + bytes * mult);
	} else if (state == arc_mfu_ghost) {
		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
1949 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 1950 1951 arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 1952 } 1953 ASSERT((int64_t)arc_p >= 0); 1954 1955 if (arc_reclaim_needed()) { 1956 cv_signal(&arc_reclaim_thr_cv); 1957 return; 1958 } 1959 1960 if (arc_no_grow) 1961 return; 1962 1963 if (arc_c >= arc_c_max) 1964 return; 1965 1966 /* 1967 * If we're within (2 * maxblocksize) bytes of the target 1968 * cache size, increment the target cache size 1969 */ 1970 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 1971 atomic_add_64(&arc_c, (int64_t)bytes); 1972 if (arc_c > arc_c_max) 1973 arc_c = arc_c_max; 1974 else if (state == arc_anon) 1975 atomic_add_64(&arc_p, (int64_t)bytes); 1976 if (arc_p > arc_c) 1977 arc_p = arc_c; 1978 } 1979 ASSERT((int64_t)arc_p >= 0); 1980 } 1981 1982 /* 1983 * Check if the cache has reached its limits and eviction is required 1984 * prior to insert. 1985 */ 1986 static int 1987 arc_evict_needed(arc_buf_contents_t type) 1988 { 1989 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 1990 return (1); 1991 1992 #ifdef _KERNEL 1993 /* 1994 * If zio data pages are being allocated out of a separate heap segment, 1995 * then enforce that the size of available vmem for this area remains 1996 * above about 1/32nd free. 1997 */ 1998 if (type == ARC_BUFC_DATA && zio_arena != NULL && 1999 vmem_size(zio_arena, VMEM_FREE) < 2000 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 2001 return (1); 2002 #endif 2003 2004 if (arc_reclaim_needed()) 2005 return (1); 2006 2007 return (arc_size > arc_c); 2008 } 2009 2010 /* 2011 * The buffer, supplied as the first argument, needs a data block. 2012 * So, if we are at cache max, determine which cache should be victimized. 2013 * We have the following cases: 2014 * 2015 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2016 * In this situation if we're out of space, but the resident size of the MFU is 2017 * under the limit, victimize the MFU cache to satisfy this insertion request. 2018 * 2019 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2020 * Here, we've used up all of the available space for the MRU, so we need to 2021 * evict from our own cache instead. Evict from the set of resident MRU 2022 * entries. 2023 * 2024 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2025 * c minus p represents the MFU space in the cache, since p is the size of the 2026 * cache that is dedicated to the MRU. In this situation there's still space on 2027 * the MFU side, so the MRU side needs to be victimized. 2028 * 2029 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2030 * MFU's resident set is consuming more space than it has been allotted. In 2031 * this situation, we must victimize our own cache, the MFU, for this insertion. 2032 */ 2033 static void 2034 arc_get_data_buf(arc_buf_t *buf) 2035 { 2036 arc_state_t *state = buf->b_hdr->b_state; 2037 uint64_t size = buf->b_hdr->b_size; 2038 arc_buf_contents_t type = buf->b_hdr->b_type; 2039 2040 arc_adapt(size, state); 2041 2042 /* 2043 * We have not yet reached cache maximum size, 2044 * just allocate a new buffer. 2045 */ 2046 if (!arc_evict_needed(type)) { 2047 if (type == ARC_BUFC_METADATA) { 2048 buf->b_data = zio_buf_alloc(size); 2049 arc_space_consume(size); 2050 } else { 2051 ASSERT(type == ARC_BUFC_DATA); 2052 buf->b_data = zio_data_buf_alloc(size); 2053 atomic_add_64(&arc_size, size); 2054 } 2055 goto out; 2056 } 2057 2058 /* 2059 * If we are prefetching from the mfu ghost list, this buffer 2060 * will end up on the mru list; so steal space from there. 
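 * To summarize the selection below: the ghost state is first mapped to
 * the (mru or mfu) side the buffer will land on, and the eviction
 * source is then chosen by comparing that side's current size against
 * its share of the target (arc_p for the mru side, arc_c - arc_p for
 * the mfu side).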
2061 */ 2062 if (state == arc_mfu_ghost) 2063 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2064 else if (state == arc_mru_ghost) 2065 state = arc_mru; 2066 2067 if (state == arc_mru || state == arc_anon) { 2068 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2069 state = (arc_mfu->arcs_lsize[type] > 0 && 2070 arc_p > mru_used) ? arc_mfu : arc_mru; 2071 } else { 2072 /* MFU cases */ 2073 uint64_t mfu_space = arc_c - arc_p; 2074 state = (arc_mru->arcs_lsize[type] > 0 && 2075 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2076 } 2077 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 2078 if (type == ARC_BUFC_METADATA) { 2079 buf->b_data = zio_buf_alloc(size); 2080 arc_space_consume(size); 2081 } else { 2082 ASSERT(type == ARC_BUFC_DATA); 2083 buf->b_data = zio_data_buf_alloc(size); 2084 atomic_add_64(&arc_size, size); 2085 } 2086 ARCSTAT_BUMP(arcstat_recycle_miss); 2087 } 2088 ASSERT(buf->b_data != NULL); 2089 out: 2090 /* 2091 * Update the state size. Note that ghost states have a 2092 * "ghost size" and so don't need to be updated. 2093 */ 2094 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2095 arc_buf_hdr_t *hdr = buf->b_hdr; 2096 2097 atomic_add_64(&hdr->b_state->arcs_size, size); 2098 if (list_link_active(&hdr->b_arc_node)) { 2099 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2100 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2101 } 2102 /* 2103 * If we are growing the cache, and we are adding anonymous 2104 * data, and we have outgrown arc_p, update arc_p 2105 */ 2106 if (arc_size < arc_c && hdr->b_state == arc_anon && 2107 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2108 arc_p = MIN(arc_c, arc_p + size); 2109 } 2110 } 2111 2112 /* 2113 * This routine is called whenever a buffer is accessed. 2114 * NOTE: the hash lock is dropped in this function. 2115 */ 2116 static void 2117 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2118 { 2119 ASSERT(MUTEX_HELD(hash_lock)); 2120 2121 if (buf->b_state == arc_anon) { 2122 /* 2123 * This buffer is not in the cache, and does not 2124 * appear in our "ghost" list. Add the new buffer 2125 * to the MRU state. 2126 */ 2127 2128 ASSERT(buf->b_arc_access == 0); 2129 buf->b_arc_access = lbolt; 2130 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2131 arc_change_state(arc_mru, buf, hash_lock); 2132 2133 } else if (buf->b_state == arc_mru) { 2134 /* 2135 * If this buffer is here because of a prefetch, then either: 2136 * - clear the flag if this is a "referencing" read 2137 * (any subsequent access will bump this into the MFU state). 2138 * or 2139 * - move the buffer to the head of the list if this is 2140 * another prefetch (to make it less likely to be evicted). 2141 */ 2142 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2143 if (refcount_count(&buf->b_refcnt) == 0) { 2144 ASSERT(list_link_active(&buf->b_arc_node)); 2145 } else { 2146 buf->b_flags &= ~ARC_PREFETCH; 2147 ARCSTAT_BUMP(arcstat_mru_hits); 2148 } 2149 buf->b_arc_access = lbolt; 2150 return; 2151 } 2152 2153 /* 2154 * This buffer has been "accessed" only once so far, 2155 * but it is still in the cache. Move it to the MFU 2156 * state. 2157 */ 2158 if (lbolt > buf->b_arc_access + ARC_MINTIME) { 2159 /* 2160 * More than 125ms have passed since we 2161 * instantiated this buffer. Move it to the 2162 * most frequently used state. 
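 * (ARC_MINTIME is the threshold used in the test above; accesses
 * arriving within that window leave the buffer in the MRU state, so a
 * tight burst of reads does not count as reuse.)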
2163 */ 2164 buf->b_arc_access = lbolt; 2165 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2166 arc_change_state(arc_mfu, buf, hash_lock); 2167 } 2168 ARCSTAT_BUMP(arcstat_mru_hits); 2169 } else if (buf->b_state == arc_mru_ghost) { 2170 arc_state_t *new_state; 2171 /* 2172 * This buffer has been "accessed" recently, but 2173 * was evicted from the cache. Move it to the 2174 * MFU state. 2175 */ 2176 2177 if (buf->b_flags & ARC_PREFETCH) { 2178 new_state = arc_mru; 2179 if (refcount_count(&buf->b_refcnt) > 0) 2180 buf->b_flags &= ~ARC_PREFETCH; 2181 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2182 } else { 2183 new_state = arc_mfu; 2184 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2185 } 2186 2187 buf->b_arc_access = lbolt; 2188 arc_change_state(new_state, buf, hash_lock); 2189 2190 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2191 } else if (buf->b_state == arc_mfu) { 2192 /* 2193 * This buffer has been accessed more than once and is 2194 * still in the cache. Keep it in the MFU state. 2195 * 2196 * NOTE: an add_reference() that occurred when we did 2197 * the arc_read() will have kicked this off the list. 2198 * If it was a prefetch, we will explicitly move it to 2199 * the head of the list now. 2200 */ 2201 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2202 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2203 ASSERT(list_link_active(&buf->b_arc_node)); 2204 } 2205 ARCSTAT_BUMP(arcstat_mfu_hits); 2206 buf->b_arc_access = lbolt; 2207 } else if (buf->b_state == arc_mfu_ghost) { 2208 arc_state_t *new_state = arc_mfu; 2209 /* 2210 * This buffer has been accessed more than once but has 2211 * been evicted from the cache. Move it back to the 2212 * MFU state. 2213 */ 2214 2215 if (buf->b_flags & ARC_PREFETCH) { 2216 /* 2217 * This is a prefetch access... 2218 * move this block back to the MRU state. 2219 */ 2220 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 2221 new_state = arc_mru; 2222 } 2223 2224 buf->b_arc_access = lbolt; 2225 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2226 arc_change_state(new_state, buf, hash_lock); 2227 2228 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2229 } else if (buf->b_state == arc_l2c_only) { 2230 /* 2231 * This buffer is on the 2nd Level ARC. 2232 */ 2233 2234 buf->b_arc_access = lbolt; 2235 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2236 arc_change_state(arc_mfu, buf, hash_lock); 2237 } else { 2238 ASSERT(!"invalid arc state"); 2239 } 2240 } 2241 2242 /* a generic arc_done_func_t which you can use */ 2243 /* ARGSUSED */ 2244 void 2245 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2246 { 2247 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2248 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2249 } 2250 2251 /* a generic arc_done_func_t */ 2252 void 2253 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2254 { 2255 arc_buf_t **bufp = arg; 2256 if (zio && zio->io_error) { 2257 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2258 *bufp = NULL; 2259 } else { 2260 *bufp = buf; 2261 } 2262 } 2263 2264 static void 2265 arc_read_done(zio_t *zio) 2266 { 2267 arc_buf_hdr_t *hdr, *found; 2268 arc_buf_t *buf; 2269 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2270 kmutex_t *hash_lock; 2271 arc_callback_t *callback_list, *acb; 2272 int freeable = FALSE; 2273 2274 buf = zio->io_private; 2275 hdr = buf->b_hdr; 2276 2277 /* 2278 * The hdr was inserted into hash-table and removed from lists 2279 * prior to starting I/O. 
We should find this header, since
 * it's in the hash table, and it should be legit since it's
 * not possible to evict it during the I/O.  The only possible
 * reason for it not to be found is if we were freed during the
 * read.
 */
	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
	    &hash_lock);

	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
	    (found == hdr && HDR_L2_READING(hdr)));

	hdr->b_flags &= ~ARC_L2_EVICTED;
	if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
		hdr->b_flags |= ARC_DONT_L2CACHE;

	/* byteswap if necessary */
	callback_list = hdr->b_acb;
	ASSERT(callback_list != NULL);
	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
		callback_list->acb_byteswap(buf->b_data, hdr->b_size);

	arc_cksum_compute(buf, B_FALSE);

	/* create copies of the data buffer for the callers */
	abuf = buf;
	for (acb = callback_list; acb; acb = acb->acb_next) {
		if (acb->acb_done) {
			if (abuf == NULL)
				abuf = arc_buf_clone(buf);
			acb->acb_buf = abuf;
			abuf = NULL;
		}
	}
	hdr->b_acb = NULL;
	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	ASSERT(!HDR_BUF_AVAILABLE(hdr));
	if (abuf == buf)
		hdr->b_flags |= ARC_BUF_AVAILABLE;

	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);

	if (zio->io_error != 0) {
		hdr->b_flags |= ARC_IO_ERROR;
		if (hdr->b_state != arc_anon)
			arc_change_state(arc_anon, hdr, hash_lock);
		if (HDR_IN_HASH_TABLE(hdr))
			buf_hash_remove(hdr);
		freeable = refcount_is_zero(&hdr->b_refcnt);
		/* convert checksum errors into IO errors */
		if (zio->io_error == ECKSUM)
			zio->io_error = EIO;
	}

	/*
	 * Broadcast before we drop the hash_lock to avoid the possibility
	 * that the hdr (and hence the cv) might be freed before we get to
	 * the cv_broadcast().
	 */
	cv_broadcast(&hdr->b_cv);

	if (hash_lock) {
		/*
		 * Only call arc_access on anonymous buffers.  This is because
		 * if we've issued an I/O for an evicted buffer, we've already
		 * called arc_access (to prevent any simultaneous readers from
		 * getting confused).
		 */
		if (zio->io_error == 0 && hdr->b_state == arc_anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else {
		/*
		 * This block was freed while we waited for the read to
		 * complete.  It has been removed from the hash table and
		 * moved to the anonymous state (so that it won't show up
		 * in the cache).
		 */
		ASSERT3P(hdr->b_state, ==, arc_anon);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/* execute each callback and free its structure */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done)
			acb->acb_done(zio, acb->acb_buf, acb->acb_private);

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_next;
		kmem_free(acb, sizeof (arc_callback_t));
	}

	if (freeable)
		arc_hdr_destroy(hdr);
}

/*
 * "Read" the block at the specified DVA (in bp) via the
 * cache.  If the block is found in the cache, invoke the provided
 * callback immediately and return.
Note that the `zio' parameter 2384 * in the callback will be NULL in this case, since no IO was 2385 * required. If the block is not in the cache pass the read request 2386 * on to the spa with a substitute callback function, so that the 2387 * requested block will be added to the cache. 2388 * 2389 * If a read request arrives for a block that has a read in-progress, 2390 * either wait for the in-progress read to complete (and return the 2391 * results); or, if this is a read with a "done" func, add a record 2392 * to the read to invoke the "done" func when the read completes, 2393 * and return; or just return. 2394 * 2395 * arc_read_done() will invoke all the requested "done" functions 2396 * for readers of this block. 2397 */ 2398 int 2399 arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 2400 arc_done_func_t *done, void *private, int priority, int flags, 2401 uint32_t *arc_flags, zbookmark_t *zb) 2402 { 2403 arc_buf_hdr_t *hdr; 2404 arc_buf_t *buf; 2405 kmutex_t *hash_lock; 2406 zio_t *rzio; 2407 2408 top: 2409 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2410 if (hdr && hdr->b_datacnt > 0) { 2411 2412 *arc_flags |= ARC_CACHED; 2413 2414 if (HDR_IO_IN_PROGRESS(hdr)) { 2415 2416 if (*arc_flags & ARC_WAIT) { 2417 cv_wait(&hdr->b_cv, hash_lock); 2418 mutex_exit(hash_lock); 2419 goto top; 2420 } 2421 ASSERT(*arc_flags & ARC_NOWAIT); 2422 2423 if (done) { 2424 arc_callback_t *acb = NULL; 2425 2426 acb = kmem_zalloc(sizeof (arc_callback_t), 2427 KM_SLEEP); 2428 acb->acb_done = done; 2429 acb->acb_private = private; 2430 acb->acb_byteswap = swap; 2431 if (pio != NULL) 2432 acb->acb_zio_dummy = zio_null(pio, 2433 spa, NULL, NULL, flags); 2434 2435 ASSERT(acb->acb_done != NULL); 2436 acb->acb_next = hdr->b_acb; 2437 hdr->b_acb = acb; 2438 add_reference(hdr, hash_lock, private); 2439 mutex_exit(hash_lock); 2440 return (0); 2441 } 2442 mutex_exit(hash_lock); 2443 return (0); 2444 } 2445 2446 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2447 2448 if (done) { 2449 add_reference(hdr, hash_lock, private); 2450 /* 2451 * If this block is already in use, create a new 2452 * copy of the data so that we will be guaranteed 2453 * that arc_release() will always succeed. 
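 * (The clone gives this caller a private arc_buf_t, so a later
 * arc_release() can always detach it into an anonymous header without
 * disturbing other readers of the same block.)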
2454 */ 2455 buf = hdr->b_buf; 2456 ASSERT(buf); 2457 ASSERT(buf->b_data); 2458 if (HDR_BUF_AVAILABLE(hdr)) { 2459 ASSERT(buf->b_efunc == NULL); 2460 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2461 } else { 2462 buf = arc_buf_clone(buf); 2463 } 2464 } else if (*arc_flags & ARC_PREFETCH && 2465 refcount_count(&hdr->b_refcnt) == 0) { 2466 hdr->b_flags |= ARC_PREFETCH; 2467 } 2468 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2469 arc_access(hdr, hash_lock); 2470 mutex_exit(hash_lock); 2471 ARCSTAT_BUMP(arcstat_hits); 2472 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2473 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2474 data, metadata, hits); 2475 2476 if (done) 2477 done(NULL, buf, private); 2478 } else { 2479 uint64_t size = BP_GET_LSIZE(bp); 2480 arc_callback_t *acb; 2481 vdev_t *vd = NULL; 2482 daddr_t addr; 2483 2484 if (hdr == NULL) { 2485 /* this block is not in the cache */ 2486 arc_buf_hdr_t *exists; 2487 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2488 buf = arc_buf_alloc(spa, size, private, type); 2489 hdr = buf->b_hdr; 2490 hdr->b_dva = *BP_IDENTITY(bp); 2491 hdr->b_birth = bp->blk_birth; 2492 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2493 exists = buf_hash_insert(hdr, &hash_lock); 2494 if (exists) { 2495 /* somebody beat us to the hash insert */ 2496 mutex_exit(hash_lock); 2497 bzero(&hdr->b_dva, sizeof (dva_t)); 2498 hdr->b_birth = 0; 2499 hdr->b_cksum0 = 0; 2500 (void) arc_buf_remove_ref(buf, private); 2501 goto top; /* restart the IO request */ 2502 } 2503 /* if this is a prefetch, we don't have a reference */ 2504 if (*arc_flags & ARC_PREFETCH) { 2505 (void) remove_reference(hdr, hash_lock, 2506 private); 2507 hdr->b_flags |= ARC_PREFETCH; 2508 } 2509 if (BP_GET_LEVEL(bp) > 0) 2510 hdr->b_flags |= ARC_INDIRECT; 2511 } else { 2512 /* this block is in the ghost cache */ 2513 ASSERT(GHOST_STATE(hdr->b_state)); 2514 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2515 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2516 ASSERT(hdr->b_buf == NULL); 2517 2518 /* if this is a prefetch, we don't have a reference */ 2519 if (*arc_flags & ARC_PREFETCH) 2520 hdr->b_flags |= ARC_PREFETCH; 2521 else 2522 add_reference(hdr, hash_lock, private); 2523 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2524 buf->b_hdr = hdr; 2525 buf->b_data = NULL; 2526 buf->b_efunc = NULL; 2527 buf->b_private = NULL; 2528 buf->b_next = NULL; 2529 hdr->b_buf = buf; 2530 arc_get_data_buf(buf); 2531 ASSERT(hdr->b_datacnt == 0); 2532 hdr->b_datacnt = 1; 2533 2534 } 2535 2536 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2537 acb->acb_done = done; 2538 acb->acb_private = private; 2539 acb->acb_byteswap = swap; 2540 2541 ASSERT(hdr->b_acb == NULL); 2542 hdr->b_acb = acb; 2543 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2544 2545 /* 2546 * If the buffer has been evicted, migrate it to a present state 2547 * before issuing the I/O. Once we drop the hash-table lock, 2548 * the header will be marked as I/O in progress and have an 2549 * attached buffer. At this point, anybody who finds this 2550 * buffer ought to notice that it's legit but has a pending I/O. 
2551 */ 2552 2553 if (GHOST_STATE(hdr->b_state)) 2554 arc_access(hdr, hash_lock); 2555 2556 if (hdr->b_l2hdr != NULL) { 2557 vd = hdr->b_l2hdr->b_dev->l2ad_vdev; 2558 addr = hdr->b_l2hdr->b_daddr; 2559 } 2560 2561 mutex_exit(hash_lock); 2562 2563 ASSERT3U(hdr->b_size, ==, size); 2564 DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 2565 zbookmark_t *, zb); 2566 ARCSTAT_BUMP(arcstat_misses); 2567 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2568 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2569 data, metadata, misses); 2570 2571 if (l2arc_ndev != 0) { 2572 /* 2573 * Lock out device removal. 2574 */ 2575 spa_config_enter(spa, RW_READER, FTAG); 2576 2577 /* 2578 * Read from the L2ARC if the following are true: 2579 * 1. The L2ARC vdev was previously cached. 2580 * 2. This buffer still has L2ARC metadata. 2581 * 3. This buffer isn't currently writing to the L2ARC. 2582 * 4. The L2ARC entry wasn't evicted, which may 2583 * also have invalidated the vdev. 2584 */ 2585 if (vd != NULL && hdr->b_l2hdr != NULL && 2586 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr)) { 2587 l2arc_read_callback_t *cb; 2588 2589 if (vdev_is_dead(vd)) 2590 goto l2skip; 2591 2592 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 2593 ARCSTAT_BUMP(arcstat_l2_hits); 2594 2595 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 2596 KM_SLEEP); 2597 cb->l2rcb_buf = buf; 2598 cb->l2rcb_spa = spa; 2599 cb->l2rcb_bp = *bp; 2600 cb->l2rcb_zb = *zb; 2601 cb->l2rcb_flags = flags; 2602 2603 /* 2604 * l2arc read. 2605 */ 2606 rzio = zio_read_phys(pio, vd, addr, size, 2607 buf->b_data, ZIO_CHECKSUM_OFF, 2608 l2arc_read_done, cb, priority, flags | 2609 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL, 2610 B_FALSE); 2611 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 2612 zio_t *, rzio); 2613 spa_config_exit(spa, FTAG); 2614 2615 if (*arc_flags & ARC_NOWAIT) { 2616 zio_nowait(rzio); 2617 return (0); 2618 } 2619 2620 ASSERT(*arc_flags & ARC_WAIT); 2621 if (zio_wait(rzio) == 0) 2622 return (0); 2623 2624 /* l2arc read error; goto zio_read() */ 2625 } else { 2626 DTRACE_PROBE1(l2arc__miss, 2627 arc_buf_hdr_t *, hdr); 2628 ARCSTAT_BUMP(arcstat_l2_misses); 2629 if (HDR_L2_WRITING(hdr)) 2630 ARCSTAT_BUMP(arcstat_l2_rw_clash); 2631 l2skip: 2632 spa_config_exit(spa, FTAG); 2633 } 2634 } 2635 2636 rzio = zio_read(pio, spa, bp, buf->b_data, size, 2637 arc_read_done, buf, priority, flags, zb); 2638 2639 if (*arc_flags & ARC_WAIT) 2640 return (zio_wait(rzio)); 2641 2642 ASSERT(*arc_flags & ARC_NOWAIT); 2643 zio_nowait(rzio); 2644 } 2645 return (0); 2646 } 2647 2648 /* 2649 * arc_read() variant to support pool traversal. If the block is already 2650 * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2651 * The idea is that we don't want pool traversal filling up memory, but 2652 * if the ARC already has the data anyway, we shouldn't pay for the I/O. 
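 *
 * A hypothetical caller might look like this (sketch only; spa, bp and
 * a buffer sized to the block are assumed from surrounding context):
 *
 *	if (arc_tryread(spa, bp, data) == ENOENT) {
 *		... issue the read directly, e.g. via zio_read() ...
 *	}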
 */
int
arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_mtx;
	int rc = 0;

	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);

	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
		arc_buf_t *buf = hdr->b_buf;

		ASSERT(buf);
		while (buf->b_data == NULL) {
			buf = buf->b_next;
			ASSERT(buf);
		}
		bcopy(buf->b_data, data, hdr->b_size);
	} else {
		rc = ENOENT;
	}

	if (hash_mtx)
		mutex_exit(hash_mtx);

	return (rc);
}

void
arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
{
	ASSERT(buf->b_hdr != NULL);
	ASSERT(buf->b_hdr->b_state != arc_anon);
	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
	buf->b_efunc = func;
	buf->b_private = private;
}

/*
 * This is used by the DMU to let the ARC know that a buffer is
 * being evicted, so the ARC should clean up.  If this arc buf
 * is not yet in the evicted state, it will be put there.
 */
int
arc_buf_evict(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	arc_buf_t **bufp;

	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		/*
		 * We are in arc_do_user_evicts().
		 */
		ASSERT(buf->b_data == NULL);
		mutex_exit(&arc_eviction_mtx);
		return (0);
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);

	if (buf->b_data == NULL) {
		/*
		 * We are on the eviction list.
		 */
		mutex_exit(hash_lock);
		mutex_enter(&arc_eviction_mtx);
		if (buf->b_hdr == NULL) {
			/*
			 * We are already in arc_do_user_evicts().
			 */
			mutex_exit(&arc_eviction_mtx);
			return (0);
		} else {
			arc_buf_t copy = *buf; /* structure assignment */
			/*
			 * Process this buffer now
			 * but let arc_do_user_evicts() do the reaping.
			 */
			buf->b_efunc = NULL;
			mutex_exit(&arc_eviction_mtx);
			VERIFY(copy.b_efunc(&copy) == 0);
			return (1);
		}
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

	/*
	 * Pull this buffer off of the hdr
	 */
	bufp = &hdr->b_buf;
	while (*bufp != buf)
		bufp = &(*bufp)->b_next;
	*bufp = buf->b_next;

	ASSERT(buf->b_data != NULL);
	arc_buf_destroy(buf, FALSE, FALSE);

	if (hdr->b_datacnt == 0) {
		arc_state_t *old_state = hdr->b_state;
		arc_state_t *evicted_state;

		ASSERT(refcount_is_zero(&hdr->b_refcnt));

		evicted_state =
		    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

		mutex_enter(&old_state->arcs_mtx);
		mutex_enter(&evicted_state->arcs_mtx);

		arc_change_state(evicted_state, hdr, hash_lock);
		ASSERT(HDR_IN_HASH_TABLE(hdr));
		hdr->b_flags |= ARC_IN_HASH_TABLE;
		hdr->b_flags &= ~ARC_BUF_AVAILABLE;

		mutex_exit(&evicted_state->arcs_mtx);
		mutex_exit(&old_state->arcs_mtx);
	}
	mutex_exit(hash_lock);

	VERIFY(buf->b_efunc(buf) == 0);
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
	return (1);
}
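/*
 * A hypothetical user of the release interface below (sketch only; the
 * tag and the modification step are illustrative, not from this file):
 *
 *	arc_release(buf, tag);
 *	ASSERT(arc_released(buf));
 *	... modify buf->b_data ...
 */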
/*
 * Release this buffer from the cache.  This must be done
 * after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	l2arc_buf_hdr_t *l2hdr = NULL;
	uint64_t buf_size;

	/* this buffer is not on any list */
	ASSERT(refcount_count(&hdr->b_refcnt) > 0);

	if (hdr->b_state == arc_anon) {
		/* this buffer is already released */
		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
		ASSERT(BUF_EMPTY(hdr));
		ASSERT(buf->b_efunc == NULL);
		arc_buf_thaw(buf);
		return;
	}

	mutex_enter(hash_lock);

	/*
	 * Do we have more than one buf?
	 */
	if (hdr->b_buf != buf || buf->b_next != NULL) {
		arc_buf_hdr_t *nhdr;
		arc_buf_t **bufp;
		uint64_t blksz = hdr->b_size;
		spa_t *spa = hdr->b_spa;
		arc_buf_contents_t type = hdr->b_type;
		uint32_t flags = hdr->b_flags;

		ASSERT(hdr->b_datacnt > 1);
		/*
		 * Pull the data off of this buf and attach it to
		 * a new anonymous buf.
		 */
		(void) remove_reference(hdr, hash_lock, tag);
		bufp = &hdr->b_buf;
		while (*bufp != buf)
			bufp = &(*bufp)->b_next;
		*bufp = (*bufp)->b_next;
		buf->b_next = NULL;

		ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
		atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
		if (refcount_is_zero(&hdr->b_refcnt)) {
			uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
			ASSERT3U(*size, >=, hdr->b_size);
			atomic_add_64(size, -hdr->b_size);
		}
		hdr->b_datacnt -= 1;
		if (hdr->b_l2hdr != NULL) {
			mutex_enter(&l2arc_buflist_mtx);
			l2hdr = hdr->b_l2hdr;
			hdr->b_l2hdr = NULL;
			buf_size = hdr->b_size;
		}
		arc_cksum_verify(buf);

		mutex_exit(hash_lock);

		nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
		nhdr->b_size = blksz;
		nhdr->b_spa = spa;
		nhdr->b_type = type;
		nhdr->b_buf = buf;
		nhdr->b_state = arc_anon;
		nhdr->b_arc_access = 0;
		nhdr->b_flags = flags & ARC_L2_WRITING;
		nhdr->b_l2hdr = NULL;
		nhdr->b_datacnt = 1;
		nhdr->b_freeze_cksum = NULL;
		(void) refcount_add(&nhdr->b_refcnt, tag);
		buf->b_hdr = nhdr;
		atomic_add_64(&arc_anon->arcs_size, blksz);
	} else {
		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
		ASSERT(!list_link_active(&hdr->b_arc_node));
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		arc_change_state(arc_anon, hdr, hash_lock);
		hdr->b_arc_access = 0;
		if (hdr->b_l2hdr != NULL) {
			mutex_enter(&l2arc_buflist_mtx);
			l2hdr = hdr->b_l2hdr;
			hdr->b_l2hdr = NULL;
			buf_size = hdr->b_size;
		}
		mutex_exit(hash_lock);

		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
		arc_buf_thaw(buf);
	}
	buf->b_efunc = NULL;
	buf->b_private = NULL;

	if (l2hdr) {
		list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
		kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
		ARCSTAT_INCR(arcstat_l2_size, -buf_size);
	}
	if (MUTEX_HELD(&l2arc_buflist_mtx))
		mutex_exit(&l2arc_buflist_mtx);
}

int
arc_released(arc_buf_t *buf)
{
	return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
}

int
arc_has_callback(arc_buf_t *buf)
{
	return (buf->b_efunc != NULL);
}

#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
	return (refcount_count(&buf->b_hdr->b_refcnt));
}
#endif

static void
arc_write_ready(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	if (zio->io_error == 0 && callback->awcb_ready) {
		ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
		callback->awcb_ready(zio, buf, callback->awcb_private);
	}
	/*
	 * If the IO is already in progress, then this is a re-write
	 * attempt, so we need to thaw and re-compute the cksum.  It is
	 * the responsibility of the callback to handle the freeing
	 * and accounting for any re-write attempt.  If we don't have a
	 * callback registered then simply free the block here.
	 */
	if (HDR_IO_IN_PROGRESS(hdr)) {
		if (!BP_IS_HOLE(&zio->io_bp_orig) &&
		    callback->awcb_ready == NULL) {
			zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg,
			    &zio->io_bp_orig, NULL, NULL));
		}
		mutex_enter(&hdr->b_freeze_lock);
		if (hdr->b_freeze_cksum != NULL) {
			kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
			hdr->b_freeze_cksum = NULL;
		}
		mutex_exit(&hdr->b_freeze_lock);
	}
	arc_cksum_compute(buf, B_FALSE);
	hdr->b_flags |= ARC_IO_IN_PROGRESS;
}

static void
arc_write_done(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	hdr->b_acb = NULL;

	/* this buffer is on no lists and is not in the hash table */
	ASSERT3P(hdr->b_state, ==, arc_anon);

	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
	hdr->b_birth = zio->io_bp->blk_birth;
	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
	/*
	 * If the block to be written was all-zero, we may have
	 * compressed it away.  In this case no write was performed
	 * so there will be no dva/birth-date/checksum.  The buffer
	 * must therefore remain anonymous (and uncached).
	 */
	if (!BUF_EMPTY(hdr)) {
		arc_buf_hdr_t *exists;
		kmutex_t *hash_lock;

		arc_cksum_verify(buf);

		exists = buf_hash_insert(hdr, &hash_lock);
		if (exists) {
			/*
			 * This can only happen if we overwrite for
			 * sync-to-convergence, because we remove
			 * buffers from the hash table when we arc_free().
			 */
			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
			    BP_IDENTITY(zio->io_bp)));
			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
			    zio->io_bp->blk_birth);

			ASSERT(refcount_is_zero(&exists->b_refcnt));
			arc_change_state(arc_anon, exists, hash_lock);
			mutex_exit(hash_lock);
			arc_hdr_destroy(exists);
			exists = buf_hash_insert(hdr, &hash_lock);
			ASSERT3P(exists, ==, NULL);
		}
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
		arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else if (callback->awcb_done == NULL) {
		int destroy_hdr;
		/*
		 * This is an anonymous buffer with no user callback,
		 * destroy it if there are no active references.
3012 */ 3013 mutex_enter(&arc_eviction_mtx); 3014 destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 3015 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3016 mutex_exit(&arc_eviction_mtx); 3017 if (destroy_hdr) 3018 arc_hdr_destroy(hdr); 3019 } else { 3020 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3021 } 3022 3023 if (callback->awcb_done) { 3024 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3025 callback->awcb_done(zio, buf, callback->awcb_private); 3026 } 3027 3028 kmem_free(callback, sizeof (arc_write_callback_t)); 3029 } 3030 3031 zio_t * 3032 arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, 3033 uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 3034 arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 3035 int flags, zbookmark_t *zb) 3036 { 3037 arc_buf_hdr_t *hdr = buf->b_hdr; 3038 arc_write_callback_t *callback; 3039 zio_t *zio; 3040 3041 /* this is a private buffer - no locking required */ 3042 ASSERT3P(hdr->b_state, ==, arc_anon); 3043 ASSERT(BUF_EMPTY(hdr)); 3044 ASSERT(!HDR_IO_ERROR(hdr)); 3045 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3046 ASSERT(hdr->b_acb == 0); 3047 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3048 callback->awcb_ready = ready; 3049 callback->awcb_done = done; 3050 callback->awcb_private = private; 3051 callback->awcb_buf = buf; 3052 zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, 3053 buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback, 3054 priority, flags, zb); 3055 3056 return (zio); 3057 } 3058 3059 int 3060 arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 3061 zio_done_func_t *done, void *private, uint32_t arc_flags) 3062 { 3063 arc_buf_hdr_t *ab; 3064 kmutex_t *hash_lock; 3065 zio_t *zio; 3066 3067 /* 3068 * If this buffer is in the cache, release it, so it 3069 * can be re-used. 3070 */ 3071 ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 3072 if (ab != NULL) { 3073 /* 3074 * The checksum of blocks to free is not always 3075 * preserved (eg. on the deadlist). However, if it is 3076 * nonzero, it should match what we have in the cache. 3077 */ 3078 ASSERT(bp->blk_cksum.zc_word[0] == 0 || 3079 ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 3080 if (ab->b_state != arc_anon) 3081 arc_change_state(arc_anon, ab, hash_lock); 3082 if (HDR_IO_IN_PROGRESS(ab)) { 3083 /* 3084 * This should only happen when we prefetch. 3085 */ 3086 ASSERT(ab->b_flags & ARC_PREFETCH); 3087 ASSERT3U(ab->b_datacnt, ==, 1); 3088 ab->b_flags |= ARC_FREED_IN_READ; 3089 if (HDR_IN_HASH_TABLE(ab)) 3090 buf_hash_remove(ab); 3091 ab->b_arc_access = 0; 3092 bzero(&ab->b_dva, sizeof (dva_t)); 3093 ab->b_birth = 0; 3094 ab->b_cksum0 = 0; 3095 ab->b_buf->b_efunc = NULL; 3096 ab->b_buf->b_private = NULL; 3097 mutex_exit(hash_lock); 3098 } else if (refcount_is_zero(&ab->b_refcnt)) { 3099 ab->b_flags |= ARC_FREE_IN_PROGRESS; 3100 mutex_exit(hash_lock); 3101 arc_hdr_destroy(ab); 3102 ARCSTAT_BUMP(arcstat_deleted); 3103 } else { 3104 /* 3105 * We still have an active reference on this 3106 * buffer. This can happen, e.g., from 3107 * dbuf_unoverride(). 
 */
			ASSERT(!HDR_IN_HASH_TABLE(ab));
			ab->b_arc_access = 0;
			bzero(&ab->b_dva, sizeof (dva_t));
			ab->b_birth = 0;
			ab->b_cksum0 = 0;
			ab->b_buf->b_efunc = NULL;
			ab->b_buf->b_private = NULL;
			mutex_exit(hash_lock);
		}
	}

	zio = zio_free(pio, spa, txg, bp, done, private);

	if (arc_flags & ARC_WAIT)
		return (zio_wait(zio));

	ASSERT(arc_flags & ARC_NOWAIT);
	zio_nowait(zio);

	return (0);
}

static int
arc_memory_throttle(uint64_t reserve, uint64_t txg)
{
#ifdef _KERNEL
	uint64_t inflight_data = arc_anon->arcs_size;
	uint64_t available_memory = ptob(freemem);
	static uint64_t page_load = 0;
	static uint64_t last_txg = 0;

#if defined(__i386)
	available_memory =
	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
#endif
	if (available_memory >= zfs_write_limit_max)
		return (0);

	if (txg > last_txg) {
		last_txg = txg;
		page_load = 0;
	}
	/*
	 * If we are in pageout, we know that memory is already tight,
	 * the arc is already going to be evicting, so we just want to
	 * continue to let page writes occur as quickly as possible.
	 */
	if (curproc == proc_pageout) {
		if (page_load > MAX(ptob(minfree), available_memory) / 4)
			return (ERESTART);
		/* Note: reserve is inflated, so we deflate */
		page_load += reserve / 8;
		return (0);
	} else if (page_load > 0 && arc_reclaim_needed()) {
		/* memory is low, delay before restarting */
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		return (EAGAIN);
	}
	page_load = 0;

	if (arc_size > arc_c_min) {
		uint64_t evictable_memory =
		    arc_mru->arcs_lsize[ARC_BUFC_DATA] +
		    arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
		    arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
		available_memory += MIN(evictable_memory, arc_size - arc_c_min);
	}

	if (inflight_data > available_memory / 4) {
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		return (ERESTART);
	}
#endif
	return (0);
}

void
arc_tempreserve_clear(uint64_t reserve)
{
	atomic_add_64(&arc_tempreserve, -reserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(uint64_t reserve, uint64_t txg)
{
	int error;

#ifdef ZFS_DEBUG
	/*
	 * Once in a while, fail for no reason.  Everything should cope.
	 */
	if (spa_get_random(10000) == 0) {
		dprintf("forcing random failure\n");
		return (ERESTART);
	}
#endif
	if (reserve > arc_c/4 && !arc_no_grow)
		arc_c = MIN(arc_c_max, reserve * 4);
	if (reserve > arc_c)
		return (ENOMEM);

	/*
	 * Writes will almost always require additional memory allocations
	 * in order to compress/encrypt/etc the data.  We therefore need to
	 * make sure that there is sufficient available memory for this.
	 */
	if (error = arc_memory_throttle(reserve, txg))
		return (error);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large.  We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail.  Not a huge deal.
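 *
 * A hypothetical caller pairs each successful reserve with a matching
 * clear once the dirty data has been accounted for (sketch only;
 * nbytes is illustrative):
 *
 *	if (arc_tempreserve_space(nbytes, txg) == 0) {
 *		... dirty the data ...
 *		arc_tempreserve_clear(nbytes);
 *	}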
3226 */ 3227 if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 3228 arc_anon->arcs_size > arc_c / 4) { 3229 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3230 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3231 arc_tempreserve>>10, 3232 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3233 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3234 reserve>>10, arc_c>>10); 3235 return (ERESTART); 3236 } 3237 atomic_add_64(&arc_tempreserve, reserve); 3238 return (0); 3239 } 3240 3241 void 3242 arc_init(void) 3243 { 3244 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3245 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3246 3247 /* Convert seconds to clock ticks */ 3248 arc_min_prefetch_lifespan = 1 * hz; 3249 3250 /* Start out with 1/8 of all memory */ 3251 arc_c = physmem * PAGESIZE / 8; 3252 3253 #ifdef _KERNEL 3254 /* 3255 * On architectures where the physical memory can be larger 3256 * than the addressable space (intel in 32-bit mode), we may 3257 * need to limit the cache to 1/8 of VM size. 3258 */ 3259 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3260 #endif 3261 3262 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 3263 arc_c_min = MAX(arc_c / 4, 64<<20); 3264 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 3265 if (arc_c * 8 >= 1<<30) 3266 arc_c_max = (arc_c * 8) - (1<<30); 3267 else 3268 arc_c_max = arc_c_min; 3269 arc_c_max = MAX(arc_c * 6, arc_c_max); 3270 3271 /* 3272 * Allow the tunables to override our calculations if they are 3273 * reasonable (ie. over 64MB) 3274 */ 3275 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 3276 arc_c_max = zfs_arc_max; 3277 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 3278 arc_c_min = zfs_arc_min; 3279 3280 arc_c = arc_c_max; 3281 arc_p = (arc_c >> 1); 3282 3283 /* limit meta-data to 1/4 of the arc capacity */ 3284 arc_meta_limit = arc_c_max / 4; 3285 3286 /* Allow the tunable to override if it is reasonable */ 3287 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3288 arc_meta_limit = zfs_arc_meta_limit; 3289 3290 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3291 arc_c_min = arc_meta_limit / 2; 3292 3293 /* if kmem_flags are set, lets try to use less memory */ 3294 if (kmem_debugging()) 3295 arc_c = arc_c / 2; 3296 if (arc_c < arc_c_min) 3297 arc_c = arc_c_min; 3298 3299 arc_anon = &ARC_anon; 3300 arc_mru = &ARC_mru; 3301 arc_mru_ghost = &ARC_mru_ghost; 3302 arc_mfu = &ARC_mfu; 3303 arc_mfu_ghost = &ARC_mfu_ghost; 3304 arc_l2c_only = &ARC_l2c_only; 3305 arc_size = 0; 3306 3307 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3308 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3309 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3310 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3311 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3312 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3313 3314 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 3315 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3316 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 3317 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3318 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 3319 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3320 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 3321 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3322 
list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 3323 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3324 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 3325 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3326 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 3327 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3328 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 3329 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3330 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 3331 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3332 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 3333 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3334 3335 buf_init(); 3336 3337 arc_thread_exit = 0; 3338 arc_eviction_list = NULL; 3339 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3340 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3341 3342 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3343 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3344 3345 if (arc_ksp != NULL) { 3346 arc_ksp->ks_data = &arc_stats; 3347 kstat_install(arc_ksp); 3348 } 3349 3350 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3351 TS_RUN, minclsyspri); 3352 3353 arc_dead = FALSE; 3354 arc_warm = B_FALSE; 3355 3356 if (zfs_write_limit_max == 0) 3357 zfs_write_limit_max = physmem * PAGESIZE >> 3358 zfs_write_limit_shift; 3359 else 3360 zfs_write_limit_shift = 0; 3361 } 3362 3363 void 3364 arc_fini(void) 3365 { 3366 mutex_enter(&arc_reclaim_thr_lock); 3367 arc_thread_exit = 1; 3368 while (arc_thread_exit != 0) 3369 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3370 mutex_exit(&arc_reclaim_thr_lock); 3371 3372 arc_flush(NULL); 3373 3374 arc_dead = TRUE; 3375 3376 if (arc_ksp != NULL) { 3377 kstat_delete(arc_ksp); 3378 arc_ksp = NULL; 3379 } 3380 3381 mutex_destroy(&arc_eviction_mtx); 3382 mutex_destroy(&arc_reclaim_thr_lock); 3383 cv_destroy(&arc_reclaim_thr_cv); 3384 3385 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 3386 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 3387 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 3388 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 3389 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 3390 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 3391 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 3392 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3393 3394 mutex_destroy(&arc_anon->arcs_mtx); 3395 mutex_destroy(&arc_mru->arcs_mtx); 3396 mutex_destroy(&arc_mru_ghost->arcs_mtx); 3397 mutex_destroy(&arc_mfu->arcs_mtx); 3398 mutex_destroy(&arc_mfu_ghost->arcs_mtx); 3399 3400 buf_fini(); 3401 } 3402 3403 /* 3404 * Level 2 ARC 3405 * 3406 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3407 * It uses dedicated storage devices to hold cached data, which are populated 3408 * using large infrequent writes. The main role of this cache is to boost 3409 * the performance of random read workloads. The intended L2ARC devices 3410 * include short-stroked disks, solid state disks, and other media with 3411 * substantially faster read latency than disk. 
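 * The flow of data and control between these layers is sketched below: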
 *
 *                 +-----------------------+
 *                 |         ARC           |
 *                 +-----------------------+
 *                    |         ^     ^
 *                    |         |     |
 *      l2arc_feed_thread()    arc_read()
 *                    |         |     |
 *                    |  l2arc read   |
 *                    V         |     |
 *               +---------------+    |
 *               |     L2ARC     |    |
 *               +---------------+    |
 *                   |    ^           |
 *          l2arc_write() |           |
 *                   |    |           |
 *                   V    |           |
 *                 +-------+   +-------+
 *                 | vdev  |   | vdev  |
 *                 | cache |   | cache |
 *                 +-------+   +-------+
 *                 +=========+  .-----.
 *                 :  L2ARC  : |-_____-|
 *                 : devices : | Disks |
 *                 +=========+ `-_____-'
 *
 * Read requests are satisfied from the following sources, in order:
 *
 *	1) ARC
 *	2) vdev cache of L2ARC devices
 *	3) L2ARC devices
 *	4) vdev cache of disks
 *	5) disks
 *
 * Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate this there are some significant differences between
 * the L2ARC and traditional cache design:
 *
 * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
 * the ARC behave as usual, freeing buffers and placing headers on ghost
 * lists.  The ARC does not send buffers to the L2ARC during eviction as
 * this would add inflated write latencies for all ARC memory pressure.
 *
 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
 * It does this by periodically scanning buffers from the eviction-end of
 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
 * not already there.  It scans until a headroom of buffers is satisfied,
 * which itself is a buffer for ARC eviction.  The thread that does this is
 * l2arc_feed_thread(), illustrated below; example sizes are included to
 * provide a better sense of ratio than this diagram:
 *
 *	       head -->                        tail
 *	        +---------------------+----------+
 *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *	        +---------------------+----------+   |   o L2ARC eligible
 *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
 *	        +---------------------+----------+   |
 *	             15.9 Gbytes      ^ 32 Mbytes    |
 *	                           headroom          |
 *	                                      l2arc_feed_thread()
 *	                                             |
 *	                 l2arc write hand <--[oooo]--'
 *	                         |           8 Mbyte
 *	                         |          write max
 *	                         V
 *		  +==============================+
 *	L2ARC dev |####|#|###|###|    |####| ... |
 *		  +==============================+
 *	                         32 Gbytes
 *
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers.  This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes.  This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static.
Instead of searching from tail of these 3498 * lists as pictured, the l2arc_feed_thread() will search from the list heads 3499 * for eligible buffers, greatly increasing its chance of finding them. 3500 * 3501 * The L2ARC device write speed is also boosted during this time so that 3502 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 3503 * there are no L2ARC reads, and no fear of degrading read performance 3504 * through increased writes. 3505 * 3506 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 3507 * the vdev queue can aggregate them into larger and fewer writes. Each 3508 * device is written to in a rotor fashion, sweeping writes through 3509 * available space then repeating. 3510 * 3511 * 7. The L2ARC does not store dirty content. It never needs to flush 3512 * write buffers back to disk based storage. 3513 * 3514 * 8. If an ARC buffer is written (and dirtied) which also exists in the 3515 * L2ARC, the now stale L2ARC buffer is immediately dropped. 3516 * 3517 * The performance of the L2ARC can be tweaked by a number of tunables, which 3518 * may be necessary for different workloads: 3519 * 3520 * l2arc_write_max max write bytes per interval 3521 * l2arc_write_boost extra write bytes during device warmup 3522 * l2arc_noprefetch skip caching prefetched buffers 3523 * l2arc_headroom number of max device writes to precache 3524 * l2arc_feed_secs seconds between L2ARC writing 3525 * 3526 * Tunables may be removed or added as future performance improvements are 3527 * integrated, and also may become zpool properties. 3528 */ 3529 3530 static void 3531 l2arc_hdr_stat_add(void) 3532 { 3533 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 3534 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 3535 } 3536 3537 static void 3538 l2arc_hdr_stat_remove(void) 3539 { 3540 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 3541 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 3542 } 3543 3544 /* 3545 * Cycle through L2ARC devices. This is how L2ARC load balances. 3546 * If a device is returned, this also returns holding the spa config lock. 3547 */ 3548 static l2arc_dev_t * 3549 l2arc_dev_get_next(void) 3550 { 3551 l2arc_dev_t *first, *next = NULL; 3552 3553 /* 3554 * Lock out the removal of spas (spa_namespace_lock), then removal 3555 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 3556 * both locks will be dropped and a spa config lock held instead. 3557 */ 3558 mutex_enter(&spa_namespace_lock); 3559 mutex_enter(&l2arc_dev_mtx); 3560 3561 /* if there are no vdevs, there is nothing to do */ 3562 if (l2arc_ndev == 0) 3563 goto out; 3564 3565 first = NULL; 3566 next = l2arc_dev_last; 3567 do { 3568 /* loop around the list looking for a non-faulted vdev */ 3569 if (next == NULL) { 3570 next = list_head(l2arc_dev_list); 3571 } else { 3572 next = list_next(l2arc_dev_list, next); 3573 if (next == NULL) 3574 next = list_head(l2arc_dev_list); 3575 } 3576 3577 /* if we have come back to the start, bail out */ 3578 if (first == NULL) 3579 first = next; 3580 else if (next == first) 3581 break; 3582 3583 } while (vdev_is_dead(next->l2ad_vdev)); 3584 3585 /* if we were unable to find any usable vdevs, return NULL */ 3586 if (vdev_is_dead(next->l2ad_vdev)) 3587 next = NULL; 3588 3589 l2arc_dev_last = next; 3590 3591 out: 3592 mutex_exit(&l2arc_dev_mtx); 3593 3594 /* 3595 * Grab the config lock to prevent the 'next' device from being 3596 * removed while we are writing to it. 
static void
l2arc_hdr_stat_add(void)
{
	ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}

static void
l2arc_hdr_stat_remove(void)
{
	ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
}

/*
 * Cycle through L2ARC devices.  This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
	l2arc_dev_t *first, *next = NULL;

	/*
	 * Lock out the removal of spas (spa_namespace_lock), then removal
	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
	 * both locks will be dropped and a spa config lock held instead.
	 */
	mutex_enter(&spa_namespace_lock);
	mutex_enter(&l2arc_dev_mtx);

	/* if there are no vdevs, there is nothing to do */
	if (l2arc_ndev == 0)
		goto out;

	first = NULL;
	next = l2arc_dev_last;
	do {
		/* loop around the list looking for a non-faulted vdev */
		if (next == NULL) {
			next = list_head(l2arc_dev_list);
		} else {
			next = list_next(l2arc_dev_list, next);
			if (next == NULL)
				next = list_head(l2arc_dev_list);
		}

		/* if we have come back to the start, bail out */
		if (first == NULL)
			first = next;
		else if (next == first)
			break;

	} while (vdev_is_dead(next->l2ad_vdev));

	/* if we were unable to find any usable vdevs, return NULL */
	if (vdev_is_dead(next->l2ad_vdev))
		next = NULL;

	l2arc_dev_last = next;

out:
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Grab the config lock to prevent the 'next' device from being
	 * removed while we are writing to it.
	 */
	if (next != NULL)
		spa_config_enter(next->l2ad_spa, RW_READER, next);
	mutex_exit(&spa_namespace_lock);

	return (next);
}

/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write(void)
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT(df->l2df_data != NULL);
		ASSERT(df->l2df_func != NULL);
		df->l2df_func(df->l2df_data, df->l2df_size);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}
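/*
 * A hedged sketch of the producer side of the free-on-write list.  The
 * enqueue code below is illustrative, not a helper in this file; the
 * equivalent logic runs where an ARC buffer must be freed while its
 * L2ARC write is still in flight, deferring the actual free until
 * l2arc_write_done() calls l2arc_do_free_on_write():
 *
 *	l2arc_data_free_t *df;
 *
 *	df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
 *	df->l2df_data = buf->b_data;
 *	df->l2df_size = hdr->b_size;
 *	df->l2df_func = free_func;	e.g. zio_data_buf_free
 *	mutex_enter(&l2arc_free_on_write_mtx);
 *	list_insert_head(l2arc_free_on_write, df);
 *	mutex_exit(&l2arc_free_on_write_mtx);
 */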
/*
 * A write to a cache device has completed.  Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
	l2arc_write_callback_t *cb;
	l2arc_dev_t *dev;
	list_t *buflist;
	arc_buf_hdr_t *head, *ab, *ab_prev;
	l2arc_buf_hdr_t *abl2;
	kmutex_t *hash_lock;

	cb = zio->io_private;
	ASSERT(cb != NULL);
	dev = cb->l2wcb_dev;
	ASSERT(dev != NULL);
	head = cb->l2wcb_head;
	ASSERT(head != NULL);
	buflist = dev->l2ad_buflist;
	ASSERT(buflist != NULL);
	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
	    l2arc_write_callback_t *, cb);

	if (zio->io_error != 0)
		ARCSTAT_BUMP(arcstat_l2_writes_error);

	mutex_enter(&l2arc_buflist_mtx);

	/*
	 * All writes completed, or an error was hit.
	 */
	for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
		ab_prev = list_prev(buflist, ab);

		hash_lock = HDR_LOCK(ab);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * This buffer misses out.  It may be in a stage
			 * of eviction.  Its ARC_L2_WRITING flag will be
			 * left set, denying reads to this buffer.
			 */
			ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
			continue;
		}

		if (zio->io_error != 0) {
			/*
			 * Error - drop L2ARC entry.
			 */
			list_remove(buflist, ab);
			abl2 = ab->b_l2hdr;
			ab->b_l2hdr = NULL;
			kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
			ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
		}

		/*
		 * Allow ARC to begin reads to this L2ARC entry.
		 */
		ab->b_flags &= ~ARC_L2_WRITING;

		mutex_exit(hash_lock);
	}

	atomic_inc_64(&l2arc_writes_done);
	list_remove(buflist, head);
	kmem_cache_free(hdr_cache, head);
	mutex_exit(&l2arc_buflist_mtx);

	l2arc_do_free_on_write();

	kmem_free(cb, sizeof (l2arc_write_callback_t));
}

/*
 * A read from a cache device has completed.  Validate buffer contents
 * before handing over to the regular ARC routines.
 */
static void
l2arc_read_done(zio_t *zio)
{
	l2arc_read_callback_t *cb;
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	zio_t *rzio;
	kmutex_t *hash_lock;
	int equal;

	cb = zio->io_private;
	ASSERT(cb != NULL);
	buf = cb->l2rcb_buf;
	ASSERT(buf != NULL);
	hdr = buf->b_hdr;
	ASSERT(hdr != NULL);

	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);

	/*
	 * Check that this buffer survived the L2ARC journey.
	 */
	equal = arc_cksum_equal(buf);
	if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
		mutex_exit(hash_lock);
		zio->io_private = buf;
		arc_read_done(zio);
	} else {
		mutex_exit(hash_lock);
		/*
		 * Buffer didn't survive caching.  Increment stats and
		 * reissue to the original storage device.
		 */
		if (zio->io_error != 0) {
			ARCSTAT_BUMP(arcstat_l2_io_error);
		} else {
			zio->io_error = EIO;
		}
		if (!equal)
			ARCSTAT_BUMP(arcstat_l2_cksum_bad);

		if (zio->io_waiter == NULL) {
			/*
			 * Let the resent I/O call arc_read_done() instead.
			 */
			zio->io_done = NULL;
			zio->io_flags &= ~ZIO_FLAG_DONT_CACHE;

			rzio = zio_read(NULL, cb->l2rcb_spa, &cb->l2rcb_bp,
			    buf->b_data, zio->io_size, arc_read_done, buf,
			    zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb);

			(void) zio_nowait(rzio);
		}
	}

	kmem_free(cb, sizeof (l2arc_read_callback_t));
}

/*
 * This is the list priority from which the L2ARC will search for pages
 * to cache.  This is used within loops (0..3) to cycle through lists in
 * the desired order.  This order can have a significant effect on cache
 * performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists.  This function returns a locked list, and also
 * returns the lock pointer.
 */
static list_t *
l2arc_list_locked(int list_num, kmutex_t **lock)
{
	list_t *list;

	ASSERT(list_num >= 0 && list_num <= 3);

	switch (list_num) {
	case 0:
		list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 1:
		list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	case 2:
		list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 3:
		list = &arc_mru->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	}

	ASSERT(!(MUTEX_HELD(*lock)));
	mutex_enter(*lock);
	return (list);
}
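/*
 * A sketch of the caller's side of l2arc_list_locked().  This is how
 * l2arc_write_buffers() below walks all four lists in priority order,
 * dropping each list lock before taking the next:
 *
 *	for (int try = 0; try <= 3; try++) {
 *		list = l2arc_list_locked(try, &list_lock);
 *		... scan candidate headers on this list ...
 *		mutex_exit(list_lock);
 *	}
 */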
3842 */ 3843 taddr = dev->l2ad_end; 3844 } else { 3845 taddr = dev->l2ad_hand + distance; 3846 } 3847 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 3848 uint64_t, taddr, boolean_t, all); 3849 3850 top: 3851 mutex_enter(&l2arc_buflist_mtx); 3852 for (ab = list_tail(buflist); ab; ab = ab_prev) { 3853 ab_prev = list_prev(buflist, ab); 3854 3855 hash_lock = HDR_LOCK(ab); 3856 if (!mutex_tryenter(hash_lock)) { 3857 /* 3858 * Missed the hash lock. Retry. 3859 */ 3860 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 3861 mutex_exit(&l2arc_buflist_mtx); 3862 mutex_enter(hash_lock); 3863 mutex_exit(hash_lock); 3864 goto top; 3865 } 3866 3867 if (HDR_L2_WRITE_HEAD(ab)) { 3868 /* 3869 * We hit a write head node. Leave it for 3870 * l2arc_write_done(). 3871 */ 3872 list_remove(buflist, ab); 3873 mutex_exit(hash_lock); 3874 continue; 3875 } 3876 3877 if (!all && ab->b_l2hdr != NULL && 3878 (ab->b_l2hdr->b_daddr > taddr || 3879 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 3880 /* 3881 * We've evicted to the target address, 3882 * or the end of the device. 3883 */ 3884 mutex_exit(hash_lock); 3885 break; 3886 } 3887 3888 if (HDR_FREE_IN_PROGRESS(ab)) { 3889 /* 3890 * Already on the path to destruction. 3891 */ 3892 mutex_exit(hash_lock); 3893 continue; 3894 } 3895 3896 if (ab->b_state == arc_l2c_only) { 3897 ASSERT(!HDR_L2_READING(ab)); 3898 /* 3899 * This doesn't exist in the ARC. Destroy. 3900 * arc_hdr_destroy() will call list_remove() 3901 * and decrement arcstat_l2_size. 3902 */ 3903 arc_change_state(arc_anon, ab, hash_lock); 3904 arc_hdr_destroy(ab); 3905 } else { 3906 /* 3907 * Invalidate issued or about to be issued 3908 * reads, since we may be about to write 3909 * over this location. 3910 */ 3911 if (HDR_L2_READING(ab)) { 3912 ARCSTAT_BUMP(arcstat_l2_evict_reading); 3913 ab->b_flags |= ARC_L2_EVICTED; 3914 } 3915 3916 /* 3917 * Tell ARC this no longer exists in L2ARC. 3918 */ 3919 if (ab->b_l2hdr != NULL) { 3920 abl2 = ab->b_l2hdr; 3921 ab->b_l2hdr = NULL; 3922 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 3923 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 3924 } 3925 list_remove(buflist, ab); 3926 3927 /* 3928 * This may have been leftover after a 3929 * failed write. 3930 */ 3931 ab->b_flags &= ~ARC_L2_WRITING; 3932 } 3933 mutex_exit(hash_lock); 3934 } 3935 mutex_exit(&l2arc_buflist_mtx); 3936 3937 spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); 3938 dev->l2ad_evict = taddr; 3939 } 3940 3941 /* 3942 * Find and write ARC buffers to the L2ARC device. 3943 * 3944 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 3945 * for reading until they have completed writing. 3946 */ 3947 static void 3948 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 3949 { 3950 arc_buf_hdr_t *ab, *ab_prev, *head; 3951 l2arc_buf_hdr_t *hdrl2; 3952 list_t *list; 3953 uint64_t passed_sz, write_sz, buf_sz, headroom; 3954 void *buf_data; 3955 kmutex_t *hash_lock, *list_lock; 3956 boolean_t have_lock, full; 3957 l2arc_write_callback_t *cb; 3958 zio_t *pio, *wzio; 3959 3960 ASSERT(dev->l2ad_vdev != NULL); 3961 3962 pio = NULL; 3963 write_sz = 0; 3964 full = B_FALSE; 3965 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3966 head->b_flags |= ARC_L2_WRITE_HEAD; 3967 3968 /* 3969 * Copy buffers for L2ARC writing. 3970 */ 3971 mutex_enter(&l2arc_buflist_mtx); 3972 for (int try = 0; try <= 3; try++) { 3973 list = l2arc_list_locked(try, &list_lock); 3974 passed_sz = 0; 3975 3976 /* 3977 * L2ARC fast warmup. 
/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 */
static void
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
	arc_buf_hdr_t *ab, *ab_prev, *head;
	l2arc_buf_hdr_t *hdrl2;
	list_t *list;
	uint64_t passed_sz, write_sz, buf_sz, headroom;
	void *buf_data;
	kmutex_t *hash_lock, *list_lock;
	boolean_t have_lock, full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;

	ASSERT(dev->l2ad_vdev != NULL);

	pio = NULL;
	write_sz = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	head->b_flags |= ARC_L2_WRITE_HEAD;

	/*
	 * Copy buffers for L2ARC writing.
	 */
	mutex_enter(&l2arc_buflist_mtx);
	for (int try = 0; try <= 3; try++) {
		list = l2arc_list_locked(try, &list_lock);
		passed_sz = 0;

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		headroom = target_sz * l2arc_headroom;
		if (arc_warm == B_FALSE)
			ab = list_head(list);
		else
			ab = list_tail(list);

		for (; ab; ab = ab_prev) {
			if (arc_warm == B_FALSE)
				ab_prev = list_next(list, ab);
			else
				ab_prev = list_prev(list, ab);

			hash_lock = HDR_LOCK(ab);
			have_lock = MUTEX_HELD(hash_lock);
			if (!have_lock && !mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += ab->b_size;
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (ab->b_spa != spa) {
				mutex_exit(hash_lock);
				continue;
			}

			if (ab->b_l2hdr != NULL) {
				/*
				 * Already in L2ARC.
				 */
				mutex_exit(hash_lock);
				continue;
			}

			if (HDR_IO_IN_PROGRESS(ab) || HDR_DONT_L2CACHE(ab)) {
				mutex_exit(hash_lock);
				continue;
			}

			if ((write_sz + ab->b_size) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			if (ab->b_buf == NULL) {
				DTRACE_PROBE1(l2arc__buf__null, void *, ab);
				mutex_exit(hash_lock);
				continue;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				list_insert_head(dev->l2ad_buflist, head);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			/*
			 * Create and add a new L2ARC header.
			 */
			hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
			hdrl2->b_dev = dev;
			hdrl2->b_daddr = dev->l2ad_hand;

			ab->b_flags |= ARC_L2_WRITING;
			ab->b_l2hdr = hdrl2;
			list_insert_head(dev->l2ad_buflist, ab);
			buf_data = ab->b_buf->b_data;
			buf_sz = ab->b_size;

			/*
			 * Compute and store the buffer cksum before
			 * writing.  On debug the cksum is verified first.
			 */
			arc_cksum_verify(ab->b_buf);
			arc_cksum_compute(ab->b_buf, B_TRUE);

			mutex_exit(hash_lock);

			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);
			(void) zio_nowait(wzio);

			write_sz += buf_sz;
			dev->l2ad_hand += buf_sz;
		}

		mutex_exit(list_lock);

		if (full == B_TRUE)
			break;
	}
	mutex_exit(&l2arc_buflist_mtx);

	if (pio == NULL) {
		ASSERT3U(write_sz, ==, 0);
		kmem_cache_free(hdr_cache, head);
		return;
	}

	ASSERT3U(write_sz, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_size, write_sz);
	spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		spa_l2cache_space_update(dev->l2ad_vdev, 0,
		    dev->l2ad_end - dev->l2ad_hand);
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_evict = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	(void) zio_wait(pio);
}
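/*
 * A hedged summary of the skip tests above, rewritten as a hypothetical
 * predicate.  This helper does not exist in this file; it simply
 * gathers the conditions that l2arc_write_buffers() checks inline on
 * each candidate header:
 *
 *	static boolean_t
 *	l2arc_write_eligible(spa_t *spa, arc_buf_hdr_t *ab)
 *	{
 *		return (ab->b_spa == spa &&		right pool
 *		    ab->b_l2hdr == NULL &&		not already cached
 *		    !HDR_IO_IN_PROGRESS(ab) &&		no I/O in flight
 *		    !HDR_DONT_L2CACHE(ab) &&		not opted out
 *		    ab->b_buf != NULL);			has data to copy
 *	}
 */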
/*
 * This thread feeds the L2ARC at regular intervals.  This is the
 * beating heart of the L2ARC.
 */
static void
l2arc_feed_thread(void)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size;

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		/*
		 * Pause for l2arc_feed_secs seconds between writes.
		 */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
		    lbolt + (hz * l2arc_feed_secs));
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa.  This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal.  l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT(spa != NULL);

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = dev->l2ad_write;
		if (arc_warm == B_FALSE)
			size += dev->l2ad_boost;

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		l2arc_write_buffers(spa, dev, size);
		spa_config_exit(spa, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}
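/*
 * A hedged sketch of the CPR (suspend/resume) wait pattern used by
 * l2arc_feed_thread() above; the names below are generic placeholders.
 * Any kernel thread that blocks while registered with the callb
 * framework follows the same shape:
 *
 *	CALLB_CPR_INIT(&cpr, &lock, callb_generic_cpr, FTAG);
 *	mutex_enter(&lock);
 *	while (!exiting) {
 *		CALLB_CPR_SAFE_BEGIN(&cpr);
 *		(void) cv_timedwait(&cv, &lock, lbolt + delay);
 *		CALLB_CPR_SAFE_END(&cpr, &lock);
 *		... do periodic work ...
 *	}
 *	CALLB_CPR_EXIT(&cpr);		this also drops the lock
 *	thread_exit();
 */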
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}

/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_write = l2arc_write_max;
	adddev->l2ad_boost = l2arc_write_boost;
	adddev->l2ad_start = start;
	adddev->l2ad_end = end;
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_evict = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	ASSERT3U(adddev->l2ad_write, >, 0);

	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
	list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2node));

	spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0);

	/*
	 * Add device to global list.
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev.
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT(remdev != NULL);

	/*
	 * Remove device from global list.
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(remdev->l2ad_buflist);
	kmem_free(remdev->l2ad_buflist, sizeof (list_t));
	kmem_free(remdev, sizeof (l2arc_dev_t));
}

void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}
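/*
 * A hedged sketch of the module lifecycle implied by the functions
 * above.  Callers and ordering are simplified here; the real call
 * sites live in the spa and dmu code:
 *
 *	l2arc_init();				once, at module load
 *	l2arc_add_vdev(spa, vd, start, end);	per cache device added
 *	... l2arc_feed_thread() runs ...
 *	l2arc_remove_vdev(vd);			per cache device removed
 *	l2arc_fini();				once, at module unload
 */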
void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini().
	 * Because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_buflist_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}