1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 24 * Copyright (c) 2012 by Delphix. All rights reserved. 25 */ 26 27 /* 28 * DVA-based Adjustable Replacement Cache 29 * 30 * While much of the theory of operation used here is 31 * based on the self-tuning, low overhead replacement cache 32 * presented by Megiddo and Modha at FAST 2003, there are some 33 * significant differences: 34 * 35 * 1. The Megiddo and Modha model assumes any page is evictable. 36 * Pages in its cache cannot be "locked" into memory. This makes 37 * the eviction algorithm simple: evict the last page in the list. 38 * This also make the performance characteristics easy to reason 39 * about. Our cache is not so simple. At any given moment, some 40 * subset of the blocks in the cache are un-evictable because we 41 * have handed out a reference to them. Blocks are only evictable 42 * when there are no external references active. This makes 43 * eviction far more problematic: we choose to evict the evictable 44 * blocks that are the "lowest" in the list. 45 * 46 * There are times when it is not possible to evict the requested 47 * space. In these circumstances we are unable to adjust the cache 48 * size. To prevent the cache growing unbounded at these times we 49 * implement a "cache throttle" that slows the flow of new data 50 * into the cache until we can make space available. 51 * 52 * 2. The Megiddo and Modha model assumes a fixed cache size. 53 * Pages are evicted when the cache is full and there is a cache 54 * miss. Our model has a variable sized cache. It grows with 55 * high use, but also tries to react to memory pressure from the 56 * operating system: decreasing its size when system memory is 57 * tight. 58 * 59 * 3. The Megiddo and Modha model assumes a fixed page size. All 60 * elements of the cache are therefor exactly the same size. So 61 * when adjusting the cache size following a cache miss, its simply 62 * a matter of choosing a single page to evict. In our model, we 63 * have variable sized cache blocks (rangeing from 512 bytes to 64 * 128K bytes). We therefor choose a set of blocks to evict to make 65 * space for a cache miss that approximates as closely as possible 66 * the space used by the new block. 67 * 68 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache" 69 * by N. Megiddo & D. Modha, FAST 2003 70 */ 71 72 /* 73 * The locking model: 74 * 75 * A new reference to a cache buffer can be obtained in two 76 * ways: 1) via a hash table lookup using the DVA as a key, 77 * or 2) via one of the ARC lists. The arc_read() interface 78 * uses method 1, while the internal arc algorithms for 79 * adjusting the cache use method 2. We therefor provide two 80 * types of locks: 1) the hash table lock array, and 2) the 81 * arc list locks. 82 * 83 * Buffers do not have their own mutexes, rather they rely on the 84 * hash table mutexes for the bulk of their protection (i.e. most 85 * fields in the arc_buf_hdr_t are protected by these mutexes). 86 * 87 * buf_hash_find() returns the appropriate mutex (held) when it 88 * locates the requested buffer in the hash table. It returns 89 * NULL for the mutex if the buffer was not in the table. 90 * 91 * buf_hash_remove() expects the appropriate hash mutex to be 92 * already held before it is invoked. 93 * 94 * Each arc state also has a mutex which is used to protect the 95 * buffer list associated with the state. When attempting to 96 * obtain a hash table lock while holding an arc list lock you 97 * must use: mutex_tryenter() to avoid deadlock. Also note that 98 * the active state mutex must be held before the ghost state mutex. 99 * 100 * Arc buffers may have an associated eviction callback function. 101 * This function will be invoked prior to removing the buffer (e.g. 102 * in arc_do_user_evicts()). Note however that the data associated 103 * with the buffer may be evicted prior to the callback. The callback 104 * must be made with *no locks held* (to prevent deadlock). Additionally, 105 * the users of callbacks must ensure that their private data is 106 * protected from simultaneous callbacks from arc_buf_evict() 107 * and arc_do_user_evicts(). 108 * 109 * Note that the majority of the performance stats are manipulated 110 * with atomic operations. 111 * 112 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following: 113 * 114 * - L2ARC buflist creation 115 * - L2ARC buflist eviction 116 * - L2ARC write completion, which walks L2ARC buflists 117 * - ARC header destruction, as it removes from L2ARC buflists 118 * - ARC header release, as it removes from L2ARC buflists 119 */ 120 121 #include <sys/spa.h> 122 #include <sys/zio.h> 123 #include <sys/zfs_context.h> 124 #include <sys/arc.h> 125 #include <sys/refcount.h> 126 #include <sys/vdev.h> 127 #include <sys/vdev_impl.h> 128 #ifdef _KERNEL 129 #include <sys/vmsystm.h> 130 #include <vm/anon.h> 131 #include <sys/fs/swapnode.h> 132 #include <sys/dnlc.h> 133 #endif 134 #include <sys/callb.h> 135 #include <sys/kstat.h> 136 #include <zfs_fletcher.h> 137 138 #ifndef _KERNEL 139 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */ 140 boolean_t arc_watch = B_FALSE; 141 int arc_procfd; 142 #endif 143 144 static kmutex_t arc_reclaim_thr_lock; 145 static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */ 146 static uint8_t arc_thread_exit; 147 148 extern int zfs_write_limit_shift; 149 extern uint64_t zfs_write_limit_max; 150 extern kmutex_t zfs_write_limit_lock; 151 152 #define ARC_REDUCE_DNLC_PERCENT 3 153 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT; 154 155 typedef enum arc_reclaim_strategy { 156 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */ 157 ARC_RECLAIM_CONS /* Conservative reclaim strategy */ 158 } arc_reclaim_strategy_t; 159 160 /* number of seconds before growing cache again */ 161 static int arc_grow_retry = 60; 162 163 /* shift of arc_c for calculating both min and max arc_p */ 164 static int arc_p_min_shift = 4; 165 166 /* log2(fraction of arc to reclaim) */ 167 static int arc_shrink_shift = 5; 168 169 /* 170 * minimum lifespan of a prefetch block in clock ticks 171 * (initialized in arc_init()) 172 */ 173 static int arc_min_prefetch_lifespan; 174 175 static int arc_dead; 176 177 /* 178 * The arc has filled available memory and has now warmed up. 179 */ 180 static boolean_t arc_warm; 181 182 /* 183 * These tunables are for performance analysis. 184 */ 185 uint64_t zfs_arc_max; 186 uint64_t zfs_arc_min; 187 uint64_t zfs_arc_meta_limit = 0; 188 int zfs_arc_grow_retry = 0; 189 int zfs_arc_shrink_shift = 0; 190 int zfs_arc_p_min_shift = 0; 191 192 /* 193 * Note that buffers can be in one of 6 states: 194 * ARC_anon - anonymous (discussed below) 195 * ARC_mru - recently used, currently cached 196 * ARC_mru_ghost - recentely used, no longer in cache 197 * ARC_mfu - frequently used, currently cached 198 * ARC_mfu_ghost - frequently used, no longer in cache 199 * ARC_l2c_only - exists in L2ARC but not other states 200 * When there are no active references to the buffer, they are 201 * are linked onto a list in one of these arc states. These are 202 * the only buffers that can be evicted or deleted. Within each 203 * state there are multiple lists, one for meta-data and one for 204 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes, 205 * etc.) is tracked separately so that it can be managed more 206 * explicitly: favored over data, limited explicitly. 207 * 208 * Anonymous buffers are buffers that are not associated with 209 * a DVA. These are buffers that hold dirty block copies 210 * before they are written to stable storage. By definition, 211 * they are "ref'd" and are considered part of arc_mru 212 * that cannot be freed. Generally, they will aquire a DVA 213 * as they are written and migrate onto the arc_mru list. 214 * 215 * The ARC_l2c_only state is for buffers that are in the second 216 * level ARC but no longer in any of the ARC_m* lists. The second 217 * level ARC itself may also contain buffers that are in any of 218 * the ARC_m* states - meaning that a buffer can exist in two 219 * places. The reason for the ARC_l2c_only state is to keep the 220 * buffer header in the hash table, so that reads that hit the 221 * second level ARC benefit from these fast lookups. 222 */ 223 224 typedef struct arc_state { 225 list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */ 226 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */ 227 uint64_t arcs_size; /* total amount of data in this state */ 228 kmutex_t arcs_mtx; 229 } arc_state_t; 230 231 /* The 6 states: */ 232 static arc_state_t ARC_anon; 233 static arc_state_t ARC_mru; 234 static arc_state_t ARC_mru_ghost; 235 static arc_state_t ARC_mfu; 236 static arc_state_t ARC_mfu_ghost; 237 static arc_state_t ARC_l2c_only; 238 239 typedef struct arc_stats { 240 kstat_named_t arcstat_hits; 241 kstat_named_t arcstat_misses; 242 kstat_named_t arcstat_demand_data_hits; 243 kstat_named_t arcstat_demand_data_misses; 244 kstat_named_t arcstat_demand_metadata_hits; 245 kstat_named_t arcstat_demand_metadata_misses; 246 kstat_named_t arcstat_prefetch_data_hits; 247 kstat_named_t arcstat_prefetch_data_misses; 248 kstat_named_t arcstat_prefetch_metadata_hits; 249 kstat_named_t arcstat_prefetch_metadata_misses; 250 kstat_named_t arcstat_mru_hits; 251 kstat_named_t arcstat_mru_ghost_hits; 252 kstat_named_t arcstat_mfu_hits; 253 kstat_named_t arcstat_mfu_ghost_hits; 254 kstat_named_t arcstat_deleted; 255 kstat_named_t arcstat_recycle_miss; 256 kstat_named_t arcstat_mutex_miss; 257 kstat_named_t arcstat_evict_skip; 258 kstat_named_t arcstat_evict_l2_cached; 259 kstat_named_t arcstat_evict_l2_eligible; 260 kstat_named_t arcstat_evict_l2_ineligible; 261 kstat_named_t arcstat_hash_elements; 262 kstat_named_t arcstat_hash_elements_max; 263 kstat_named_t arcstat_hash_collisions; 264 kstat_named_t arcstat_hash_chains; 265 kstat_named_t arcstat_hash_chain_max; 266 kstat_named_t arcstat_p; 267 kstat_named_t arcstat_c; 268 kstat_named_t arcstat_c_min; 269 kstat_named_t arcstat_c_max; 270 kstat_named_t arcstat_size; 271 kstat_named_t arcstat_hdr_size; 272 kstat_named_t arcstat_data_size; 273 kstat_named_t arcstat_other_size; 274 kstat_named_t arcstat_l2_hits; 275 kstat_named_t arcstat_l2_misses; 276 kstat_named_t arcstat_l2_feeds; 277 kstat_named_t arcstat_l2_rw_clash; 278 kstat_named_t arcstat_l2_read_bytes; 279 kstat_named_t arcstat_l2_write_bytes; 280 kstat_named_t arcstat_l2_writes_sent; 281 kstat_named_t arcstat_l2_writes_done; 282 kstat_named_t arcstat_l2_writes_error; 283 kstat_named_t arcstat_l2_writes_hdr_miss; 284 kstat_named_t arcstat_l2_evict_lock_retry; 285 kstat_named_t arcstat_l2_evict_reading; 286 kstat_named_t arcstat_l2_free_on_write; 287 kstat_named_t arcstat_l2_abort_lowmem; 288 kstat_named_t arcstat_l2_cksum_bad; 289 kstat_named_t arcstat_l2_io_error; 290 kstat_named_t arcstat_l2_size; 291 kstat_named_t arcstat_l2_hdr_size; 292 kstat_named_t arcstat_memory_throttle_count; 293 } arc_stats_t; 294 295 static arc_stats_t arc_stats = { 296 { "hits", KSTAT_DATA_UINT64 }, 297 { "misses", KSTAT_DATA_UINT64 }, 298 { "demand_data_hits", KSTAT_DATA_UINT64 }, 299 { "demand_data_misses", KSTAT_DATA_UINT64 }, 300 { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 301 { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 302 { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 303 { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 304 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 305 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 306 { "mru_hits", KSTAT_DATA_UINT64 }, 307 { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 308 { "mfu_hits", KSTAT_DATA_UINT64 }, 309 { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 310 { "deleted", KSTAT_DATA_UINT64 }, 311 { "recycle_miss", KSTAT_DATA_UINT64 }, 312 { "mutex_miss", KSTAT_DATA_UINT64 }, 313 { "evict_skip", KSTAT_DATA_UINT64 }, 314 { "evict_l2_cached", KSTAT_DATA_UINT64 }, 315 { "evict_l2_eligible", KSTAT_DATA_UINT64 }, 316 { "evict_l2_ineligible", KSTAT_DATA_UINT64 }, 317 { "hash_elements", KSTAT_DATA_UINT64 }, 318 { "hash_elements_max", KSTAT_DATA_UINT64 }, 319 { "hash_collisions", KSTAT_DATA_UINT64 }, 320 { "hash_chains", KSTAT_DATA_UINT64 }, 321 { "hash_chain_max", KSTAT_DATA_UINT64 }, 322 { "p", KSTAT_DATA_UINT64 }, 323 { "c", KSTAT_DATA_UINT64 }, 324 { "c_min", KSTAT_DATA_UINT64 }, 325 { "c_max", KSTAT_DATA_UINT64 }, 326 { "size", KSTAT_DATA_UINT64 }, 327 { "hdr_size", KSTAT_DATA_UINT64 }, 328 { "data_size", KSTAT_DATA_UINT64 }, 329 { "other_size", KSTAT_DATA_UINT64 }, 330 { "l2_hits", KSTAT_DATA_UINT64 }, 331 { "l2_misses", KSTAT_DATA_UINT64 }, 332 { "l2_feeds", KSTAT_DATA_UINT64 }, 333 { "l2_rw_clash", KSTAT_DATA_UINT64 }, 334 { "l2_read_bytes", KSTAT_DATA_UINT64 }, 335 { "l2_write_bytes", KSTAT_DATA_UINT64 }, 336 { "l2_writes_sent", KSTAT_DATA_UINT64 }, 337 { "l2_writes_done", KSTAT_DATA_UINT64 }, 338 { "l2_writes_error", KSTAT_DATA_UINT64 }, 339 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, 340 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 341 { "l2_evict_reading", KSTAT_DATA_UINT64 }, 342 { "l2_free_on_write", KSTAT_DATA_UINT64 }, 343 { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 344 { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 345 { "l2_io_error", KSTAT_DATA_UINT64 }, 346 { "l2_size", KSTAT_DATA_UINT64 }, 347 { "l2_hdr_size", KSTAT_DATA_UINT64 }, 348 { "memory_throttle_count", KSTAT_DATA_UINT64 } 349 }; 350 351 #define ARCSTAT(stat) (arc_stats.stat.value.ui64) 352 353 #define ARCSTAT_INCR(stat, val) \ 354 atomic_add_64(&arc_stats.stat.value.ui64, (val)); 355 356 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 357 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 358 359 #define ARCSTAT_MAX(stat, val) { \ 360 uint64_t m; \ 361 while ((val) > (m = arc_stats.stat.value.ui64) && \ 362 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 363 continue; \ 364 } 365 366 #define ARCSTAT_MAXSTAT(stat) \ 367 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 368 369 /* 370 * We define a macro to allow ARC hits/misses to be easily broken down by 371 * two separate conditions, giving a total of four different subtypes for 372 * each of hits and misses (so eight statistics total). 373 */ 374 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 375 if (cond1) { \ 376 if (cond2) { \ 377 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 378 } else { \ 379 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 380 } \ 381 } else { \ 382 if (cond2) { \ 383 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 384 } else { \ 385 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 386 } \ 387 } 388 389 kstat_t *arc_ksp; 390 static arc_state_t *arc_anon; 391 static arc_state_t *arc_mru; 392 static arc_state_t *arc_mru_ghost; 393 static arc_state_t *arc_mfu; 394 static arc_state_t *arc_mfu_ghost; 395 static arc_state_t *arc_l2c_only; 396 397 /* 398 * There are several ARC variables that are critical to export as kstats -- 399 * but we don't want to have to grovel around in the kstat whenever we wish to 400 * manipulate them. For these variables, we therefore define them to be in 401 * terms of the statistic variable. This assures that we are not introducing 402 * the possibility of inconsistency by having shadow copies of the variables, 403 * while still allowing the code to be readable. 404 */ 405 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ 406 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 407 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 408 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 409 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 410 411 static int arc_no_grow; /* Don't try to grow cache size */ 412 static uint64_t arc_tempreserve; 413 static uint64_t arc_loaned_bytes; 414 static uint64_t arc_meta_used; 415 static uint64_t arc_meta_limit; 416 static uint64_t arc_meta_max = 0; 417 418 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t; 419 420 typedef struct arc_callback arc_callback_t; 421 422 struct arc_callback { 423 void *acb_private; 424 arc_done_func_t *acb_done; 425 arc_buf_t *acb_buf; 426 zio_t *acb_zio_dummy; 427 arc_callback_t *acb_next; 428 }; 429 430 typedef struct arc_write_callback arc_write_callback_t; 431 432 struct arc_write_callback { 433 void *awcb_private; 434 arc_done_func_t *awcb_ready; 435 arc_done_func_t *awcb_done; 436 arc_buf_t *awcb_buf; 437 }; 438 439 struct arc_buf_hdr { 440 /* protected by hash lock */ 441 dva_t b_dva; 442 uint64_t b_birth; 443 uint64_t b_cksum0; 444 445 kmutex_t b_freeze_lock; 446 zio_cksum_t *b_freeze_cksum; 447 void *b_thawed; 448 449 arc_buf_hdr_t *b_hash_next; 450 arc_buf_t *b_buf; 451 uint32_t b_flags; 452 uint32_t b_datacnt; 453 454 arc_callback_t *b_acb; 455 kcondvar_t b_cv; 456 457 /* immutable */ 458 arc_buf_contents_t b_type; 459 uint64_t b_size; 460 uint64_t b_spa; 461 462 /* protected by arc state mutex */ 463 arc_state_t *b_state; 464 list_node_t b_arc_node; 465 466 /* updated atomically */ 467 clock_t b_arc_access; 468 469 /* self protecting */ 470 refcount_t b_refcnt; 471 472 l2arc_buf_hdr_t *b_l2hdr; 473 list_node_t b_l2node; 474 }; 475 476 static arc_buf_t *arc_eviction_list; 477 static kmutex_t arc_eviction_mtx; 478 static arc_buf_hdr_t arc_eviction_hdr; 479 static void arc_get_data_buf(arc_buf_t *buf); 480 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock); 481 static int arc_evict_needed(arc_buf_contents_t type); 482 static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes); 483 static void arc_buf_watch(arc_buf_t *buf); 484 485 static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab); 486 487 #define GHOST_STATE(state) \ 488 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 489 (state) == arc_l2c_only) 490 491 /* 492 * Private ARC flags. These flags are private ARC only flags that will show up 493 * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can 494 * be passed in as arc_flags in things like arc_read. However, these flags 495 * should never be passed and should only be set by ARC code. When adding new 496 * public flags, make sure not to smash the private ones. 497 */ 498 499 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 500 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 501 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 502 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 503 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 504 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 505 #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ 506 #define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */ 507 #define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */ 508 #define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */ 509 510 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 511 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 512 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 513 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH) 514 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 515 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 516 #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS) 517 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE) 518 #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \ 519 (hdr)->b_l2hdr != NULL) 520 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING) 521 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED) 522 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD) 523 524 /* 525 * Other sizes 526 */ 527 528 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 529 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) 530 531 /* 532 * Hash table routines 533 */ 534 535 #define HT_LOCK_PAD 64 536 537 struct ht_lock { 538 kmutex_t ht_lock; 539 #ifdef _KERNEL 540 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 541 #endif 542 }; 543 544 #define BUF_LOCKS 256 545 typedef struct buf_hash_table { 546 uint64_t ht_mask; 547 arc_buf_hdr_t **ht_table; 548 struct ht_lock ht_locks[BUF_LOCKS]; 549 } buf_hash_table_t; 550 551 static buf_hash_table_t buf_hash_table; 552 553 #define BUF_HASH_INDEX(spa, dva, birth) \ 554 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 555 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 556 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 557 #define HDR_LOCK(hdr) \ 558 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth))) 559 560 uint64_t zfs_crc64_table[256]; 561 562 /* 563 * Level 2 ARC 564 */ 565 566 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 567 #define L2ARC_HEADROOM 2 /* num of writes */ 568 #define L2ARC_FEED_SECS 1 /* caching interval secs */ 569 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ 570 571 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 572 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 573 574 /* 575 * L2ARC Performance Tunables 576 */ 577 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 578 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 579 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 580 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 581 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */ 582 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 583 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */ 584 boolean_t l2arc_norw = B_TRUE; /* no reads during writes */ 585 586 /* 587 * L2ARC Internals 588 */ 589 typedef struct l2arc_dev { 590 vdev_t *l2ad_vdev; /* vdev */ 591 spa_t *l2ad_spa; /* spa */ 592 uint64_t l2ad_hand; /* next write location */ 593 uint64_t l2ad_write; /* desired write size, bytes */ 594 uint64_t l2ad_boost; /* warmup write boost, bytes */ 595 uint64_t l2ad_start; /* first addr on device */ 596 uint64_t l2ad_end; /* last addr on device */ 597 uint64_t l2ad_evict; /* last addr eviction reached */ 598 boolean_t l2ad_first; /* first sweep through */ 599 boolean_t l2ad_writing; /* currently writing */ 600 list_t *l2ad_buflist; /* buffer list */ 601 list_node_t l2ad_node; /* device list node */ 602 } l2arc_dev_t; 603 604 static list_t L2ARC_dev_list; /* device list */ 605 static list_t *l2arc_dev_list; /* device list pointer */ 606 static kmutex_t l2arc_dev_mtx; /* device list mutex */ 607 static l2arc_dev_t *l2arc_dev_last; /* last device used */ 608 static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 609 static list_t L2ARC_free_on_write; /* free after write buf list */ 610 static list_t *l2arc_free_on_write; /* free after write list ptr */ 611 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 612 static uint64_t l2arc_ndev; /* number of devices */ 613 614 typedef struct l2arc_read_callback { 615 arc_buf_t *l2rcb_buf; /* read buffer */ 616 spa_t *l2rcb_spa; /* spa */ 617 blkptr_t l2rcb_bp; /* original blkptr */ 618 zbookmark_t l2rcb_zb; /* original bookmark */ 619 int l2rcb_flags; /* original flags */ 620 } l2arc_read_callback_t; 621 622 typedef struct l2arc_write_callback { 623 l2arc_dev_t *l2wcb_dev; /* device info */ 624 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 625 } l2arc_write_callback_t; 626 627 struct l2arc_buf_hdr { 628 /* protected by arc_buf_hdr mutex */ 629 l2arc_dev_t *b_dev; /* L2ARC device */ 630 uint64_t b_daddr; /* disk address, offset byte */ 631 }; 632 633 typedef struct l2arc_data_free { 634 /* protected by l2arc_free_on_write_mtx */ 635 void *l2df_data; 636 size_t l2df_size; 637 void (*l2df_func)(void *, size_t); 638 list_node_t l2df_list_node; 639 } l2arc_data_free_t; 640 641 static kmutex_t l2arc_feed_thr_lock; 642 static kcondvar_t l2arc_feed_thr_cv; 643 static uint8_t l2arc_thread_exit; 644 645 static void l2arc_read_done(zio_t *zio); 646 static void l2arc_hdr_stat_add(void); 647 static void l2arc_hdr_stat_remove(void); 648 649 static uint64_t 650 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth) 651 { 652 uint8_t *vdva = (uint8_t *)dva; 653 uint64_t crc = -1ULL; 654 int i; 655 656 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 657 658 for (i = 0; i < sizeof (dva_t); i++) 659 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 660 661 crc ^= (spa>>8) ^ birth; 662 663 return (crc); 664 } 665 666 #define BUF_EMPTY(buf) \ 667 ((buf)->b_dva.dva_word[0] == 0 && \ 668 (buf)->b_dva.dva_word[1] == 0 && \ 669 (buf)->b_birth == 0) 670 671 #define BUF_EQUAL(spa, dva, birth, buf) \ 672 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 673 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 674 ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 675 676 static void 677 buf_discard_identity(arc_buf_hdr_t *hdr) 678 { 679 hdr->b_dva.dva_word[0] = 0; 680 hdr->b_dva.dva_word[1] = 0; 681 hdr->b_birth = 0; 682 hdr->b_cksum0 = 0; 683 } 684 685 static arc_buf_hdr_t * 686 buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp) 687 { 688 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 689 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 690 arc_buf_hdr_t *buf; 691 692 mutex_enter(hash_lock); 693 for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 694 buf = buf->b_hash_next) { 695 if (BUF_EQUAL(spa, dva, birth, buf)) { 696 *lockp = hash_lock; 697 return (buf); 698 } 699 } 700 mutex_exit(hash_lock); 701 *lockp = NULL; 702 return (NULL); 703 } 704 705 /* 706 * Insert an entry into the hash table. If there is already an element 707 * equal to elem in the hash table, then the already existing element 708 * will be returned and the new element will not be inserted. 709 * Otherwise returns NULL. 710 */ 711 static arc_buf_hdr_t * 712 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 713 { 714 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 715 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 716 arc_buf_hdr_t *fbuf; 717 uint32_t i; 718 719 ASSERT(!HDR_IN_HASH_TABLE(buf)); 720 *lockp = hash_lock; 721 mutex_enter(hash_lock); 722 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 723 fbuf = fbuf->b_hash_next, i++) { 724 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 725 return (fbuf); 726 } 727 728 buf->b_hash_next = buf_hash_table.ht_table[idx]; 729 buf_hash_table.ht_table[idx] = buf; 730 buf->b_flags |= ARC_IN_HASH_TABLE; 731 732 /* collect some hash table performance data */ 733 if (i > 0) { 734 ARCSTAT_BUMP(arcstat_hash_collisions); 735 if (i == 1) 736 ARCSTAT_BUMP(arcstat_hash_chains); 737 738 ARCSTAT_MAX(arcstat_hash_chain_max, i); 739 } 740 741 ARCSTAT_BUMP(arcstat_hash_elements); 742 ARCSTAT_MAXSTAT(arcstat_hash_elements); 743 744 return (NULL); 745 } 746 747 static void 748 buf_hash_remove(arc_buf_hdr_t *buf) 749 { 750 arc_buf_hdr_t *fbuf, **bufp; 751 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 752 753 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 754 ASSERT(HDR_IN_HASH_TABLE(buf)); 755 756 bufp = &buf_hash_table.ht_table[idx]; 757 while ((fbuf = *bufp) != buf) { 758 ASSERT(fbuf != NULL); 759 bufp = &fbuf->b_hash_next; 760 } 761 *bufp = buf->b_hash_next; 762 buf->b_hash_next = NULL; 763 buf->b_flags &= ~ARC_IN_HASH_TABLE; 764 765 /* collect some hash table performance data */ 766 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 767 768 if (buf_hash_table.ht_table[idx] && 769 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 770 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 771 } 772 773 /* 774 * Global data structures and functions for the buf kmem cache. 775 */ 776 static kmem_cache_t *hdr_cache; 777 static kmem_cache_t *buf_cache; 778 779 static void 780 buf_fini(void) 781 { 782 int i; 783 784 kmem_free(buf_hash_table.ht_table, 785 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 786 for (i = 0; i < BUF_LOCKS; i++) 787 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 788 kmem_cache_destroy(hdr_cache); 789 kmem_cache_destroy(buf_cache); 790 } 791 792 /* 793 * Constructor callback - called when the cache is empty 794 * and a new buf is requested. 795 */ 796 /* ARGSUSED */ 797 static int 798 hdr_cons(void *vbuf, void *unused, int kmflag) 799 { 800 arc_buf_hdr_t *buf = vbuf; 801 802 bzero(buf, sizeof (arc_buf_hdr_t)); 803 refcount_create(&buf->b_refcnt); 804 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 805 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 806 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); 807 808 return (0); 809 } 810 811 /* ARGSUSED */ 812 static int 813 buf_cons(void *vbuf, void *unused, int kmflag) 814 { 815 arc_buf_t *buf = vbuf; 816 817 bzero(buf, sizeof (arc_buf_t)); 818 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL); 819 rw_init(&buf->b_data_lock, NULL, RW_DEFAULT, NULL); 820 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); 821 822 return (0); 823 } 824 825 /* 826 * Destructor callback - called when a cached buf is 827 * no longer required. 828 */ 829 /* ARGSUSED */ 830 static void 831 hdr_dest(void *vbuf, void *unused) 832 { 833 arc_buf_hdr_t *buf = vbuf; 834 835 ASSERT(BUF_EMPTY(buf)); 836 refcount_destroy(&buf->b_refcnt); 837 cv_destroy(&buf->b_cv); 838 mutex_destroy(&buf->b_freeze_lock); 839 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); 840 } 841 842 /* ARGSUSED */ 843 static void 844 buf_dest(void *vbuf, void *unused) 845 { 846 arc_buf_t *buf = vbuf; 847 848 mutex_destroy(&buf->b_evict_lock); 849 rw_destroy(&buf->b_data_lock); 850 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); 851 } 852 853 /* 854 * Reclaim callback -- invoked when memory is low. 855 */ 856 /* ARGSUSED */ 857 static void 858 hdr_recl(void *unused) 859 { 860 dprintf("hdr_recl called\n"); 861 /* 862 * umem calls the reclaim func when we destroy the buf cache, 863 * which is after we do arc_fini(). 864 */ 865 if (!arc_dead) 866 cv_signal(&arc_reclaim_thr_cv); 867 } 868 869 static void 870 buf_init(void) 871 { 872 uint64_t *ct; 873 uint64_t hsize = 1ULL << 12; 874 int i, j; 875 876 /* 877 * The hash table is big enough to fill all of physical memory 878 * with an average 64K block size. The table will take up 879 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 880 */ 881 while (hsize * 65536 < physmem * PAGESIZE) 882 hsize <<= 1; 883 retry: 884 buf_hash_table.ht_mask = hsize - 1; 885 buf_hash_table.ht_table = 886 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 887 if (buf_hash_table.ht_table == NULL) { 888 ASSERT(hsize > (1ULL << 8)); 889 hsize >>= 1; 890 goto retry; 891 } 892 893 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 894 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 895 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 896 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 897 898 for (i = 0; i < 256; i++) 899 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 900 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 901 902 for (i = 0; i < BUF_LOCKS; i++) { 903 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 904 NULL, MUTEX_DEFAULT, NULL); 905 } 906 } 907 908 #define ARC_MINTIME (hz>>4) /* 62 ms */ 909 910 static void 911 arc_cksum_verify(arc_buf_t *buf) 912 { 913 zio_cksum_t zc; 914 915 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 916 return; 917 918 mutex_enter(&buf->b_hdr->b_freeze_lock); 919 if (buf->b_hdr->b_freeze_cksum == NULL || 920 (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 921 mutex_exit(&buf->b_hdr->b_freeze_lock); 922 return; 923 } 924 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 925 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 926 panic("buffer modified while frozen!"); 927 mutex_exit(&buf->b_hdr->b_freeze_lock); 928 } 929 930 static int 931 arc_cksum_equal(arc_buf_t *buf) 932 { 933 zio_cksum_t zc; 934 int equal; 935 936 mutex_enter(&buf->b_hdr->b_freeze_lock); 937 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 938 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 939 mutex_exit(&buf->b_hdr->b_freeze_lock); 940 941 return (equal); 942 } 943 944 static void 945 arc_cksum_compute(arc_buf_t *buf, boolean_t force) 946 { 947 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 948 return; 949 950 mutex_enter(&buf->b_hdr->b_freeze_lock); 951 if (buf->b_hdr->b_freeze_cksum != NULL) { 952 mutex_exit(&buf->b_hdr->b_freeze_lock); 953 return; 954 } 955 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 956 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 957 buf->b_hdr->b_freeze_cksum); 958 mutex_exit(&buf->b_hdr->b_freeze_lock); 959 arc_buf_watch(buf); 960 } 961 962 #ifndef _KERNEL 963 typedef struct procctl { 964 long cmd; 965 prwatch_t prwatch; 966 } procctl_t; 967 #endif 968 969 /* ARGSUSED */ 970 static void 971 arc_buf_unwatch(arc_buf_t *buf) 972 { 973 #ifndef _KERNEL 974 if (arc_watch) { 975 int result; 976 procctl_t ctl; 977 ctl.cmd = PCWATCH; 978 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 979 ctl.prwatch.pr_size = 0; 980 ctl.prwatch.pr_wflags = 0; 981 result = write(arc_procfd, &ctl, sizeof (ctl)); 982 ASSERT3U(result, ==, sizeof (ctl)); 983 } 984 #endif 985 } 986 987 /* ARGSUSED */ 988 static void 989 arc_buf_watch(arc_buf_t *buf) 990 { 991 #ifndef _KERNEL 992 if (arc_watch) { 993 int result; 994 procctl_t ctl; 995 ctl.cmd = PCWATCH; 996 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 997 ctl.prwatch.pr_size = buf->b_hdr->b_size; 998 ctl.prwatch.pr_wflags = WA_WRITE; 999 result = write(arc_procfd, &ctl, sizeof (ctl)); 1000 ASSERT3U(result, ==, sizeof (ctl)); 1001 } 1002 #endif 1003 } 1004 1005 void 1006 arc_buf_thaw(arc_buf_t *buf) 1007 { 1008 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1009 if (buf->b_hdr->b_state != arc_anon) 1010 panic("modifying non-anon buffer!"); 1011 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) 1012 panic("modifying buffer while i/o in progress!"); 1013 arc_cksum_verify(buf); 1014 } 1015 1016 mutex_enter(&buf->b_hdr->b_freeze_lock); 1017 if (buf->b_hdr->b_freeze_cksum != NULL) { 1018 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1019 buf->b_hdr->b_freeze_cksum = NULL; 1020 } 1021 1022 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1023 if (buf->b_hdr->b_thawed) 1024 kmem_free(buf->b_hdr->b_thawed, 1); 1025 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP); 1026 } 1027 1028 mutex_exit(&buf->b_hdr->b_freeze_lock); 1029 1030 arc_buf_unwatch(buf); 1031 } 1032 1033 void 1034 arc_buf_freeze(arc_buf_t *buf) 1035 { 1036 kmutex_t *hash_lock; 1037 1038 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1039 return; 1040 1041 hash_lock = HDR_LOCK(buf->b_hdr); 1042 mutex_enter(hash_lock); 1043 1044 ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 1045 buf->b_hdr->b_state == arc_anon); 1046 arc_cksum_compute(buf, B_FALSE); 1047 mutex_exit(hash_lock); 1048 1049 } 1050 1051 static void 1052 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 1053 { 1054 ASSERT(MUTEX_HELD(hash_lock)); 1055 1056 if ((refcount_add(&ab->b_refcnt, tag) == 1) && 1057 (ab->b_state != arc_anon)) { 1058 uint64_t delta = ab->b_size * ab->b_datacnt; 1059 list_t *list = &ab->b_state->arcs_list[ab->b_type]; 1060 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; 1061 1062 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 1063 mutex_enter(&ab->b_state->arcs_mtx); 1064 ASSERT(list_link_active(&ab->b_arc_node)); 1065 list_remove(list, ab); 1066 if (GHOST_STATE(ab->b_state)) { 1067 ASSERT0(ab->b_datacnt); 1068 ASSERT3P(ab->b_buf, ==, NULL); 1069 delta = ab->b_size; 1070 } 1071 ASSERT(delta > 0); 1072 ASSERT3U(*size, >=, delta); 1073 atomic_add_64(size, -delta); 1074 mutex_exit(&ab->b_state->arcs_mtx); 1075 /* remove the prefetch flag if we get a reference */ 1076 if (ab->b_flags & ARC_PREFETCH) 1077 ab->b_flags &= ~ARC_PREFETCH; 1078 } 1079 } 1080 1081 static int 1082 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 1083 { 1084 int cnt; 1085 arc_state_t *state = ab->b_state; 1086 1087 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 1088 ASSERT(!GHOST_STATE(state)); 1089 1090 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 1091 (state != arc_anon)) { 1092 uint64_t *size = &state->arcs_lsize[ab->b_type]; 1093 1094 ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 1095 mutex_enter(&state->arcs_mtx); 1096 ASSERT(!list_link_active(&ab->b_arc_node)); 1097 list_insert_head(&state->arcs_list[ab->b_type], ab); 1098 ASSERT(ab->b_datacnt > 0); 1099 atomic_add_64(size, ab->b_size * ab->b_datacnt); 1100 mutex_exit(&state->arcs_mtx); 1101 } 1102 return (cnt); 1103 } 1104 1105 /* 1106 * Move the supplied buffer to the indicated state. The mutex 1107 * for the buffer must be held by the caller. 1108 */ 1109 static void 1110 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 1111 { 1112 arc_state_t *old_state = ab->b_state; 1113 int64_t refcnt = refcount_count(&ab->b_refcnt); 1114 uint64_t from_delta, to_delta; 1115 1116 ASSERT(MUTEX_HELD(hash_lock)); 1117 ASSERT(new_state != old_state); 1118 ASSERT(refcnt == 0 || ab->b_datacnt > 0); 1119 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 1120 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon); 1121 1122 from_delta = to_delta = ab->b_datacnt * ab->b_size; 1123 1124 /* 1125 * If this buffer is evictable, transfer it from the 1126 * old state list to the new state list. 1127 */ 1128 if (refcnt == 0) { 1129 if (old_state != arc_anon) { 1130 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx); 1131 uint64_t *size = &old_state->arcs_lsize[ab->b_type]; 1132 1133 if (use_mutex) 1134 mutex_enter(&old_state->arcs_mtx); 1135 1136 ASSERT(list_link_active(&ab->b_arc_node)); 1137 list_remove(&old_state->arcs_list[ab->b_type], ab); 1138 1139 /* 1140 * If prefetching out of the ghost cache, 1141 * we will have a non-zero datacnt. 1142 */ 1143 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 1144 /* ghost elements have a ghost size */ 1145 ASSERT(ab->b_buf == NULL); 1146 from_delta = ab->b_size; 1147 } 1148 ASSERT3U(*size, >=, from_delta); 1149 atomic_add_64(size, -from_delta); 1150 1151 if (use_mutex) 1152 mutex_exit(&old_state->arcs_mtx); 1153 } 1154 if (new_state != arc_anon) { 1155 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); 1156 uint64_t *size = &new_state->arcs_lsize[ab->b_type]; 1157 1158 if (use_mutex) 1159 mutex_enter(&new_state->arcs_mtx); 1160 1161 list_insert_head(&new_state->arcs_list[ab->b_type], ab); 1162 1163 /* ghost elements have a ghost size */ 1164 if (GHOST_STATE(new_state)) { 1165 ASSERT(ab->b_datacnt == 0); 1166 ASSERT(ab->b_buf == NULL); 1167 to_delta = ab->b_size; 1168 } 1169 atomic_add_64(size, to_delta); 1170 1171 if (use_mutex) 1172 mutex_exit(&new_state->arcs_mtx); 1173 } 1174 } 1175 1176 ASSERT(!BUF_EMPTY(ab)); 1177 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab)) 1178 buf_hash_remove(ab); 1179 1180 /* adjust state sizes */ 1181 if (to_delta) 1182 atomic_add_64(&new_state->arcs_size, to_delta); 1183 if (from_delta) { 1184 ASSERT3U(old_state->arcs_size, >=, from_delta); 1185 atomic_add_64(&old_state->arcs_size, -from_delta); 1186 } 1187 ab->b_state = new_state; 1188 1189 /* adjust l2arc hdr stats */ 1190 if (new_state == arc_l2c_only) 1191 l2arc_hdr_stat_add(); 1192 else if (old_state == arc_l2c_only) 1193 l2arc_hdr_stat_remove(); 1194 } 1195 1196 void 1197 arc_space_consume(uint64_t space, arc_space_type_t type) 1198 { 1199 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 1200 1201 switch (type) { 1202 case ARC_SPACE_DATA: 1203 ARCSTAT_INCR(arcstat_data_size, space); 1204 break; 1205 case ARC_SPACE_OTHER: 1206 ARCSTAT_INCR(arcstat_other_size, space); 1207 break; 1208 case ARC_SPACE_HDRS: 1209 ARCSTAT_INCR(arcstat_hdr_size, space); 1210 break; 1211 case ARC_SPACE_L2HDRS: 1212 ARCSTAT_INCR(arcstat_l2_hdr_size, space); 1213 break; 1214 } 1215 1216 atomic_add_64(&arc_meta_used, space); 1217 atomic_add_64(&arc_size, space); 1218 } 1219 1220 void 1221 arc_space_return(uint64_t space, arc_space_type_t type) 1222 { 1223 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 1224 1225 switch (type) { 1226 case ARC_SPACE_DATA: 1227 ARCSTAT_INCR(arcstat_data_size, -space); 1228 break; 1229 case ARC_SPACE_OTHER: 1230 ARCSTAT_INCR(arcstat_other_size, -space); 1231 break; 1232 case ARC_SPACE_HDRS: 1233 ARCSTAT_INCR(arcstat_hdr_size, -space); 1234 break; 1235 case ARC_SPACE_L2HDRS: 1236 ARCSTAT_INCR(arcstat_l2_hdr_size, -space); 1237 break; 1238 } 1239 1240 ASSERT(arc_meta_used >= space); 1241 if (arc_meta_max < arc_meta_used) 1242 arc_meta_max = arc_meta_used; 1243 atomic_add_64(&arc_meta_used, -space); 1244 ASSERT(arc_size >= space); 1245 atomic_add_64(&arc_size, -space); 1246 } 1247 1248 void * 1249 arc_data_buf_alloc(uint64_t size) 1250 { 1251 if (arc_evict_needed(ARC_BUFC_DATA)) 1252 cv_signal(&arc_reclaim_thr_cv); 1253 atomic_add_64(&arc_size, size); 1254 return (zio_data_buf_alloc(size)); 1255 } 1256 1257 void 1258 arc_data_buf_free(void *buf, uint64_t size) 1259 { 1260 zio_data_buf_free(buf, size); 1261 ASSERT(arc_size >= size); 1262 atomic_add_64(&arc_size, -size); 1263 } 1264 1265 arc_buf_t * 1266 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 1267 { 1268 arc_buf_hdr_t *hdr; 1269 arc_buf_t *buf; 1270 1271 ASSERT3U(size, >, 0); 1272 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 1273 ASSERT(BUF_EMPTY(hdr)); 1274 hdr->b_size = size; 1275 hdr->b_type = type; 1276 hdr->b_spa = spa_load_guid(spa); 1277 hdr->b_state = arc_anon; 1278 hdr->b_arc_access = 0; 1279 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1280 buf->b_hdr = hdr; 1281 buf->b_data = NULL; 1282 buf->b_efunc = NULL; 1283 buf->b_private = NULL; 1284 buf->b_next = NULL; 1285 hdr->b_buf = buf; 1286 arc_get_data_buf(buf); 1287 hdr->b_datacnt = 1; 1288 hdr->b_flags = 0; 1289 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1290 (void) refcount_add(&hdr->b_refcnt, tag); 1291 1292 return (buf); 1293 } 1294 1295 static char *arc_onloan_tag = "onloan"; 1296 1297 /* 1298 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in 1299 * flight data by arc_tempreserve_space() until they are "returned". Loaned 1300 * buffers must be returned to the arc before they can be used by the DMU or 1301 * freed. 1302 */ 1303 arc_buf_t * 1304 arc_loan_buf(spa_t *spa, int size) 1305 { 1306 arc_buf_t *buf; 1307 1308 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA); 1309 1310 atomic_add_64(&arc_loaned_bytes, size); 1311 return (buf); 1312 } 1313 1314 /* 1315 * Return a loaned arc buffer to the arc. 1316 */ 1317 void 1318 arc_return_buf(arc_buf_t *buf, void *tag) 1319 { 1320 arc_buf_hdr_t *hdr = buf->b_hdr; 1321 1322 ASSERT(buf->b_data != NULL); 1323 (void) refcount_add(&hdr->b_refcnt, tag); 1324 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag); 1325 1326 atomic_add_64(&arc_loaned_bytes, -hdr->b_size); 1327 } 1328 1329 /* Detach an arc_buf from a dbuf (tag) */ 1330 void 1331 arc_loan_inuse_buf(arc_buf_t *buf, void *tag) 1332 { 1333 arc_buf_hdr_t *hdr; 1334 1335 ASSERT(buf->b_data != NULL); 1336 hdr = buf->b_hdr; 1337 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag); 1338 (void) refcount_remove(&hdr->b_refcnt, tag); 1339 buf->b_efunc = NULL; 1340 buf->b_private = NULL; 1341 1342 atomic_add_64(&arc_loaned_bytes, hdr->b_size); 1343 } 1344 1345 static arc_buf_t * 1346 arc_buf_clone(arc_buf_t *from) 1347 { 1348 arc_buf_t *buf; 1349 arc_buf_hdr_t *hdr = from->b_hdr; 1350 uint64_t size = hdr->b_size; 1351 1352 ASSERT(hdr->b_state != arc_anon); 1353 1354 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1355 buf->b_hdr = hdr; 1356 buf->b_data = NULL; 1357 buf->b_efunc = NULL; 1358 buf->b_private = NULL; 1359 buf->b_next = hdr->b_buf; 1360 hdr->b_buf = buf; 1361 arc_get_data_buf(buf); 1362 bcopy(from->b_data, buf->b_data, size); 1363 hdr->b_datacnt += 1; 1364 return (buf); 1365 } 1366 1367 void 1368 arc_buf_add_ref(arc_buf_t *buf, void* tag) 1369 { 1370 arc_buf_hdr_t *hdr; 1371 kmutex_t *hash_lock; 1372 1373 /* 1374 * Check to see if this buffer is evicted. Callers 1375 * must verify b_data != NULL to know if the add_ref 1376 * was successful. 1377 */ 1378 mutex_enter(&buf->b_evict_lock); 1379 if (buf->b_data == NULL) { 1380 mutex_exit(&buf->b_evict_lock); 1381 return; 1382 } 1383 hash_lock = HDR_LOCK(buf->b_hdr); 1384 mutex_enter(hash_lock); 1385 hdr = buf->b_hdr; 1386 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1387 mutex_exit(&buf->b_evict_lock); 1388 1389 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 1390 add_reference(hdr, hash_lock, tag); 1391 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 1392 arc_access(hdr, hash_lock); 1393 mutex_exit(hash_lock); 1394 ARCSTAT_BUMP(arcstat_hits); 1395 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 1396 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 1397 data, metadata, hits); 1398 } 1399 1400 /* 1401 * Free the arc data buffer. If it is an l2arc write in progress, 1402 * the buffer is placed on l2arc_free_on_write to be freed later. 1403 */ 1404 static void 1405 arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t)) 1406 { 1407 arc_buf_hdr_t *hdr = buf->b_hdr; 1408 1409 if (HDR_L2_WRITING(hdr)) { 1410 l2arc_data_free_t *df; 1411 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); 1412 df->l2df_data = buf->b_data; 1413 df->l2df_size = hdr->b_size; 1414 df->l2df_func = free_func; 1415 mutex_enter(&l2arc_free_on_write_mtx); 1416 list_insert_head(l2arc_free_on_write, df); 1417 mutex_exit(&l2arc_free_on_write_mtx); 1418 ARCSTAT_BUMP(arcstat_l2_free_on_write); 1419 } else { 1420 free_func(buf->b_data, hdr->b_size); 1421 } 1422 } 1423 1424 static void 1425 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 1426 { 1427 arc_buf_t **bufp; 1428 1429 /* free up data associated with the buf */ 1430 if (buf->b_data) { 1431 arc_state_t *state = buf->b_hdr->b_state; 1432 uint64_t size = buf->b_hdr->b_size; 1433 arc_buf_contents_t type = buf->b_hdr->b_type; 1434 1435 arc_cksum_verify(buf); 1436 arc_buf_unwatch(buf); 1437 1438 if (!recycle) { 1439 if (type == ARC_BUFC_METADATA) { 1440 arc_buf_data_free(buf, zio_buf_free); 1441 arc_space_return(size, ARC_SPACE_DATA); 1442 } else { 1443 ASSERT(type == ARC_BUFC_DATA); 1444 arc_buf_data_free(buf, zio_data_buf_free); 1445 ARCSTAT_INCR(arcstat_data_size, -size); 1446 atomic_add_64(&arc_size, -size); 1447 } 1448 } 1449 if (list_link_active(&buf->b_hdr->b_arc_node)) { 1450 uint64_t *cnt = &state->arcs_lsize[type]; 1451 1452 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 1453 ASSERT(state != arc_anon); 1454 1455 ASSERT3U(*cnt, >=, size); 1456 atomic_add_64(cnt, -size); 1457 } 1458 ASSERT3U(state->arcs_size, >=, size); 1459 atomic_add_64(&state->arcs_size, -size); 1460 buf->b_data = NULL; 1461 ASSERT(buf->b_hdr->b_datacnt > 0); 1462 buf->b_hdr->b_datacnt -= 1; 1463 } 1464 1465 /* only remove the buf if requested */ 1466 if (!all) 1467 return; 1468 1469 /* remove the buf from the hdr list */ 1470 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1471 continue; 1472 *bufp = buf->b_next; 1473 buf->b_next = NULL; 1474 1475 ASSERT(buf->b_efunc == NULL); 1476 1477 /* clean up the buf */ 1478 buf->b_hdr = NULL; 1479 kmem_cache_free(buf_cache, buf); 1480 } 1481 1482 static void 1483 arc_hdr_destroy(arc_buf_hdr_t *hdr) 1484 { 1485 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1486 ASSERT3P(hdr->b_state, ==, arc_anon); 1487 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1488 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr; 1489 1490 if (l2hdr != NULL) { 1491 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx); 1492 /* 1493 * To prevent arc_free() and l2arc_evict() from 1494 * attempting to free the same buffer at the same time, 1495 * a FREE_IN_PROGRESS flag is given to arc_free() to 1496 * give it priority. l2arc_evict() can't destroy this 1497 * header while we are waiting on l2arc_buflist_mtx. 1498 * 1499 * The hdr may be removed from l2ad_buflist before we 1500 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked. 1501 */ 1502 if (!buflist_held) { 1503 mutex_enter(&l2arc_buflist_mtx); 1504 l2hdr = hdr->b_l2hdr; 1505 } 1506 1507 if (l2hdr != NULL) { 1508 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 1509 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1510 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 1511 if (hdr->b_state == arc_l2c_only) 1512 l2arc_hdr_stat_remove(); 1513 hdr->b_l2hdr = NULL; 1514 } 1515 1516 if (!buflist_held) 1517 mutex_exit(&l2arc_buflist_mtx); 1518 } 1519 1520 if (!BUF_EMPTY(hdr)) { 1521 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1522 buf_discard_identity(hdr); 1523 } 1524 while (hdr->b_buf) { 1525 arc_buf_t *buf = hdr->b_buf; 1526 1527 if (buf->b_efunc) { 1528 mutex_enter(&arc_eviction_mtx); 1529 mutex_enter(&buf->b_evict_lock); 1530 ASSERT(buf->b_hdr != NULL); 1531 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1532 hdr->b_buf = buf->b_next; 1533 buf->b_hdr = &arc_eviction_hdr; 1534 buf->b_next = arc_eviction_list; 1535 arc_eviction_list = buf; 1536 mutex_exit(&buf->b_evict_lock); 1537 mutex_exit(&arc_eviction_mtx); 1538 } else { 1539 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1540 } 1541 } 1542 if (hdr->b_freeze_cksum != NULL) { 1543 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1544 hdr->b_freeze_cksum = NULL; 1545 } 1546 if (hdr->b_thawed) { 1547 kmem_free(hdr->b_thawed, 1); 1548 hdr->b_thawed = NULL; 1549 } 1550 1551 ASSERT(!list_link_active(&hdr->b_arc_node)); 1552 ASSERT3P(hdr->b_hash_next, ==, NULL); 1553 ASSERT3P(hdr->b_acb, ==, NULL); 1554 kmem_cache_free(hdr_cache, hdr); 1555 } 1556 1557 void 1558 arc_buf_free(arc_buf_t *buf, void *tag) 1559 { 1560 arc_buf_hdr_t *hdr = buf->b_hdr; 1561 int hashed = hdr->b_state != arc_anon; 1562 1563 ASSERT(buf->b_efunc == NULL); 1564 ASSERT(buf->b_data != NULL); 1565 1566 if (hashed) { 1567 kmutex_t *hash_lock = HDR_LOCK(hdr); 1568 1569 mutex_enter(hash_lock); 1570 hdr = buf->b_hdr; 1571 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1572 1573 (void) remove_reference(hdr, hash_lock, tag); 1574 if (hdr->b_datacnt > 1) { 1575 arc_buf_destroy(buf, FALSE, TRUE); 1576 } else { 1577 ASSERT(buf == hdr->b_buf); 1578 ASSERT(buf->b_efunc == NULL); 1579 hdr->b_flags |= ARC_BUF_AVAILABLE; 1580 } 1581 mutex_exit(hash_lock); 1582 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1583 int destroy_hdr; 1584 /* 1585 * We are in the middle of an async write. Don't destroy 1586 * this buffer unless the write completes before we finish 1587 * decrementing the reference count. 1588 */ 1589 mutex_enter(&arc_eviction_mtx); 1590 (void) remove_reference(hdr, NULL, tag); 1591 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1592 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1593 mutex_exit(&arc_eviction_mtx); 1594 if (destroy_hdr) 1595 arc_hdr_destroy(hdr); 1596 } else { 1597 if (remove_reference(hdr, NULL, tag) > 0) 1598 arc_buf_destroy(buf, FALSE, TRUE); 1599 else 1600 arc_hdr_destroy(hdr); 1601 } 1602 } 1603 1604 int 1605 arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1606 { 1607 arc_buf_hdr_t *hdr = buf->b_hdr; 1608 kmutex_t *hash_lock = HDR_LOCK(hdr); 1609 int no_callback = (buf->b_efunc == NULL); 1610 1611 if (hdr->b_state == arc_anon) { 1612 ASSERT(hdr->b_datacnt == 1); 1613 arc_buf_free(buf, tag); 1614 return (no_callback); 1615 } 1616 1617 mutex_enter(hash_lock); 1618 hdr = buf->b_hdr; 1619 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1620 ASSERT(hdr->b_state != arc_anon); 1621 ASSERT(buf->b_data != NULL); 1622 1623 (void) remove_reference(hdr, hash_lock, tag); 1624 if (hdr->b_datacnt > 1) { 1625 if (no_callback) 1626 arc_buf_destroy(buf, FALSE, TRUE); 1627 } else if (no_callback) { 1628 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1629 ASSERT(buf->b_efunc == NULL); 1630 hdr->b_flags |= ARC_BUF_AVAILABLE; 1631 } 1632 ASSERT(no_callback || hdr->b_datacnt > 1 || 1633 refcount_is_zero(&hdr->b_refcnt)); 1634 mutex_exit(hash_lock); 1635 return (no_callback); 1636 } 1637 1638 int 1639 arc_buf_size(arc_buf_t *buf) 1640 { 1641 return (buf->b_hdr->b_size); 1642 } 1643 1644 /* 1645 * Evict buffers from list until we've removed the specified number of 1646 * bytes. Move the removed buffers to the appropriate evict state. 1647 * If the recycle flag is set, then attempt to "recycle" a buffer: 1648 * - look for a buffer to evict that is `bytes' long. 1649 * - return the data block from this buffer rather than freeing it. 1650 * This flag is used by callers that are trying to make space for a 1651 * new buffer in a full arc cache. 1652 * 1653 * This function makes a "best effort". It skips over any buffers 1654 * it can't get a hash_lock on, and so may not catch all candidates. 1655 * It may also return without evicting as much space as requested. 1656 */ 1657 static void * 1658 arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle, 1659 arc_buf_contents_t type) 1660 { 1661 arc_state_t *evicted_state; 1662 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1663 arc_buf_hdr_t *ab, *ab_prev = NULL; 1664 list_t *list = &state->arcs_list[type]; 1665 kmutex_t *hash_lock; 1666 boolean_t have_lock; 1667 void *stolen = NULL; 1668 1669 ASSERT(state == arc_mru || state == arc_mfu); 1670 1671 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 1672 1673 mutex_enter(&state->arcs_mtx); 1674 mutex_enter(&evicted_state->arcs_mtx); 1675 1676 for (ab = list_tail(list); ab; ab = ab_prev) { 1677 ab_prev = list_prev(list, ab); 1678 /* prefetch buffers have a minimum lifespan */ 1679 if (HDR_IO_IN_PROGRESS(ab) || 1680 (spa && ab->b_spa != spa) || 1681 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1682 ddi_get_lbolt() - ab->b_arc_access < 1683 arc_min_prefetch_lifespan)) { 1684 skipped++; 1685 continue; 1686 } 1687 /* "lookahead" for better eviction candidate */ 1688 if (recycle && ab->b_size != bytes && 1689 ab_prev && ab_prev->b_size == bytes) 1690 continue; 1691 hash_lock = HDR_LOCK(ab); 1692 have_lock = MUTEX_HELD(hash_lock); 1693 if (have_lock || mutex_tryenter(hash_lock)) { 1694 ASSERT0(refcount_count(&ab->b_refcnt)); 1695 ASSERT(ab->b_datacnt > 0); 1696 while (ab->b_buf) { 1697 arc_buf_t *buf = ab->b_buf; 1698 if (!mutex_tryenter(&buf->b_evict_lock)) { 1699 missed += 1; 1700 break; 1701 } 1702 if (buf->b_data) { 1703 bytes_evicted += ab->b_size; 1704 if (recycle && ab->b_type == type && 1705 ab->b_size == bytes && 1706 !HDR_L2_WRITING(ab)) { 1707 stolen = buf->b_data; 1708 recycle = FALSE; 1709 } 1710 } 1711 if (buf->b_efunc) { 1712 mutex_enter(&arc_eviction_mtx); 1713 arc_buf_destroy(buf, 1714 buf->b_data == stolen, FALSE); 1715 ab->b_buf = buf->b_next; 1716 buf->b_hdr = &arc_eviction_hdr; 1717 buf->b_next = arc_eviction_list; 1718 arc_eviction_list = buf; 1719 mutex_exit(&arc_eviction_mtx); 1720 mutex_exit(&buf->b_evict_lock); 1721 } else { 1722 mutex_exit(&buf->b_evict_lock); 1723 arc_buf_destroy(buf, 1724 buf->b_data == stolen, TRUE); 1725 } 1726 } 1727 1728 if (ab->b_l2hdr) { 1729 ARCSTAT_INCR(arcstat_evict_l2_cached, 1730 ab->b_size); 1731 } else { 1732 if (l2arc_write_eligible(ab->b_spa, ab)) { 1733 ARCSTAT_INCR(arcstat_evict_l2_eligible, 1734 ab->b_size); 1735 } else { 1736 ARCSTAT_INCR( 1737 arcstat_evict_l2_ineligible, 1738 ab->b_size); 1739 } 1740 } 1741 1742 if (ab->b_datacnt == 0) { 1743 arc_change_state(evicted_state, ab, hash_lock); 1744 ASSERT(HDR_IN_HASH_TABLE(ab)); 1745 ab->b_flags |= ARC_IN_HASH_TABLE; 1746 ab->b_flags &= ~ARC_BUF_AVAILABLE; 1747 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 1748 } 1749 if (!have_lock) 1750 mutex_exit(hash_lock); 1751 if (bytes >= 0 && bytes_evicted >= bytes) 1752 break; 1753 } else { 1754 missed += 1; 1755 } 1756 } 1757 1758 mutex_exit(&evicted_state->arcs_mtx); 1759 mutex_exit(&state->arcs_mtx); 1760 1761 if (bytes_evicted < bytes) 1762 dprintf("only evicted %lld bytes from %x", 1763 (longlong_t)bytes_evicted, state); 1764 1765 if (skipped) 1766 ARCSTAT_INCR(arcstat_evict_skip, skipped); 1767 1768 if (missed) 1769 ARCSTAT_INCR(arcstat_mutex_miss, missed); 1770 1771 /* 1772 * We have just evicted some date into the ghost state, make 1773 * sure we also adjust the ghost state size if necessary. 1774 */ 1775 if (arc_no_grow && 1776 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 1777 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 1778 arc_mru_ghost->arcs_size - arc_c; 1779 1780 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 1781 int64_t todelete = 1782 MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 1783 arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1784 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 1785 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 1786 arc_mru_ghost->arcs_size + 1787 arc_mfu_ghost->arcs_size - arc_c); 1788 arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1789 } 1790 } 1791 1792 return (stolen); 1793 } 1794 1795 /* 1796 * Remove buffers from list until we've removed the specified number of 1797 * bytes. Destroy the buffers that are removed. 1798 */ 1799 static void 1800 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes) 1801 { 1802 arc_buf_hdr_t *ab, *ab_prev; 1803 arc_buf_hdr_t marker = { 0 }; 1804 list_t *list = &state->arcs_list[ARC_BUFC_DATA]; 1805 kmutex_t *hash_lock; 1806 uint64_t bytes_deleted = 0; 1807 uint64_t bufs_skipped = 0; 1808 1809 ASSERT(GHOST_STATE(state)); 1810 top: 1811 mutex_enter(&state->arcs_mtx); 1812 for (ab = list_tail(list); ab; ab = ab_prev) { 1813 ab_prev = list_prev(list, ab); 1814 if (spa && ab->b_spa != spa) 1815 continue; 1816 1817 /* ignore markers */ 1818 if (ab->b_spa == 0) 1819 continue; 1820 1821 hash_lock = HDR_LOCK(ab); 1822 /* caller may be trying to modify this buffer, skip it */ 1823 if (MUTEX_HELD(hash_lock)) 1824 continue; 1825 if (mutex_tryenter(hash_lock)) { 1826 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1827 ASSERT(ab->b_buf == NULL); 1828 ARCSTAT_BUMP(arcstat_deleted); 1829 bytes_deleted += ab->b_size; 1830 1831 if (ab->b_l2hdr != NULL) { 1832 /* 1833 * This buffer is cached on the 2nd Level ARC; 1834 * don't destroy the header. 1835 */ 1836 arc_change_state(arc_l2c_only, ab, hash_lock); 1837 mutex_exit(hash_lock); 1838 } else { 1839 arc_change_state(arc_anon, ab, hash_lock); 1840 mutex_exit(hash_lock); 1841 arc_hdr_destroy(ab); 1842 } 1843 1844 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1845 if (bytes >= 0 && bytes_deleted >= bytes) 1846 break; 1847 } else if (bytes < 0) { 1848 /* 1849 * Insert a list marker and then wait for the 1850 * hash lock to become available. Once its 1851 * available, restart from where we left off. 1852 */ 1853 list_insert_after(list, ab, &marker); 1854 mutex_exit(&state->arcs_mtx); 1855 mutex_enter(hash_lock); 1856 mutex_exit(hash_lock); 1857 mutex_enter(&state->arcs_mtx); 1858 ab_prev = list_prev(list, &marker); 1859 list_remove(list, &marker); 1860 } else 1861 bufs_skipped += 1; 1862 } 1863 mutex_exit(&state->arcs_mtx); 1864 1865 if (list == &state->arcs_list[ARC_BUFC_DATA] && 1866 (bytes < 0 || bytes_deleted < bytes)) { 1867 list = &state->arcs_list[ARC_BUFC_METADATA]; 1868 goto top; 1869 } 1870 1871 if (bufs_skipped) { 1872 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1873 ASSERT(bytes >= 0); 1874 } 1875 1876 if (bytes_deleted < bytes) 1877 dprintf("only deleted %lld bytes from %p", 1878 (longlong_t)bytes_deleted, state); 1879 } 1880 1881 static void 1882 arc_adjust(void) 1883 { 1884 int64_t adjustment, delta; 1885 1886 /* 1887 * Adjust MRU size 1888 */ 1889 1890 adjustment = MIN((int64_t)(arc_size - arc_c), 1891 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - 1892 arc_p)); 1893 1894 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 1895 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment); 1896 (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA); 1897 adjustment -= delta; 1898 } 1899 1900 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1901 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment); 1902 (void) arc_evict(arc_mru, NULL, delta, FALSE, 1903 ARC_BUFC_METADATA); 1904 } 1905 1906 /* 1907 * Adjust MFU size 1908 */ 1909 1910 adjustment = arc_size - arc_c; 1911 1912 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 1913 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); 1914 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA); 1915 adjustment -= delta; 1916 } 1917 1918 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1919 int64_t delta = MIN(adjustment, 1920 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); 1921 (void) arc_evict(arc_mfu, NULL, delta, FALSE, 1922 ARC_BUFC_METADATA); 1923 } 1924 1925 /* 1926 * Adjust ghost lists 1927 */ 1928 1929 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; 1930 1931 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { 1932 delta = MIN(arc_mru_ghost->arcs_size, adjustment); 1933 arc_evict_ghost(arc_mru_ghost, NULL, delta); 1934 } 1935 1936 adjustment = 1937 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; 1938 1939 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { 1940 delta = MIN(arc_mfu_ghost->arcs_size, adjustment); 1941 arc_evict_ghost(arc_mfu_ghost, NULL, delta); 1942 } 1943 } 1944 1945 static void 1946 arc_do_user_evicts(void) 1947 { 1948 mutex_enter(&arc_eviction_mtx); 1949 while (arc_eviction_list != NULL) { 1950 arc_buf_t *buf = arc_eviction_list; 1951 arc_eviction_list = buf->b_next; 1952 mutex_enter(&buf->b_evict_lock); 1953 buf->b_hdr = NULL; 1954 mutex_exit(&buf->b_evict_lock); 1955 mutex_exit(&arc_eviction_mtx); 1956 1957 if (buf->b_efunc != NULL) 1958 VERIFY(buf->b_efunc(buf) == 0); 1959 1960 buf->b_efunc = NULL; 1961 buf->b_private = NULL; 1962 kmem_cache_free(buf_cache, buf); 1963 mutex_enter(&arc_eviction_mtx); 1964 } 1965 mutex_exit(&arc_eviction_mtx); 1966 } 1967 1968 /* 1969 * Flush all *evictable* data from the cache for the given spa. 1970 * NOTE: this will not touch "active" (i.e. referenced) data. 1971 */ 1972 void 1973 arc_flush(spa_t *spa) 1974 { 1975 uint64_t guid = 0; 1976 1977 if (spa) 1978 guid = spa_load_guid(spa); 1979 1980 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { 1981 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA); 1982 if (spa) 1983 break; 1984 } 1985 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { 1986 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA); 1987 if (spa) 1988 break; 1989 } 1990 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { 1991 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA); 1992 if (spa) 1993 break; 1994 } 1995 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { 1996 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA); 1997 if (spa) 1998 break; 1999 } 2000 2001 arc_evict_ghost(arc_mru_ghost, guid, -1); 2002 arc_evict_ghost(arc_mfu_ghost, guid, -1); 2003 2004 mutex_enter(&arc_reclaim_thr_lock); 2005 arc_do_user_evicts(); 2006 mutex_exit(&arc_reclaim_thr_lock); 2007 ASSERT(spa || arc_eviction_list == NULL); 2008 } 2009 2010 void 2011 arc_shrink(void) 2012 { 2013 if (arc_c > arc_c_min) { 2014 uint64_t to_free; 2015 2016 #ifdef _KERNEL 2017 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 2018 #else 2019 to_free = arc_c >> arc_shrink_shift; 2020 #endif 2021 if (arc_c > arc_c_min + to_free) 2022 atomic_add_64(&arc_c, -to_free); 2023 else 2024 arc_c = arc_c_min; 2025 2026 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 2027 if (arc_c > arc_size) 2028 arc_c = MAX(arc_size, arc_c_min); 2029 if (arc_p > arc_c) 2030 arc_p = (arc_c >> 1); 2031 ASSERT(arc_c >= arc_c_min); 2032 ASSERT((int64_t)arc_p >= 0); 2033 } 2034 2035 if (arc_size > arc_c) 2036 arc_adjust(); 2037 } 2038 2039 /* 2040 * Determine if the system is under memory pressure and is asking 2041 * to reclaim memory. A return value of 1 indicates that the system 2042 * is under memory pressure and that the arc should adjust accordingly. 2043 */ 2044 static int 2045 arc_reclaim_needed(void) 2046 { 2047 uint64_t extra; 2048 2049 #ifdef _KERNEL 2050 2051 if (needfree) 2052 return (1); 2053 2054 /* 2055 * take 'desfree' extra pages, so we reclaim sooner, rather than later 2056 */ 2057 extra = desfree; 2058 2059 /* 2060 * check that we're out of range of the pageout scanner. It starts to 2061 * schedule paging if freemem is less than lotsfree and needfree. 2062 * lotsfree is the high-water mark for pageout, and needfree is the 2063 * number of needed free pages. We add extra pages here to make sure 2064 * the scanner doesn't start up while we're freeing memory. 2065 */ 2066 if (freemem < lotsfree + needfree + extra) 2067 return (1); 2068 2069 /* 2070 * check to make sure that swapfs has enough space so that anon 2071 * reservations can still succeed. anon_resvmem() checks that the 2072 * availrmem is greater than swapfs_minfree, and the number of reserved 2073 * swap pages. We also add a bit of extra here just to prevent 2074 * circumstances from getting really dire. 2075 */ 2076 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 2077 return (1); 2078 2079 #if defined(__i386) 2080 /* 2081 * If we're on an i386 platform, it's possible that we'll exhaust the 2082 * kernel heap space before we ever run out of available physical 2083 * memory. Most checks of the size of the heap_area compare against 2084 * tune.t_minarmem, which is the minimum available real memory that we 2085 * can have in the system. However, this is generally fixed at 25 pages 2086 * which is so low that it's useless. In this comparison, we seek to 2087 * calculate the total heap-size, and reclaim if more than 3/4ths of the 2088 * heap is allocated. (Or, in the calculation, if less than 1/4th is 2089 * free) 2090 */ 2091 if (vmem_size(heap_arena, VMEM_FREE) < 2092 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2)) 2093 return (1); 2094 #endif 2095 2096 /* 2097 * If zio data pages are being allocated out of a separate heap segment, 2098 * then enforce that the size of available vmem for this arena remains 2099 * above about 1/16th free. 2100 * 2101 * Note: The 1/16th arena free requirement was put in place 2102 * to aggressively evict memory from the arc in order to avoid 2103 * memory fragmentation issues. 2104 */ 2105 if (zio_arena != NULL && 2106 vmem_size(zio_arena, VMEM_FREE) < 2107 (vmem_size(zio_arena, VMEM_ALLOC) >> 4)) 2108 return (1); 2109 #else 2110 if (spa_get_random(100) == 0) 2111 return (1); 2112 #endif 2113 return (0); 2114 } 2115 2116 static void 2117 arc_kmem_reap_now(arc_reclaim_strategy_t strat) 2118 { 2119 size_t i; 2120 kmem_cache_t *prev_cache = NULL; 2121 kmem_cache_t *prev_data_cache = NULL; 2122 extern kmem_cache_t *zio_buf_cache[]; 2123 extern kmem_cache_t *zio_data_buf_cache[]; 2124 2125 #ifdef _KERNEL 2126 if (arc_meta_used >= arc_meta_limit) { 2127 /* 2128 * We are exceeding our meta-data cache limit. 2129 * Purge some DNLC entries to release holds on meta-data. 2130 */ 2131 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 2132 } 2133 #if defined(__i386) 2134 /* 2135 * Reclaim unused memory from all kmem caches. 2136 */ 2137 kmem_reap(); 2138 #endif 2139 #endif 2140 2141 /* 2142 * An aggressive reclamation will shrink the cache size as well as 2143 * reap free buffers from the arc kmem caches. 2144 */ 2145 if (strat == ARC_RECLAIM_AGGR) 2146 arc_shrink(); 2147 2148 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 2149 if (zio_buf_cache[i] != prev_cache) { 2150 prev_cache = zio_buf_cache[i]; 2151 kmem_cache_reap_now(zio_buf_cache[i]); 2152 } 2153 if (zio_data_buf_cache[i] != prev_data_cache) { 2154 prev_data_cache = zio_data_buf_cache[i]; 2155 kmem_cache_reap_now(zio_data_buf_cache[i]); 2156 } 2157 } 2158 kmem_cache_reap_now(buf_cache); 2159 kmem_cache_reap_now(hdr_cache); 2160 2161 /* 2162 * Ask the vmem areana to reclaim unused memory from its 2163 * quantum caches. 2164 */ 2165 if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR) 2166 vmem_qcache_reap(zio_arena); 2167 } 2168 2169 static void 2170 arc_reclaim_thread(void) 2171 { 2172 clock_t growtime = 0; 2173 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 2174 callb_cpr_t cpr; 2175 2176 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 2177 2178 mutex_enter(&arc_reclaim_thr_lock); 2179 while (arc_thread_exit == 0) { 2180 if (arc_reclaim_needed()) { 2181 2182 if (arc_no_grow) { 2183 if (last_reclaim == ARC_RECLAIM_CONS) { 2184 last_reclaim = ARC_RECLAIM_AGGR; 2185 } else { 2186 last_reclaim = ARC_RECLAIM_CONS; 2187 } 2188 } else { 2189 arc_no_grow = TRUE; 2190 last_reclaim = ARC_RECLAIM_AGGR; 2191 membar_producer(); 2192 } 2193 2194 /* reset the growth delay for every reclaim */ 2195 growtime = ddi_get_lbolt() + (arc_grow_retry * hz); 2196 2197 arc_kmem_reap_now(last_reclaim); 2198 arc_warm = B_TRUE; 2199 2200 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) { 2201 arc_no_grow = FALSE; 2202 } 2203 2204 arc_adjust(); 2205 2206 if (arc_eviction_list != NULL) 2207 arc_do_user_evicts(); 2208 2209 /* block until needed, or one second, whichever is shorter */ 2210 CALLB_CPR_SAFE_BEGIN(&cpr); 2211 (void) cv_timedwait(&arc_reclaim_thr_cv, 2212 &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz)); 2213 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 2214 } 2215 2216 arc_thread_exit = 0; 2217 cv_broadcast(&arc_reclaim_thr_cv); 2218 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 2219 thread_exit(); 2220 } 2221 2222 /* 2223 * Adapt arc info given the number of bytes we are trying to add and 2224 * the state that we are comming from. This function is only called 2225 * when we are adding new content to the cache. 2226 */ 2227 static void 2228 arc_adapt(int bytes, arc_state_t *state) 2229 { 2230 int mult; 2231 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 2232 2233 if (state == arc_l2c_only) 2234 return; 2235 2236 ASSERT(bytes > 0); 2237 /* 2238 * Adapt the target size of the MRU list: 2239 * - if we just hit in the MRU ghost list, then increase 2240 * the target size of the MRU list. 2241 * - if we just hit in the MFU ghost list, then increase 2242 * the target size of the MFU list by decreasing the 2243 * target size of the MRU list. 2244 */ 2245 if (state == arc_mru_ghost) { 2246 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 2247 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 2248 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 2249 2250 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 2251 } else if (state == arc_mfu_ghost) { 2252 uint64_t delta; 2253 2254 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 2255 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2256 mult = MIN(mult, 10); 2257 2258 delta = MIN(bytes * mult, arc_p); 2259 arc_p = MAX(arc_p_min, arc_p - delta); 2260 } 2261 ASSERT((int64_t)arc_p >= 0); 2262 2263 if (arc_reclaim_needed()) { 2264 cv_signal(&arc_reclaim_thr_cv); 2265 return; 2266 } 2267 2268 if (arc_no_grow) 2269 return; 2270 2271 if (arc_c >= arc_c_max) 2272 return; 2273 2274 /* 2275 * If we're within (2 * maxblocksize) bytes of the target 2276 * cache size, increment the target cache size 2277 */ 2278 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2279 atomic_add_64(&arc_c, (int64_t)bytes); 2280 if (arc_c > arc_c_max) 2281 arc_c = arc_c_max; 2282 else if (state == arc_anon) 2283 atomic_add_64(&arc_p, (int64_t)bytes); 2284 if (arc_p > arc_c) 2285 arc_p = arc_c; 2286 } 2287 ASSERT((int64_t)arc_p >= 0); 2288 } 2289 2290 /* 2291 * Check if the cache has reached its limits and eviction is required 2292 * prior to insert. 2293 */ 2294 static int 2295 arc_evict_needed(arc_buf_contents_t type) 2296 { 2297 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2298 return (1); 2299 2300 if (arc_reclaim_needed()) 2301 return (1); 2302 2303 return (arc_size > arc_c); 2304 } 2305 2306 /* 2307 * The buffer, supplied as the first argument, needs a data block. 2308 * So, if we are at cache max, determine which cache should be victimized. 2309 * We have the following cases: 2310 * 2311 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2312 * In this situation if we're out of space, but the resident size of the MFU is 2313 * under the limit, victimize the MFU cache to satisfy this insertion request. 2314 * 2315 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2316 * Here, we've used up all of the available space for the MRU, so we need to 2317 * evict from our own cache instead. Evict from the set of resident MRU 2318 * entries. 2319 * 2320 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2321 * c minus p represents the MFU space in the cache, since p is the size of the 2322 * cache that is dedicated to the MRU. In this situation there's still space on 2323 * the MFU side, so the MRU side needs to be victimized. 2324 * 2325 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2326 * MFU's resident set is consuming more space than it has been allotted. In 2327 * this situation, we must victimize our own cache, the MFU, for this insertion. 2328 */ 2329 static void 2330 arc_get_data_buf(arc_buf_t *buf) 2331 { 2332 arc_state_t *state = buf->b_hdr->b_state; 2333 uint64_t size = buf->b_hdr->b_size; 2334 arc_buf_contents_t type = buf->b_hdr->b_type; 2335 2336 arc_adapt(size, state); 2337 2338 /* 2339 * We have not yet reached cache maximum size, 2340 * just allocate a new buffer. 2341 */ 2342 if (!arc_evict_needed(type)) { 2343 if (type == ARC_BUFC_METADATA) { 2344 buf->b_data = zio_buf_alloc(size); 2345 arc_space_consume(size, ARC_SPACE_DATA); 2346 } else { 2347 ASSERT(type == ARC_BUFC_DATA); 2348 buf->b_data = zio_data_buf_alloc(size); 2349 ARCSTAT_INCR(arcstat_data_size, size); 2350 atomic_add_64(&arc_size, size); 2351 } 2352 goto out; 2353 } 2354 2355 /* 2356 * If we are prefetching from the mfu ghost list, this buffer 2357 * will end up on the mru list; so steal space from there. 2358 */ 2359 if (state == arc_mfu_ghost) 2360 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2361 else if (state == arc_mru_ghost) 2362 state = arc_mru; 2363 2364 if (state == arc_mru || state == arc_anon) { 2365 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2366 state = (arc_mfu->arcs_lsize[type] >= size && 2367 arc_p > mru_used) ? arc_mfu : arc_mru; 2368 } else { 2369 /* MFU cases */ 2370 uint64_t mfu_space = arc_c - arc_p; 2371 state = (arc_mru->arcs_lsize[type] >= size && 2372 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2373 } 2374 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 2375 if (type == ARC_BUFC_METADATA) { 2376 buf->b_data = zio_buf_alloc(size); 2377 arc_space_consume(size, ARC_SPACE_DATA); 2378 } else { 2379 ASSERT(type == ARC_BUFC_DATA); 2380 buf->b_data = zio_data_buf_alloc(size); 2381 ARCSTAT_INCR(arcstat_data_size, size); 2382 atomic_add_64(&arc_size, size); 2383 } 2384 ARCSTAT_BUMP(arcstat_recycle_miss); 2385 } 2386 ASSERT(buf->b_data != NULL); 2387 out: 2388 /* 2389 * Update the state size. Note that ghost states have a 2390 * "ghost size" and so don't need to be updated. 2391 */ 2392 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2393 arc_buf_hdr_t *hdr = buf->b_hdr; 2394 2395 atomic_add_64(&hdr->b_state->arcs_size, size); 2396 if (list_link_active(&hdr->b_arc_node)) { 2397 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2398 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2399 } 2400 /* 2401 * If we are growing the cache, and we are adding anonymous 2402 * data, and we have outgrown arc_p, update arc_p 2403 */ 2404 if (arc_size < arc_c && hdr->b_state == arc_anon && 2405 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2406 arc_p = MIN(arc_c, arc_p + size); 2407 } 2408 } 2409 2410 /* 2411 * This routine is called whenever a buffer is accessed. 2412 * NOTE: the hash lock is dropped in this function. 2413 */ 2414 static void 2415 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2416 { 2417 clock_t now; 2418 2419 ASSERT(MUTEX_HELD(hash_lock)); 2420 2421 if (buf->b_state == arc_anon) { 2422 /* 2423 * This buffer is not in the cache, and does not 2424 * appear in our "ghost" list. Add the new buffer 2425 * to the MRU state. 2426 */ 2427 2428 ASSERT(buf->b_arc_access == 0); 2429 buf->b_arc_access = ddi_get_lbolt(); 2430 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2431 arc_change_state(arc_mru, buf, hash_lock); 2432 2433 } else if (buf->b_state == arc_mru) { 2434 now = ddi_get_lbolt(); 2435 2436 /* 2437 * If this buffer is here because of a prefetch, then either: 2438 * - clear the flag if this is a "referencing" read 2439 * (any subsequent access will bump this into the MFU state). 2440 * or 2441 * - move the buffer to the head of the list if this is 2442 * another prefetch (to make it less likely to be evicted). 2443 */ 2444 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2445 if (refcount_count(&buf->b_refcnt) == 0) { 2446 ASSERT(list_link_active(&buf->b_arc_node)); 2447 } else { 2448 buf->b_flags &= ~ARC_PREFETCH; 2449 ARCSTAT_BUMP(arcstat_mru_hits); 2450 } 2451 buf->b_arc_access = now; 2452 return; 2453 } 2454 2455 /* 2456 * This buffer has been "accessed" only once so far, 2457 * but it is still in the cache. Move it to the MFU 2458 * state. 2459 */ 2460 if (now > buf->b_arc_access + ARC_MINTIME) { 2461 /* 2462 * More than 125ms have passed since we 2463 * instantiated this buffer. Move it to the 2464 * most frequently used state. 2465 */ 2466 buf->b_arc_access = now; 2467 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2468 arc_change_state(arc_mfu, buf, hash_lock); 2469 } 2470 ARCSTAT_BUMP(arcstat_mru_hits); 2471 } else if (buf->b_state == arc_mru_ghost) { 2472 arc_state_t *new_state; 2473 /* 2474 * This buffer has been "accessed" recently, but 2475 * was evicted from the cache. Move it to the 2476 * MFU state. 2477 */ 2478 2479 if (buf->b_flags & ARC_PREFETCH) { 2480 new_state = arc_mru; 2481 if (refcount_count(&buf->b_refcnt) > 0) 2482 buf->b_flags &= ~ARC_PREFETCH; 2483 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2484 } else { 2485 new_state = arc_mfu; 2486 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2487 } 2488 2489 buf->b_arc_access = ddi_get_lbolt(); 2490 arc_change_state(new_state, buf, hash_lock); 2491 2492 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2493 } else if (buf->b_state == arc_mfu) { 2494 /* 2495 * This buffer has been accessed more than once and is 2496 * still in the cache. Keep it in the MFU state. 2497 * 2498 * NOTE: an add_reference() that occurred when we did 2499 * the arc_read() will have kicked this off the list. 2500 * If it was a prefetch, we will explicitly move it to 2501 * the head of the list now. 2502 */ 2503 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2504 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2505 ASSERT(list_link_active(&buf->b_arc_node)); 2506 } 2507 ARCSTAT_BUMP(arcstat_mfu_hits); 2508 buf->b_arc_access = ddi_get_lbolt(); 2509 } else if (buf->b_state == arc_mfu_ghost) { 2510 arc_state_t *new_state = arc_mfu; 2511 /* 2512 * This buffer has been accessed more than once but has 2513 * been evicted from the cache. Move it back to the 2514 * MFU state. 2515 */ 2516 2517 if (buf->b_flags & ARC_PREFETCH) { 2518 /* 2519 * This is a prefetch access... 2520 * move this block back to the MRU state. 2521 */ 2522 ASSERT0(refcount_count(&buf->b_refcnt)); 2523 new_state = arc_mru; 2524 } 2525 2526 buf->b_arc_access = ddi_get_lbolt(); 2527 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2528 arc_change_state(new_state, buf, hash_lock); 2529 2530 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2531 } else if (buf->b_state == arc_l2c_only) { 2532 /* 2533 * This buffer is on the 2nd Level ARC. 2534 */ 2535 2536 buf->b_arc_access = ddi_get_lbolt(); 2537 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2538 arc_change_state(arc_mfu, buf, hash_lock); 2539 } else { 2540 ASSERT(!"invalid arc state"); 2541 } 2542 } 2543 2544 /* a generic arc_done_func_t which you can use */ 2545 /* ARGSUSED */ 2546 void 2547 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2548 { 2549 if (zio == NULL || zio->io_error == 0) 2550 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2551 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2552 } 2553 2554 /* a generic arc_done_func_t */ 2555 void 2556 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2557 { 2558 arc_buf_t **bufp = arg; 2559 if (zio && zio->io_error) { 2560 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2561 *bufp = NULL; 2562 } else { 2563 *bufp = buf; 2564 ASSERT(buf->b_data); 2565 } 2566 } 2567 2568 static void 2569 arc_read_done(zio_t *zio) 2570 { 2571 arc_buf_hdr_t *hdr, *found; 2572 arc_buf_t *buf; 2573 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2574 kmutex_t *hash_lock; 2575 arc_callback_t *callback_list, *acb; 2576 int freeable = FALSE; 2577 2578 buf = zio->io_private; 2579 hdr = buf->b_hdr; 2580 2581 /* 2582 * The hdr was inserted into hash-table and removed from lists 2583 * prior to starting I/O. We should find this header, since 2584 * it's in the hash table, and it should be legit since it's 2585 * not possible to evict it during the I/O. The only possible 2586 * reason for it not to be found is if we were freed during the 2587 * read. 2588 */ 2589 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth, 2590 &hash_lock); 2591 2592 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2593 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2594 (found == hdr && HDR_L2_READING(hdr))); 2595 2596 hdr->b_flags &= ~ARC_L2_EVICTED; 2597 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2598 hdr->b_flags &= ~ARC_L2CACHE; 2599 2600 /* byteswap if necessary */ 2601 callback_list = hdr->b_acb; 2602 ASSERT(callback_list != NULL); 2603 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { 2604 dmu_object_byteswap_t bswap = 2605 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); 2606 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 2607 byteswap_uint64_array : 2608 dmu_ot_byteswap[bswap].ob_func; 2609 func(buf->b_data, hdr->b_size); 2610 } 2611 2612 arc_cksum_compute(buf, B_FALSE); 2613 arc_buf_watch(buf); 2614 2615 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) { 2616 /* 2617 * Only call arc_access on anonymous buffers. This is because 2618 * if we've issued an I/O for an evicted buffer, we've already 2619 * called arc_access (to prevent any simultaneous readers from 2620 * getting confused). 2621 */ 2622 arc_access(hdr, hash_lock); 2623 } 2624 2625 /* create copies of the data buffer for the callers */ 2626 abuf = buf; 2627 for (acb = callback_list; acb; acb = acb->acb_next) { 2628 if (acb->acb_done) { 2629 if (abuf == NULL) 2630 abuf = arc_buf_clone(buf); 2631 acb->acb_buf = abuf; 2632 abuf = NULL; 2633 } 2634 } 2635 hdr->b_acb = NULL; 2636 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2637 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2638 if (abuf == buf) { 2639 ASSERT(buf->b_efunc == NULL); 2640 ASSERT(hdr->b_datacnt == 1); 2641 hdr->b_flags |= ARC_BUF_AVAILABLE; 2642 } 2643 2644 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2645 2646 if (zio->io_error != 0) { 2647 hdr->b_flags |= ARC_IO_ERROR; 2648 if (hdr->b_state != arc_anon) 2649 arc_change_state(arc_anon, hdr, hash_lock); 2650 if (HDR_IN_HASH_TABLE(hdr)) 2651 buf_hash_remove(hdr); 2652 freeable = refcount_is_zero(&hdr->b_refcnt); 2653 } 2654 2655 /* 2656 * Broadcast before we drop the hash_lock to avoid the possibility 2657 * that the hdr (and hence the cv) might be freed before we get to 2658 * the cv_broadcast(). 2659 */ 2660 cv_broadcast(&hdr->b_cv); 2661 2662 if (hash_lock) { 2663 mutex_exit(hash_lock); 2664 } else { 2665 /* 2666 * This block was freed while we waited for the read to 2667 * complete. It has been removed from the hash table and 2668 * moved to the anonymous state (so that it won't show up 2669 * in the cache). 2670 */ 2671 ASSERT3P(hdr->b_state, ==, arc_anon); 2672 freeable = refcount_is_zero(&hdr->b_refcnt); 2673 } 2674 2675 /* execute each callback and free its structure */ 2676 while ((acb = callback_list) != NULL) { 2677 if (acb->acb_done) 2678 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2679 2680 if (acb->acb_zio_dummy != NULL) { 2681 acb->acb_zio_dummy->io_error = zio->io_error; 2682 zio_nowait(acb->acb_zio_dummy); 2683 } 2684 2685 callback_list = acb->acb_next; 2686 kmem_free(acb, sizeof (arc_callback_t)); 2687 } 2688 2689 if (freeable) 2690 arc_hdr_destroy(hdr); 2691 } 2692 2693 /* 2694 * "Read" the block at the specified DVA (in bp) via the 2695 * cache. If the block is found in the cache, invoke the provided 2696 * callback immediately and return. Note that the `zio' parameter 2697 * in the callback will be NULL in this case, since no IO was 2698 * required. If the block is not in the cache pass the read request 2699 * on to the spa with a substitute callback function, so that the 2700 * requested block will be added to the cache. 2701 * 2702 * If a read request arrives for a block that has a read in-progress, 2703 * either wait for the in-progress read to complete (and return the 2704 * results); or, if this is a read with a "done" func, add a record 2705 * to the read to invoke the "done" func when the read completes, 2706 * and return; or just return. 2707 * 2708 * arc_read_done() will invoke all the requested "done" functions 2709 * for readers of this block. 2710 * 2711 * Normal callers should use arc_read and pass the arc buffer and offset 2712 * for the bp. But if you know you don't need locking, you can use 2713 * arc_read_bp. 2714 */ 2715 int 2716 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_buf_t *pbuf, 2717 arc_done_func_t *done, void *private, int priority, int zio_flags, 2718 uint32_t *arc_flags, const zbookmark_t *zb) 2719 { 2720 int err; 2721 2722 if (pbuf == NULL) { 2723 /* 2724 * XXX This happens from traverse callback funcs, for 2725 * the objset_phys_t block. 2726 */ 2727 return (arc_read_nolock(pio, spa, bp, done, private, priority, 2728 zio_flags, arc_flags, zb)); 2729 } 2730 2731 ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt)); 2732 ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size); 2733 rw_enter(&pbuf->b_data_lock, RW_READER); 2734 2735 err = arc_read_nolock(pio, spa, bp, done, private, priority, 2736 zio_flags, arc_flags, zb); 2737 rw_exit(&pbuf->b_data_lock); 2738 2739 return (err); 2740 } 2741 2742 int 2743 arc_read_nolock(zio_t *pio, spa_t *spa, const blkptr_t *bp, 2744 arc_done_func_t *done, void *private, int priority, int zio_flags, 2745 uint32_t *arc_flags, const zbookmark_t *zb) 2746 { 2747 arc_buf_hdr_t *hdr; 2748 arc_buf_t *buf; 2749 kmutex_t *hash_lock; 2750 zio_t *rzio; 2751 uint64_t guid = spa_load_guid(spa); 2752 2753 top: 2754 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp), 2755 &hash_lock); 2756 if (hdr && hdr->b_datacnt > 0) { 2757 2758 *arc_flags |= ARC_CACHED; 2759 2760 if (HDR_IO_IN_PROGRESS(hdr)) { 2761 2762 if (*arc_flags & ARC_WAIT) { 2763 cv_wait(&hdr->b_cv, hash_lock); 2764 mutex_exit(hash_lock); 2765 goto top; 2766 } 2767 ASSERT(*arc_flags & ARC_NOWAIT); 2768 2769 if (done) { 2770 arc_callback_t *acb = NULL; 2771 2772 acb = kmem_zalloc(sizeof (arc_callback_t), 2773 KM_SLEEP); 2774 acb->acb_done = done; 2775 acb->acb_private = private; 2776 if (pio != NULL) 2777 acb->acb_zio_dummy = zio_null(pio, 2778 spa, NULL, NULL, NULL, zio_flags); 2779 2780 ASSERT(acb->acb_done != NULL); 2781 acb->acb_next = hdr->b_acb; 2782 hdr->b_acb = acb; 2783 add_reference(hdr, hash_lock, private); 2784 mutex_exit(hash_lock); 2785 return (0); 2786 } 2787 mutex_exit(hash_lock); 2788 return (0); 2789 } 2790 2791 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2792 2793 if (done) { 2794 add_reference(hdr, hash_lock, private); 2795 /* 2796 * If this block is already in use, create a new 2797 * copy of the data so that we will be guaranteed 2798 * that arc_release() will always succeed. 2799 */ 2800 buf = hdr->b_buf; 2801 ASSERT(buf); 2802 ASSERT(buf->b_data); 2803 if (HDR_BUF_AVAILABLE(hdr)) { 2804 ASSERT(buf->b_efunc == NULL); 2805 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2806 } else { 2807 buf = arc_buf_clone(buf); 2808 } 2809 2810 } else if (*arc_flags & ARC_PREFETCH && 2811 refcount_count(&hdr->b_refcnt) == 0) { 2812 hdr->b_flags |= ARC_PREFETCH; 2813 } 2814 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2815 arc_access(hdr, hash_lock); 2816 if (*arc_flags & ARC_L2CACHE) 2817 hdr->b_flags |= ARC_L2CACHE; 2818 mutex_exit(hash_lock); 2819 ARCSTAT_BUMP(arcstat_hits); 2820 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2821 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2822 data, metadata, hits); 2823 2824 if (done) 2825 done(NULL, buf, private); 2826 } else { 2827 uint64_t size = BP_GET_LSIZE(bp); 2828 arc_callback_t *acb; 2829 vdev_t *vd = NULL; 2830 uint64_t addr; 2831 boolean_t devw = B_FALSE; 2832 2833 if (hdr == NULL) { 2834 /* this block is not in the cache */ 2835 arc_buf_hdr_t *exists; 2836 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2837 buf = arc_buf_alloc(spa, size, private, type); 2838 hdr = buf->b_hdr; 2839 hdr->b_dva = *BP_IDENTITY(bp); 2840 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 2841 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2842 exists = buf_hash_insert(hdr, &hash_lock); 2843 if (exists) { 2844 /* somebody beat us to the hash insert */ 2845 mutex_exit(hash_lock); 2846 buf_discard_identity(hdr); 2847 (void) arc_buf_remove_ref(buf, private); 2848 goto top; /* restart the IO request */ 2849 } 2850 /* if this is a prefetch, we don't have a reference */ 2851 if (*arc_flags & ARC_PREFETCH) { 2852 (void) remove_reference(hdr, hash_lock, 2853 private); 2854 hdr->b_flags |= ARC_PREFETCH; 2855 } 2856 if (*arc_flags & ARC_L2CACHE) 2857 hdr->b_flags |= ARC_L2CACHE; 2858 if (BP_GET_LEVEL(bp) > 0) 2859 hdr->b_flags |= ARC_INDIRECT; 2860 } else { 2861 /* this block is in the ghost cache */ 2862 ASSERT(GHOST_STATE(hdr->b_state)); 2863 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2864 ASSERT0(refcount_count(&hdr->b_refcnt)); 2865 ASSERT(hdr->b_buf == NULL); 2866 2867 /* if this is a prefetch, we don't have a reference */ 2868 if (*arc_flags & ARC_PREFETCH) 2869 hdr->b_flags |= ARC_PREFETCH; 2870 else 2871 add_reference(hdr, hash_lock, private); 2872 if (*arc_flags & ARC_L2CACHE) 2873 hdr->b_flags |= ARC_L2CACHE; 2874 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2875 buf->b_hdr = hdr; 2876 buf->b_data = NULL; 2877 buf->b_efunc = NULL; 2878 buf->b_private = NULL; 2879 buf->b_next = NULL; 2880 hdr->b_buf = buf; 2881 ASSERT(hdr->b_datacnt == 0); 2882 hdr->b_datacnt = 1; 2883 arc_get_data_buf(buf); 2884 arc_access(hdr, hash_lock); 2885 } 2886 2887 ASSERT(!GHOST_STATE(hdr->b_state)); 2888 2889 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2890 acb->acb_done = done; 2891 acb->acb_private = private; 2892 2893 ASSERT(hdr->b_acb == NULL); 2894 hdr->b_acb = acb; 2895 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2896 2897 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && 2898 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 2899 devw = hdr->b_l2hdr->b_dev->l2ad_writing; 2900 addr = hdr->b_l2hdr->b_daddr; 2901 /* 2902 * Lock out device removal. 2903 */ 2904 if (vdev_is_dead(vd) || 2905 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 2906 vd = NULL; 2907 } 2908 2909 mutex_exit(hash_lock); 2910 2911 ASSERT3U(hdr->b_size, ==, size); 2912 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 2913 uint64_t, size, zbookmark_t *, zb); 2914 ARCSTAT_BUMP(arcstat_misses); 2915 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2916 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2917 data, metadata, misses); 2918 2919 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 2920 /* 2921 * Read from the L2ARC if the following are true: 2922 * 1. The L2ARC vdev was previously cached. 2923 * 2. This buffer still has L2ARC metadata. 2924 * 3. This buffer isn't currently writing to the L2ARC. 2925 * 4. The L2ARC entry wasn't evicted, which may 2926 * also have invalidated the vdev. 2927 * 5. This isn't prefetch and l2arc_noprefetch is set. 2928 */ 2929 if (hdr->b_l2hdr != NULL && 2930 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 2931 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 2932 l2arc_read_callback_t *cb; 2933 2934 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 2935 ARCSTAT_BUMP(arcstat_l2_hits); 2936 2937 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 2938 KM_SLEEP); 2939 cb->l2rcb_buf = buf; 2940 cb->l2rcb_spa = spa; 2941 cb->l2rcb_bp = *bp; 2942 cb->l2rcb_zb = *zb; 2943 cb->l2rcb_flags = zio_flags; 2944 2945 /* 2946 * l2arc read. The SCL_L2ARC lock will be 2947 * released by l2arc_read_done(). 2948 */ 2949 rzio = zio_read_phys(pio, vd, addr, size, 2950 buf->b_data, ZIO_CHECKSUM_OFF, 2951 l2arc_read_done, cb, priority, zio_flags | 2952 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | 2953 ZIO_FLAG_DONT_PROPAGATE | 2954 ZIO_FLAG_DONT_RETRY, B_FALSE); 2955 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 2956 zio_t *, rzio); 2957 ARCSTAT_INCR(arcstat_l2_read_bytes, size); 2958 2959 if (*arc_flags & ARC_NOWAIT) { 2960 zio_nowait(rzio); 2961 return (0); 2962 } 2963 2964 ASSERT(*arc_flags & ARC_WAIT); 2965 if (zio_wait(rzio) == 0) 2966 return (0); 2967 2968 /* l2arc read error; goto zio_read() */ 2969 } else { 2970 DTRACE_PROBE1(l2arc__miss, 2971 arc_buf_hdr_t *, hdr); 2972 ARCSTAT_BUMP(arcstat_l2_misses); 2973 if (HDR_L2_WRITING(hdr)) 2974 ARCSTAT_BUMP(arcstat_l2_rw_clash); 2975 spa_config_exit(spa, SCL_L2ARC, vd); 2976 } 2977 } else { 2978 if (vd != NULL) 2979 spa_config_exit(spa, SCL_L2ARC, vd); 2980 if (l2arc_ndev != 0) { 2981 DTRACE_PROBE1(l2arc__miss, 2982 arc_buf_hdr_t *, hdr); 2983 ARCSTAT_BUMP(arcstat_l2_misses); 2984 } 2985 } 2986 2987 rzio = zio_read(pio, spa, bp, buf->b_data, size, 2988 arc_read_done, buf, priority, zio_flags, zb); 2989 2990 if (*arc_flags & ARC_WAIT) 2991 return (zio_wait(rzio)); 2992 2993 ASSERT(*arc_flags & ARC_NOWAIT); 2994 zio_nowait(rzio); 2995 } 2996 return (0); 2997 } 2998 2999 void 3000 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 3001 { 3002 ASSERT(buf->b_hdr != NULL); 3003 ASSERT(buf->b_hdr->b_state != arc_anon); 3004 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 3005 ASSERT(buf->b_efunc == NULL); 3006 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr)); 3007 3008 buf->b_efunc = func; 3009 buf->b_private = private; 3010 } 3011 3012 /* 3013 * This is used by the DMU to let the ARC know that a buffer is 3014 * being evicted, so the ARC should clean up. If this arc buf 3015 * is not yet in the evicted state, it will be put there. 3016 */ 3017 int 3018 arc_buf_evict(arc_buf_t *buf) 3019 { 3020 arc_buf_hdr_t *hdr; 3021 kmutex_t *hash_lock; 3022 arc_buf_t **bufp; 3023 3024 mutex_enter(&buf->b_evict_lock); 3025 hdr = buf->b_hdr; 3026 if (hdr == NULL) { 3027 /* 3028 * We are in arc_do_user_evicts(). 3029 */ 3030 ASSERT(buf->b_data == NULL); 3031 mutex_exit(&buf->b_evict_lock); 3032 return (0); 3033 } else if (buf->b_data == NULL) { 3034 arc_buf_t copy = *buf; /* structure assignment */ 3035 /* 3036 * We are on the eviction list; process this buffer now 3037 * but let arc_do_user_evicts() do the reaping. 3038 */ 3039 buf->b_efunc = NULL; 3040 mutex_exit(&buf->b_evict_lock); 3041 VERIFY(copy.b_efunc(©) == 0); 3042 return (1); 3043 } 3044 hash_lock = HDR_LOCK(hdr); 3045 mutex_enter(hash_lock); 3046 hdr = buf->b_hdr; 3047 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3048 3049 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 3050 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 3051 3052 /* 3053 * Pull this buffer off of the hdr 3054 */ 3055 bufp = &hdr->b_buf; 3056 while (*bufp != buf) 3057 bufp = &(*bufp)->b_next; 3058 *bufp = buf->b_next; 3059 3060 ASSERT(buf->b_data != NULL); 3061 arc_buf_destroy(buf, FALSE, FALSE); 3062 3063 if (hdr->b_datacnt == 0) { 3064 arc_state_t *old_state = hdr->b_state; 3065 arc_state_t *evicted_state; 3066 3067 ASSERT(hdr->b_buf == NULL); 3068 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 3069 3070 evicted_state = 3071 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 3072 3073 mutex_enter(&old_state->arcs_mtx); 3074 mutex_enter(&evicted_state->arcs_mtx); 3075 3076 arc_change_state(evicted_state, hdr, hash_lock); 3077 ASSERT(HDR_IN_HASH_TABLE(hdr)); 3078 hdr->b_flags |= ARC_IN_HASH_TABLE; 3079 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3080 3081 mutex_exit(&evicted_state->arcs_mtx); 3082 mutex_exit(&old_state->arcs_mtx); 3083 } 3084 mutex_exit(hash_lock); 3085 mutex_exit(&buf->b_evict_lock); 3086 3087 VERIFY(buf->b_efunc(buf) == 0); 3088 buf->b_efunc = NULL; 3089 buf->b_private = NULL; 3090 buf->b_hdr = NULL; 3091 buf->b_next = NULL; 3092 kmem_cache_free(buf_cache, buf); 3093 return (1); 3094 } 3095 3096 /* 3097 * Release this buffer from the cache. This must be done 3098 * after a read and prior to modifying the buffer contents. 3099 * If the buffer has more than one reference, we must make 3100 * a new hdr for the buffer. 3101 */ 3102 void 3103 arc_release(arc_buf_t *buf, void *tag) 3104 { 3105 arc_buf_hdr_t *hdr; 3106 kmutex_t *hash_lock = NULL; 3107 l2arc_buf_hdr_t *l2hdr; 3108 uint64_t buf_size; 3109 3110 /* 3111 * It would be nice to assert that if it's DMU metadata (level > 3112 * 0 || it's the dnode file), then it must be syncing context. 3113 * But we don't know that information at this level. 3114 */ 3115 3116 mutex_enter(&buf->b_evict_lock); 3117 hdr = buf->b_hdr; 3118 3119 /* this buffer is not on any list */ 3120 ASSERT(refcount_count(&hdr->b_refcnt) > 0); 3121 3122 if (hdr->b_state == arc_anon) { 3123 /* this buffer is already released */ 3124 ASSERT(buf->b_efunc == NULL); 3125 } else { 3126 hash_lock = HDR_LOCK(hdr); 3127 mutex_enter(hash_lock); 3128 hdr = buf->b_hdr; 3129 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3130 } 3131 3132 l2hdr = hdr->b_l2hdr; 3133 if (l2hdr) { 3134 mutex_enter(&l2arc_buflist_mtx); 3135 hdr->b_l2hdr = NULL; 3136 buf_size = hdr->b_size; 3137 } 3138 3139 /* 3140 * Do we have more than one buf? 3141 */ 3142 if (hdr->b_datacnt > 1) { 3143 arc_buf_hdr_t *nhdr; 3144 arc_buf_t **bufp; 3145 uint64_t blksz = hdr->b_size; 3146 uint64_t spa = hdr->b_spa; 3147 arc_buf_contents_t type = hdr->b_type; 3148 uint32_t flags = hdr->b_flags; 3149 3150 ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 3151 /* 3152 * Pull the data off of this hdr and attach it to 3153 * a new anonymous hdr. 3154 */ 3155 (void) remove_reference(hdr, hash_lock, tag); 3156 bufp = &hdr->b_buf; 3157 while (*bufp != buf) 3158 bufp = &(*bufp)->b_next; 3159 *bufp = buf->b_next; 3160 buf->b_next = NULL; 3161 3162 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 3163 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 3164 if (refcount_is_zero(&hdr->b_refcnt)) { 3165 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 3166 ASSERT3U(*size, >=, hdr->b_size); 3167 atomic_add_64(size, -hdr->b_size); 3168 } 3169 hdr->b_datacnt -= 1; 3170 arc_cksum_verify(buf); 3171 arc_buf_unwatch(buf); 3172 3173 mutex_exit(hash_lock); 3174 3175 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3176 nhdr->b_size = blksz; 3177 nhdr->b_spa = spa; 3178 nhdr->b_type = type; 3179 nhdr->b_buf = buf; 3180 nhdr->b_state = arc_anon; 3181 nhdr->b_arc_access = 0; 3182 nhdr->b_flags = flags & ARC_L2_WRITING; 3183 nhdr->b_l2hdr = NULL; 3184 nhdr->b_datacnt = 1; 3185 nhdr->b_freeze_cksum = NULL; 3186 (void) refcount_add(&nhdr->b_refcnt, tag); 3187 buf->b_hdr = nhdr; 3188 mutex_exit(&buf->b_evict_lock); 3189 atomic_add_64(&arc_anon->arcs_size, blksz); 3190 } else { 3191 mutex_exit(&buf->b_evict_lock); 3192 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 3193 ASSERT(!list_link_active(&hdr->b_arc_node)); 3194 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3195 if (hdr->b_state != arc_anon) 3196 arc_change_state(arc_anon, hdr, hash_lock); 3197 hdr->b_arc_access = 0; 3198 if (hash_lock) 3199 mutex_exit(hash_lock); 3200 3201 buf_discard_identity(hdr); 3202 arc_buf_thaw(buf); 3203 } 3204 buf->b_efunc = NULL; 3205 buf->b_private = NULL; 3206 3207 if (l2hdr) { 3208 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 3209 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3210 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3211 mutex_exit(&l2arc_buflist_mtx); 3212 } 3213 } 3214 3215 /* 3216 * Release this buffer. If it does not match the provided BP, fill it 3217 * with that block's contents. 3218 */ 3219 /* ARGSUSED */ 3220 int 3221 arc_release_bp(arc_buf_t *buf, void *tag, blkptr_t *bp, spa_t *spa, 3222 zbookmark_t *zb) 3223 { 3224 arc_release(buf, tag); 3225 return (0); 3226 } 3227 3228 int 3229 arc_released(arc_buf_t *buf) 3230 { 3231 int released; 3232 3233 mutex_enter(&buf->b_evict_lock); 3234 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 3235 mutex_exit(&buf->b_evict_lock); 3236 return (released); 3237 } 3238 3239 int 3240 arc_has_callback(arc_buf_t *buf) 3241 { 3242 int callback; 3243 3244 mutex_enter(&buf->b_evict_lock); 3245 callback = (buf->b_efunc != NULL); 3246 mutex_exit(&buf->b_evict_lock); 3247 return (callback); 3248 } 3249 3250 #ifdef ZFS_DEBUG 3251 int 3252 arc_referenced(arc_buf_t *buf) 3253 { 3254 int referenced; 3255 3256 mutex_enter(&buf->b_evict_lock); 3257 referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3258 mutex_exit(&buf->b_evict_lock); 3259 return (referenced); 3260 } 3261 #endif 3262 3263 static void 3264 arc_write_ready(zio_t *zio) 3265 { 3266 arc_write_callback_t *callback = zio->io_private; 3267 arc_buf_t *buf = callback->awcb_buf; 3268 arc_buf_hdr_t *hdr = buf->b_hdr; 3269 3270 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3271 callback->awcb_ready(zio, buf, callback->awcb_private); 3272 3273 /* 3274 * If the IO is already in progress, then this is a re-write 3275 * attempt, so we need to thaw and re-compute the cksum. 3276 * It is the responsibility of the callback to handle the 3277 * accounting for any re-write attempt. 3278 */ 3279 if (HDR_IO_IN_PROGRESS(hdr)) { 3280 mutex_enter(&hdr->b_freeze_lock); 3281 if (hdr->b_freeze_cksum != NULL) { 3282 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3283 hdr->b_freeze_cksum = NULL; 3284 } 3285 mutex_exit(&hdr->b_freeze_lock); 3286 } 3287 arc_cksum_compute(buf, B_FALSE); 3288 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3289 } 3290 3291 static void 3292 arc_write_done(zio_t *zio) 3293 { 3294 arc_write_callback_t *callback = zio->io_private; 3295 arc_buf_t *buf = callback->awcb_buf; 3296 arc_buf_hdr_t *hdr = buf->b_hdr; 3297 3298 ASSERT(hdr->b_acb == NULL); 3299 3300 if (zio->io_error == 0) { 3301 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3302 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 3303 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3304 } else { 3305 ASSERT(BUF_EMPTY(hdr)); 3306 } 3307 3308 /* 3309 * If the block to be written was all-zero, we may have 3310 * compressed it away. In this case no write was performed 3311 * so there will be no dva/birth/checksum. The buffer must 3312 * therefore remain anonymous (and uncached). 3313 */ 3314 if (!BUF_EMPTY(hdr)) { 3315 arc_buf_hdr_t *exists; 3316 kmutex_t *hash_lock; 3317 3318 ASSERT(zio->io_error == 0); 3319 3320 arc_cksum_verify(buf); 3321 3322 exists = buf_hash_insert(hdr, &hash_lock); 3323 if (exists) { 3324 /* 3325 * This can only happen if we overwrite for 3326 * sync-to-convergence, because we remove 3327 * buffers from the hash table when we arc_free(). 3328 */ 3329 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 3330 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 3331 panic("bad overwrite, hdr=%p exists=%p", 3332 (void *)hdr, (void *)exists); 3333 ASSERT(refcount_is_zero(&exists->b_refcnt)); 3334 arc_change_state(arc_anon, exists, hash_lock); 3335 mutex_exit(hash_lock); 3336 arc_hdr_destroy(exists); 3337 exists = buf_hash_insert(hdr, &hash_lock); 3338 ASSERT3P(exists, ==, NULL); 3339 } else { 3340 /* Dedup */ 3341 ASSERT(hdr->b_datacnt == 1); 3342 ASSERT(hdr->b_state == arc_anon); 3343 ASSERT(BP_GET_DEDUP(zio->io_bp)); 3344 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 3345 } 3346 } 3347 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3348 /* if it's not anon, we are doing a scrub */ 3349 if (!exists && hdr->b_state == arc_anon) 3350 arc_access(hdr, hash_lock); 3351 mutex_exit(hash_lock); 3352 } else { 3353 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3354 } 3355 3356 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3357 callback->awcb_done(zio, buf, callback->awcb_private); 3358 3359 kmem_free(callback, sizeof (arc_write_callback_t)); 3360 } 3361 3362 zio_t * 3363 arc_write(zio_t *pio, spa_t *spa, uint64_t txg, 3364 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp, 3365 arc_done_func_t *ready, arc_done_func_t *done, void *private, 3366 int priority, int zio_flags, const zbookmark_t *zb) 3367 { 3368 arc_buf_hdr_t *hdr = buf->b_hdr; 3369 arc_write_callback_t *callback; 3370 zio_t *zio; 3371 3372 ASSERT(ready != NULL); 3373 ASSERT(done != NULL); 3374 ASSERT(!HDR_IO_ERROR(hdr)); 3375 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3376 ASSERT(hdr->b_acb == NULL); 3377 if (l2arc) 3378 hdr->b_flags |= ARC_L2CACHE; 3379 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3380 callback->awcb_ready = ready; 3381 callback->awcb_done = done; 3382 callback->awcb_private = private; 3383 callback->awcb_buf = buf; 3384 3385 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp, 3386 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); 3387 3388 return (zio); 3389 } 3390 3391 static int 3392 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg) 3393 { 3394 #ifdef _KERNEL 3395 uint64_t available_memory = ptob(freemem); 3396 static uint64_t page_load = 0; 3397 static uint64_t last_txg = 0; 3398 3399 #if defined(__i386) 3400 available_memory = 3401 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3402 #endif 3403 if (available_memory >= zfs_write_limit_max) 3404 return (0); 3405 3406 if (txg > last_txg) { 3407 last_txg = txg; 3408 page_load = 0; 3409 } 3410 /* 3411 * If we are in pageout, we know that memory is already tight, 3412 * the arc is already going to be evicting, so we just want to 3413 * continue to let page writes occur as quickly as possible. 3414 */ 3415 if (curproc == proc_pageout) { 3416 if (page_load > MAX(ptob(minfree), available_memory) / 4) 3417 return (ERESTART); 3418 /* Note: reserve is inflated, so we deflate */ 3419 page_load += reserve / 8; 3420 return (0); 3421 } else if (page_load > 0 && arc_reclaim_needed()) { 3422 /* memory is low, delay before restarting */ 3423 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3424 return (EAGAIN); 3425 } 3426 page_load = 0; 3427 3428 if (arc_size > arc_c_min) { 3429 uint64_t evictable_memory = 3430 arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3431 arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3432 arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3433 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3434 available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3435 } 3436 3437 if (inflight_data > available_memory / 4) { 3438 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3439 return (ERESTART); 3440 } 3441 #endif 3442 return (0); 3443 } 3444 3445 void 3446 arc_tempreserve_clear(uint64_t reserve) 3447 { 3448 atomic_add_64(&arc_tempreserve, -reserve); 3449 ASSERT((int64_t)arc_tempreserve >= 0); 3450 } 3451 3452 int 3453 arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3454 { 3455 int error; 3456 uint64_t anon_size; 3457 3458 #ifdef ZFS_DEBUG 3459 /* 3460 * Once in a while, fail for no reason. Everything should cope. 3461 */ 3462 if (spa_get_random(10000) == 0) { 3463 dprintf("forcing random failure\n"); 3464 return (ERESTART); 3465 } 3466 #endif 3467 if (reserve > arc_c/4 && !arc_no_grow) 3468 arc_c = MIN(arc_c_max, reserve * 4); 3469 if (reserve > arc_c) 3470 return (ENOMEM); 3471 3472 /* 3473 * Don't count loaned bufs as in flight dirty data to prevent long 3474 * network delays from blocking transactions that are ready to be 3475 * assigned to a txg. 3476 */ 3477 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0); 3478 3479 /* 3480 * Writes will, almost always, require additional memory allocations 3481 * in order to compress/encrypt/etc the data. We therefor need to 3482 * make sure that there is sufficient available memory for this. 3483 */ 3484 if (error = arc_memory_throttle(reserve, anon_size, txg)) 3485 return (error); 3486 3487 /* 3488 * Throttle writes when the amount of dirty data in the cache 3489 * gets too large. We try to keep the cache less than half full 3490 * of dirty blocks so that our sync times don't grow too large. 3491 * Note: if two requests come in concurrently, we might let them 3492 * both succeed, when one of them should fail. Not a huge deal. 3493 */ 3494 3495 if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 3496 anon_size > arc_c / 4) { 3497 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3498 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3499 arc_tempreserve>>10, 3500 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3501 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3502 reserve>>10, arc_c>>10); 3503 return (ERESTART); 3504 } 3505 atomic_add_64(&arc_tempreserve, reserve); 3506 return (0); 3507 } 3508 3509 void 3510 arc_init(void) 3511 { 3512 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3513 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3514 3515 /* Convert seconds to clock ticks */ 3516 arc_min_prefetch_lifespan = 1 * hz; 3517 3518 /* Start out with 1/8 of all memory */ 3519 arc_c = physmem * PAGESIZE / 8; 3520 3521 #ifdef _KERNEL 3522 /* 3523 * On architectures where the physical memory can be larger 3524 * than the addressable space (intel in 32-bit mode), we may 3525 * need to limit the cache to 1/8 of VM size. 3526 */ 3527 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3528 #endif 3529 3530 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 3531 arc_c_min = MAX(arc_c / 4, 64<<20); 3532 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 3533 if (arc_c * 8 >= 1<<30) 3534 arc_c_max = (arc_c * 8) - (1<<30); 3535 else 3536 arc_c_max = arc_c_min; 3537 arc_c_max = MAX(arc_c * 6, arc_c_max); 3538 3539 /* 3540 * Allow the tunables to override our calculations if they are 3541 * reasonable (ie. over 64MB) 3542 */ 3543 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 3544 arc_c_max = zfs_arc_max; 3545 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 3546 arc_c_min = zfs_arc_min; 3547 3548 arc_c = arc_c_max; 3549 arc_p = (arc_c >> 1); 3550 3551 /* limit meta-data to 1/4 of the arc capacity */ 3552 arc_meta_limit = arc_c_max / 4; 3553 3554 /* Allow the tunable to override if it is reasonable */ 3555 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3556 arc_meta_limit = zfs_arc_meta_limit; 3557 3558 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3559 arc_c_min = arc_meta_limit / 2; 3560 3561 if (zfs_arc_grow_retry > 0) 3562 arc_grow_retry = zfs_arc_grow_retry; 3563 3564 if (zfs_arc_shrink_shift > 0) 3565 arc_shrink_shift = zfs_arc_shrink_shift; 3566 3567 if (zfs_arc_p_min_shift > 0) 3568 arc_p_min_shift = zfs_arc_p_min_shift; 3569 3570 /* if kmem_flags are set, lets try to use less memory */ 3571 if (kmem_debugging()) 3572 arc_c = arc_c / 2; 3573 if (arc_c < arc_c_min) 3574 arc_c = arc_c_min; 3575 3576 arc_anon = &ARC_anon; 3577 arc_mru = &ARC_mru; 3578 arc_mru_ghost = &ARC_mru_ghost; 3579 arc_mfu = &ARC_mfu; 3580 arc_mfu_ghost = &ARC_mfu_ghost; 3581 arc_l2c_only = &ARC_l2c_only; 3582 arc_size = 0; 3583 3584 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3585 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3586 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3587 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3588 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3589 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3590 3591 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 3592 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3593 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 3594 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3595 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 3596 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3597 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 3598 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3599 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 3600 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3601 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 3602 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3603 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 3604 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3605 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 3606 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3607 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 3608 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3609 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 3610 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3611 3612 buf_init(); 3613 3614 arc_thread_exit = 0; 3615 arc_eviction_list = NULL; 3616 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3617 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3618 3619 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3620 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3621 3622 if (arc_ksp != NULL) { 3623 arc_ksp->ks_data = &arc_stats; 3624 kstat_install(arc_ksp); 3625 } 3626 3627 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3628 TS_RUN, minclsyspri); 3629 3630 arc_dead = FALSE; 3631 arc_warm = B_FALSE; 3632 3633 if (zfs_write_limit_max == 0) 3634 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; 3635 else 3636 zfs_write_limit_shift = 0; 3637 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL); 3638 } 3639 3640 void 3641 arc_fini(void) 3642 { 3643 mutex_enter(&arc_reclaim_thr_lock); 3644 arc_thread_exit = 1; 3645 while (arc_thread_exit != 0) 3646 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3647 mutex_exit(&arc_reclaim_thr_lock); 3648 3649 arc_flush(NULL); 3650 3651 arc_dead = TRUE; 3652 3653 if (arc_ksp != NULL) { 3654 kstat_delete(arc_ksp); 3655 arc_ksp = NULL; 3656 } 3657 3658 mutex_destroy(&arc_eviction_mtx); 3659 mutex_destroy(&arc_reclaim_thr_lock); 3660 cv_destroy(&arc_reclaim_thr_cv); 3661 3662 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 3663 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 3664 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 3665 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 3666 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 3667 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 3668 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 3669 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3670 3671 mutex_destroy(&arc_anon->arcs_mtx); 3672 mutex_destroy(&arc_mru->arcs_mtx); 3673 mutex_destroy(&arc_mru_ghost->arcs_mtx); 3674 mutex_destroy(&arc_mfu->arcs_mtx); 3675 mutex_destroy(&arc_mfu_ghost->arcs_mtx); 3676 mutex_destroy(&arc_l2c_only->arcs_mtx); 3677 3678 mutex_destroy(&zfs_write_limit_lock); 3679 3680 buf_fini(); 3681 3682 ASSERT(arc_loaned_bytes == 0); 3683 } 3684 3685 /* 3686 * Level 2 ARC 3687 * 3688 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3689 * It uses dedicated storage devices to hold cached data, which are populated 3690 * using large infrequent writes. The main role of this cache is to boost 3691 * the performance of random read workloads. The intended L2ARC devices 3692 * include short-stroked disks, solid state disks, and other media with 3693 * substantially faster read latency than disk. 3694 * 3695 * +-----------------------+ 3696 * | ARC | 3697 * +-----------------------+ 3698 * | ^ ^ 3699 * | | | 3700 * l2arc_feed_thread() arc_read() 3701 * | | | 3702 * | l2arc read | 3703 * V | | 3704 * +---------------+ | 3705 * | L2ARC | | 3706 * +---------------+ | 3707 * | ^ | 3708 * l2arc_write() | | 3709 * | | | 3710 * V | | 3711 * +-------+ +-------+ 3712 * | vdev | | vdev | 3713 * | cache | | cache | 3714 * +-------+ +-------+ 3715 * +=========+ .-----. 3716 * : L2ARC : |-_____-| 3717 * : devices : | Disks | 3718 * +=========+ `-_____-' 3719 * 3720 * Read requests are satisfied from the following sources, in order: 3721 * 3722 * 1) ARC 3723 * 2) vdev cache of L2ARC devices 3724 * 3) L2ARC devices 3725 * 4) vdev cache of disks 3726 * 5) disks 3727 * 3728 * Some L2ARC device types exhibit extremely slow write performance. 3729 * To accommodate for this there are some significant differences between 3730 * the L2ARC and traditional cache design: 3731 * 3732 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 3733 * the ARC behave as usual, freeing buffers and placing headers on ghost 3734 * lists. The ARC does not send buffers to the L2ARC during eviction as 3735 * this would add inflated write latencies for all ARC memory pressure. 3736 * 3737 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 3738 * It does this by periodically scanning buffers from the eviction-end of 3739 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 3740 * not already there. It scans until a headroom of buffers is satisfied, 3741 * which itself is a buffer for ARC eviction. The thread that does this is 3742 * l2arc_feed_thread(), illustrated below; example sizes are included to 3743 * provide a better sense of ratio than this diagram: 3744 * 3745 * head --> tail 3746 * +---------------------+----------+ 3747 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 3748 * +---------------------+----------+ | o L2ARC eligible 3749 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 3750 * +---------------------+----------+ | 3751 * 15.9 Gbytes ^ 32 Mbytes | 3752 * headroom | 3753 * l2arc_feed_thread() 3754 * | 3755 * l2arc write hand <--[oooo]--' 3756 * | 8 Mbyte 3757 * | write max 3758 * V 3759 * +==============================+ 3760 * L2ARC dev |####|#|###|###| |####| ... | 3761 * +==============================+ 3762 * 32 Gbytes 3763 * 3764 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 3765 * evicted, then the L2ARC has cached a buffer much sooner than it probably 3766 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 3767 * safe to say that this is an uncommon case, since buffers at the end of 3768 * the ARC lists have moved there due to inactivity. 3769 * 3770 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 3771 * then the L2ARC simply misses copying some buffers. This serves as a 3772 * pressure valve to prevent heavy read workloads from both stalling the ARC 3773 * with waits and clogging the L2ARC with writes. This also helps prevent 3774 * the potential for the L2ARC to churn if it attempts to cache content too 3775 * quickly, such as during backups of the entire pool. 3776 * 3777 * 5. After system boot and before the ARC has filled main memory, there are 3778 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 3779 * lists can remain mostly static. Instead of searching from tail of these 3780 * lists as pictured, the l2arc_feed_thread() will search from the list heads 3781 * for eligible buffers, greatly increasing its chance of finding them. 3782 * 3783 * The L2ARC device write speed is also boosted during this time so that 3784 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 3785 * there are no L2ARC reads, and no fear of degrading read performance 3786 * through increased writes. 3787 * 3788 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 3789 * the vdev queue can aggregate them into larger and fewer writes. Each 3790 * device is written to in a rotor fashion, sweeping writes through 3791 * available space then repeating. 3792 * 3793 * 7. The L2ARC does not store dirty content. It never needs to flush 3794 * write buffers back to disk based storage. 3795 * 3796 * 8. If an ARC buffer is written (and dirtied) which also exists in the 3797 * L2ARC, the now stale L2ARC buffer is immediately dropped. 3798 * 3799 * The performance of the L2ARC can be tweaked by a number of tunables, which 3800 * may be necessary for different workloads: 3801 * 3802 * l2arc_write_max max write bytes per interval 3803 * l2arc_write_boost extra write bytes during device warmup 3804 * l2arc_noprefetch skip caching prefetched buffers 3805 * l2arc_headroom number of max device writes to precache 3806 * l2arc_feed_secs seconds between L2ARC writing 3807 * 3808 * Tunables may be removed or added as future performance improvements are 3809 * integrated, and also may become zpool properties. 3810 * 3811 * There are three key functions that control how the L2ARC warms up: 3812 * 3813 * l2arc_write_eligible() check if a buffer is eligible to cache 3814 * l2arc_write_size() calculate how much to write 3815 * l2arc_write_interval() calculate sleep delay between writes 3816 * 3817 * These three functions determine what to write, how much, and how quickly 3818 * to send writes. 3819 */ 3820 3821 static boolean_t 3822 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab) 3823 { 3824 /* 3825 * A buffer is *not* eligible for the L2ARC if it: 3826 * 1. belongs to a different spa. 3827 * 2. is already cached on the L2ARC. 3828 * 3. has an I/O in progress (it may be an incomplete read). 3829 * 4. is flagged not eligible (zfs property). 3830 */ 3831 if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL || 3832 HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) 3833 return (B_FALSE); 3834 3835 return (B_TRUE); 3836 } 3837 3838 static uint64_t 3839 l2arc_write_size(l2arc_dev_t *dev) 3840 { 3841 uint64_t size; 3842 3843 size = dev->l2ad_write; 3844 3845 if (arc_warm == B_FALSE) 3846 size += dev->l2ad_boost; 3847 3848 return (size); 3849 3850 } 3851 3852 static clock_t 3853 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 3854 { 3855 clock_t interval, next, now; 3856 3857 /* 3858 * If the ARC lists are busy, increase our write rate; if the 3859 * lists are stale, idle back. This is achieved by checking 3860 * how much we previously wrote - if it was more than half of 3861 * what we wanted, schedule the next write much sooner. 3862 */ 3863 if (l2arc_feed_again && wrote > (wanted / 2)) 3864 interval = (hz * l2arc_feed_min_ms) / 1000; 3865 else 3866 interval = hz * l2arc_feed_secs; 3867 3868 now = ddi_get_lbolt(); 3869 next = MAX(now, MIN(now + interval, began + interval)); 3870 3871 return (next); 3872 } 3873 3874 static void 3875 l2arc_hdr_stat_add(void) 3876 { 3877 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 3878 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 3879 } 3880 3881 static void 3882 l2arc_hdr_stat_remove(void) 3883 { 3884 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 3885 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 3886 } 3887 3888 /* 3889 * Cycle through L2ARC devices. This is how L2ARC load balances. 3890 * If a device is returned, this also returns holding the spa config lock. 3891 */ 3892 static l2arc_dev_t * 3893 l2arc_dev_get_next(void) 3894 { 3895 l2arc_dev_t *first, *next = NULL; 3896 3897 /* 3898 * Lock out the removal of spas (spa_namespace_lock), then removal 3899 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 3900 * both locks will be dropped and a spa config lock held instead. 3901 */ 3902 mutex_enter(&spa_namespace_lock); 3903 mutex_enter(&l2arc_dev_mtx); 3904 3905 /* if there are no vdevs, there is nothing to do */ 3906 if (l2arc_ndev == 0) 3907 goto out; 3908 3909 first = NULL; 3910 next = l2arc_dev_last; 3911 do { 3912 /* loop around the list looking for a non-faulted vdev */ 3913 if (next == NULL) { 3914 next = list_head(l2arc_dev_list); 3915 } else { 3916 next = list_next(l2arc_dev_list, next); 3917 if (next == NULL) 3918 next = list_head(l2arc_dev_list); 3919 } 3920 3921 /* if we have come back to the start, bail out */ 3922 if (first == NULL) 3923 first = next; 3924 else if (next == first) 3925 break; 3926 3927 } while (vdev_is_dead(next->l2ad_vdev)); 3928 3929 /* if we were unable to find any usable vdevs, return NULL */ 3930 if (vdev_is_dead(next->l2ad_vdev)) 3931 next = NULL; 3932 3933 l2arc_dev_last = next; 3934 3935 out: 3936 mutex_exit(&l2arc_dev_mtx); 3937 3938 /* 3939 * Grab the config lock to prevent the 'next' device from being 3940 * removed while we are writing to it. 3941 */ 3942 if (next != NULL) 3943 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 3944 mutex_exit(&spa_namespace_lock); 3945 3946 return (next); 3947 } 3948 3949 /* 3950 * Free buffers that were tagged for destruction. 3951 */ 3952 static void 3953 l2arc_do_free_on_write() 3954 { 3955 list_t *buflist; 3956 l2arc_data_free_t *df, *df_prev; 3957 3958 mutex_enter(&l2arc_free_on_write_mtx); 3959 buflist = l2arc_free_on_write; 3960 3961 for (df = list_tail(buflist); df; df = df_prev) { 3962 df_prev = list_prev(buflist, df); 3963 ASSERT(df->l2df_data != NULL); 3964 ASSERT(df->l2df_func != NULL); 3965 df->l2df_func(df->l2df_data, df->l2df_size); 3966 list_remove(buflist, df); 3967 kmem_free(df, sizeof (l2arc_data_free_t)); 3968 } 3969 3970 mutex_exit(&l2arc_free_on_write_mtx); 3971 } 3972 3973 /* 3974 * A write to a cache device has completed. Update all headers to allow 3975 * reads from these buffers to begin. 3976 */ 3977 static void 3978 l2arc_write_done(zio_t *zio) 3979 { 3980 l2arc_write_callback_t *cb; 3981 l2arc_dev_t *dev; 3982 list_t *buflist; 3983 arc_buf_hdr_t *head, *ab, *ab_prev; 3984 l2arc_buf_hdr_t *abl2; 3985 kmutex_t *hash_lock; 3986 3987 cb = zio->io_private; 3988 ASSERT(cb != NULL); 3989 dev = cb->l2wcb_dev; 3990 ASSERT(dev != NULL); 3991 head = cb->l2wcb_head; 3992 ASSERT(head != NULL); 3993 buflist = dev->l2ad_buflist; 3994 ASSERT(buflist != NULL); 3995 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 3996 l2arc_write_callback_t *, cb); 3997 3998 if (zio->io_error != 0) 3999 ARCSTAT_BUMP(arcstat_l2_writes_error); 4000 4001 mutex_enter(&l2arc_buflist_mtx); 4002 4003 /* 4004 * All writes completed, or an error was hit. 4005 */ 4006 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 4007 ab_prev = list_prev(buflist, ab); 4008 4009 hash_lock = HDR_LOCK(ab); 4010 if (!mutex_tryenter(hash_lock)) { 4011 /* 4012 * This buffer misses out. It may be in a stage 4013 * of eviction. Its ARC_L2_WRITING flag will be 4014 * left set, denying reads to this buffer. 4015 */ 4016 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 4017 continue; 4018 } 4019 4020 if (zio->io_error != 0) { 4021 /* 4022 * Error - drop L2ARC entry. 4023 */ 4024 list_remove(buflist, ab); 4025 abl2 = ab->b_l2hdr; 4026 ab->b_l2hdr = NULL; 4027 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4028 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4029 } 4030 4031 /* 4032 * Allow ARC to begin reads to this L2ARC entry. 4033 */ 4034 ab->b_flags &= ~ARC_L2_WRITING; 4035 4036 mutex_exit(hash_lock); 4037 } 4038 4039 atomic_inc_64(&l2arc_writes_done); 4040 list_remove(buflist, head); 4041 kmem_cache_free(hdr_cache, head); 4042 mutex_exit(&l2arc_buflist_mtx); 4043 4044 l2arc_do_free_on_write(); 4045 4046 kmem_free(cb, sizeof (l2arc_write_callback_t)); 4047 } 4048 4049 /* 4050 * A read to a cache device completed. Validate buffer contents before 4051 * handing over to the regular ARC routines. 4052 */ 4053 static void 4054 l2arc_read_done(zio_t *zio) 4055 { 4056 l2arc_read_callback_t *cb; 4057 arc_buf_hdr_t *hdr; 4058 arc_buf_t *buf; 4059 kmutex_t *hash_lock; 4060 int equal; 4061 4062 ASSERT(zio->io_vd != NULL); 4063 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 4064 4065 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 4066 4067 cb = zio->io_private; 4068 ASSERT(cb != NULL); 4069 buf = cb->l2rcb_buf; 4070 ASSERT(buf != NULL); 4071 4072 hash_lock = HDR_LOCK(buf->b_hdr); 4073 mutex_enter(hash_lock); 4074 hdr = buf->b_hdr; 4075 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 4076 4077 /* 4078 * Check this survived the L2ARC journey. 4079 */ 4080 equal = arc_cksum_equal(buf); 4081 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 4082 mutex_exit(hash_lock); 4083 zio->io_private = buf; 4084 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 4085 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 4086 arc_read_done(zio); 4087 } else { 4088 mutex_exit(hash_lock); 4089 /* 4090 * Buffer didn't survive caching. Increment stats and 4091 * reissue to the original storage device. 4092 */ 4093 if (zio->io_error != 0) { 4094 ARCSTAT_BUMP(arcstat_l2_io_error); 4095 } else { 4096 zio->io_error = EIO; 4097 } 4098 if (!equal) 4099 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 4100 4101 /* 4102 * If there's no waiter, issue an async i/o to the primary 4103 * storage now. If there *is* a waiter, the caller must 4104 * issue the i/o in a context where it's OK to block. 4105 */ 4106 if (zio->io_waiter == NULL) { 4107 zio_t *pio = zio_unique_parent(zio); 4108 4109 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 4110 4111 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, 4112 buf->b_data, zio->io_size, arc_read_done, buf, 4113 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4114 } 4115 } 4116 4117 kmem_free(cb, sizeof (l2arc_read_callback_t)); 4118 } 4119 4120 /* 4121 * This is the list priority from which the L2ARC will search for pages to 4122 * cache. This is used within loops (0..3) to cycle through lists in the 4123 * desired order. This order can have a significant effect on cache 4124 * performance. 4125 * 4126 * Currently the metadata lists are hit first, MFU then MRU, followed by 4127 * the data lists. This function returns a locked list, and also returns 4128 * the lock pointer. 4129 */ 4130 static list_t * 4131 l2arc_list_locked(int list_num, kmutex_t **lock) 4132 { 4133 list_t *list; 4134 4135 ASSERT(list_num >= 0 && list_num <= 3); 4136 4137 switch (list_num) { 4138 case 0: 4139 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 4140 *lock = &arc_mfu->arcs_mtx; 4141 break; 4142 case 1: 4143 list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 4144 *lock = &arc_mru->arcs_mtx; 4145 break; 4146 case 2: 4147 list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 4148 *lock = &arc_mfu->arcs_mtx; 4149 break; 4150 case 3: 4151 list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 4152 *lock = &arc_mru->arcs_mtx; 4153 break; 4154 } 4155 4156 ASSERT(!(MUTEX_HELD(*lock))); 4157 mutex_enter(*lock); 4158 return (list); 4159 } 4160 4161 /* 4162 * Evict buffers from the device write hand to the distance specified in 4163 * bytes. This distance may span populated buffers, it may span nothing. 4164 * This is clearing a region on the L2ARC device ready for writing. 4165 * If the 'all' boolean is set, every buffer is evicted. 4166 */ 4167 static void 4168 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4169 { 4170 list_t *buflist; 4171 l2arc_buf_hdr_t *abl2; 4172 arc_buf_hdr_t *ab, *ab_prev; 4173 kmutex_t *hash_lock; 4174 uint64_t taddr; 4175 4176 buflist = dev->l2ad_buflist; 4177 4178 if (buflist == NULL) 4179 return; 4180 4181 if (!all && dev->l2ad_first) { 4182 /* 4183 * This is the first sweep through the device. There is 4184 * nothing to evict. 4185 */ 4186 return; 4187 } 4188 4189 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4190 /* 4191 * When nearing the end of the device, evict to the end 4192 * before the device write hand jumps to the start. 4193 */ 4194 taddr = dev->l2ad_end; 4195 } else { 4196 taddr = dev->l2ad_hand + distance; 4197 } 4198 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4199 uint64_t, taddr, boolean_t, all); 4200 4201 top: 4202 mutex_enter(&l2arc_buflist_mtx); 4203 for (ab = list_tail(buflist); ab; ab = ab_prev) { 4204 ab_prev = list_prev(buflist, ab); 4205 4206 hash_lock = HDR_LOCK(ab); 4207 if (!mutex_tryenter(hash_lock)) { 4208 /* 4209 * Missed the hash lock. Retry. 4210 */ 4211 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4212 mutex_exit(&l2arc_buflist_mtx); 4213 mutex_enter(hash_lock); 4214 mutex_exit(hash_lock); 4215 goto top; 4216 } 4217 4218 if (HDR_L2_WRITE_HEAD(ab)) { 4219 /* 4220 * We hit a write head node. Leave it for 4221 * l2arc_write_done(). 4222 */ 4223 list_remove(buflist, ab); 4224 mutex_exit(hash_lock); 4225 continue; 4226 } 4227 4228 if (!all && ab->b_l2hdr != NULL && 4229 (ab->b_l2hdr->b_daddr > taddr || 4230 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4231 /* 4232 * We've evicted to the target address, 4233 * or the end of the device. 4234 */ 4235 mutex_exit(hash_lock); 4236 break; 4237 } 4238 4239 if (HDR_FREE_IN_PROGRESS(ab)) { 4240 /* 4241 * Already on the path to destruction. 4242 */ 4243 mutex_exit(hash_lock); 4244 continue; 4245 } 4246 4247 if (ab->b_state == arc_l2c_only) { 4248 ASSERT(!HDR_L2_READING(ab)); 4249 /* 4250 * This doesn't exist in the ARC. Destroy. 4251 * arc_hdr_destroy() will call list_remove() 4252 * and decrement arcstat_l2_size. 4253 */ 4254 arc_change_state(arc_anon, ab, hash_lock); 4255 arc_hdr_destroy(ab); 4256 } else { 4257 /* 4258 * Invalidate issued or about to be issued 4259 * reads, since we may be about to write 4260 * over this location. 4261 */ 4262 if (HDR_L2_READING(ab)) { 4263 ARCSTAT_BUMP(arcstat_l2_evict_reading); 4264 ab->b_flags |= ARC_L2_EVICTED; 4265 } 4266 4267 /* 4268 * Tell ARC this no longer exists in L2ARC. 4269 */ 4270 if (ab->b_l2hdr != NULL) { 4271 abl2 = ab->b_l2hdr; 4272 ab->b_l2hdr = NULL; 4273 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4274 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4275 } 4276 list_remove(buflist, ab); 4277 4278 /* 4279 * This may have been leftover after a 4280 * failed write. 4281 */ 4282 ab->b_flags &= ~ARC_L2_WRITING; 4283 } 4284 mutex_exit(hash_lock); 4285 } 4286 mutex_exit(&l2arc_buflist_mtx); 4287 4288 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0); 4289 dev->l2ad_evict = taddr; 4290 } 4291 4292 /* 4293 * Find and write ARC buffers to the L2ARC device. 4294 * 4295 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4296 * for reading until they have completed writing. 4297 */ 4298 static uint64_t 4299 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 4300 { 4301 arc_buf_hdr_t *ab, *ab_prev, *head; 4302 l2arc_buf_hdr_t *hdrl2; 4303 list_t *list; 4304 uint64_t passed_sz, write_sz, buf_sz, headroom; 4305 void *buf_data; 4306 kmutex_t *hash_lock, *list_lock; 4307 boolean_t have_lock, full; 4308 l2arc_write_callback_t *cb; 4309 zio_t *pio, *wzio; 4310 uint64_t guid = spa_load_guid(spa); 4311 4312 ASSERT(dev->l2ad_vdev != NULL); 4313 4314 pio = NULL; 4315 write_sz = 0; 4316 full = B_FALSE; 4317 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4318 head->b_flags |= ARC_L2_WRITE_HEAD; 4319 4320 /* 4321 * Copy buffers for L2ARC writing. 4322 */ 4323 mutex_enter(&l2arc_buflist_mtx); 4324 for (int try = 0; try <= 3; try++) { 4325 list = l2arc_list_locked(try, &list_lock); 4326 passed_sz = 0; 4327 4328 /* 4329 * L2ARC fast warmup. 4330 * 4331 * Until the ARC is warm and starts to evict, read from the 4332 * head of the ARC lists rather than the tail. 4333 */ 4334 headroom = target_sz * l2arc_headroom; 4335 if (arc_warm == B_FALSE) 4336 ab = list_head(list); 4337 else 4338 ab = list_tail(list); 4339 4340 for (; ab; ab = ab_prev) { 4341 if (arc_warm == B_FALSE) 4342 ab_prev = list_next(list, ab); 4343 else 4344 ab_prev = list_prev(list, ab); 4345 4346 hash_lock = HDR_LOCK(ab); 4347 have_lock = MUTEX_HELD(hash_lock); 4348 if (!have_lock && !mutex_tryenter(hash_lock)) { 4349 /* 4350 * Skip this buffer rather than waiting. 4351 */ 4352 continue; 4353 } 4354 4355 passed_sz += ab->b_size; 4356 if (passed_sz > headroom) { 4357 /* 4358 * Searched too far. 4359 */ 4360 mutex_exit(hash_lock); 4361 break; 4362 } 4363 4364 if (!l2arc_write_eligible(guid, ab)) { 4365 mutex_exit(hash_lock); 4366 continue; 4367 } 4368 4369 if ((write_sz + ab->b_size) > target_sz) { 4370 full = B_TRUE; 4371 mutex_exit(hash_lock); 4372 break; 4373 } 4374 4375 if (pio == NULL) { 4376 /* 4377 * Insert a dummy header on the buflist so 4378 * l2arc_write_done() can find where the 4379 * write buffers begin without searching. 4380 */ 4381 list_insert_head(dev->l2ad_buflist, head); 4382 4383 cb = kmem_alloc( 4384 sizeof (l2arc_write_callback_t), KM_SLEEP); 4385 cb->l2wcb_dev = dev; 4386 cb->l2wcb_head = head; 4387 pio = zio_root(spa, l2arc_write_done, cb, 4388 ZIO_FLAG_CANFAIL); 4389 } 4390 4391 /* 4392 * Create and add a new L2ARC header. 4393 */ 4394 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 4395 hdrl2->b_dev = dev; 4396 hdrl2->b_daddr = dev->l2ad_hand; 4397 4398 ab->b_flags |= ARC_L2_WRITING; 4399 ab->b_l2hdr = hdrl2; 4400 list_insert_head(dev->l2ad_buflist, ab); 4401 buf_data = ab->b_buf->b_data; 4402 buf_sz = ab->b_size; 4403 4404 /* 4405 * Compute and store the buffer cksum before 4406 * writing. On debug the cksum is verified first. 4407 */ 4408 arc_cksum_verify(ab->b_buf); 4409 arc_cksum_compute(ab->b_buf, B_TRUE); 4410 4411 mutex_exit(hash_lock); 4412 4413 wzio = zio_write_phys(pio, dev->l2ad_vdev, 4414 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4415 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4416 ZIO_FLAG_CANFAIL, B_FALSE); 4417 4418 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4419 zio_t *, wzio); 4420 (void) zio_nowait(wzio); 4421 4422 /* 4423 * Keep the clock hand suitably device-aligned. 4424 */ 4425 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 4426 4427 write_sz += buf_sz; 4428 dev->l2ad_hand += buf_sz; 4429 } 4430 4431 mutex_exit(list_lock); 4432 4433 if (full == B_TRUE) 4434 break; 4435 } 4436 mutex_exit(&l2arc_buflist_mtx); 4437 4438 if (pio == NULL) { 4439 ASSERT0(write_sz); 4440 kmem_cache_free(hdr_cache, head); 4441 return (0); 4442 } 4443 4444 ASSERT3U(write_sz, <=, target_sz); 4445 ARCSTAT_BUMP(arcstat_l2_writes_sent); 4446 ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz); 4447 ARCSTAT_INCR(arcstat_l2_size, write_sz); 4448 vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0); 4449 4450 /* 4451 * Bump device hand to the device start if it is approaching the end. 4452 * l2arc_evict() will already have evicted ahead for this case. 4453 */ 4454 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 4455 vdev_space_update(dev->l2ad_vdev, 4456 dev->l2ad_end - dev->l2ad_hand, 0, 0); 4457 dev->l2ad_hand = dev->l2ad_start; 4458 dev->l2ad_evict = dev->l2ad_start; 4459 dev->l2ad_first = B_FALSE; 4460 } 4461 4462 dev->l2ad_writing = B_TRUE; 4463 (void) zio_wait(pio); 4464 dev->l2ad_writing = B_FALSE; 4465 4466 return (write_sz); 4467 } 4468 4469 /* 4470 * This thread feeds the L2ARC at regular intervals. This is the beating 4471 * heart of the L2ARC. 4472 */ 4473 static void 4474 l2arc_feed_thread(void) 4475 { 4476 callb_cpr_t cpr; 4477 l2arc_dev_t *dev; 4478 spa_t *spa; 4479 uint64_t size, wrote; 4480 clock_t begin, next = ddi_get_lbolt(); 4481 4482 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 4483 4484 mutex_enter(&l2arc_feed_thr_lock); 4485 4486 while (l2arc_thread_exit == 0) { 4487 CALLB_CPR_SAFE_BEGIN(&cpr); 4488 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 4489 next); 4490 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 4491 next = ddi_get_lbolt() + hz; 4492 4493 /* 4494 * Quick check for L2ARC devices. 4495 */ 4496 mutex_enter(&l2arc_dev_mtx); 4497 if (l2arc_ndev == 0) { 4498 mutex_exit(&l2arc_dev_mtx); 4499 continue; 4500 } 4501 mutex_exit(&l2arc_dev_mtx); 4502 begin = ddi_get_lbolt(); 4503 4504 /* 4505 * This selects the next l2arc device to write to, and in 4506 * doing so the next spa to feed from: dev->l2ad_spa. This 4507 * will return NULL if there are now no l2arc devices or if 4508 * they are all faulted. 4509 * 4510 * If a device is returned, its spa's config lock is also 4511 * held to prevent device removal. l2arc_dev_get_next() 4512 * will grab and release l2arc_dev_mtx. 4513 */ 4514 if ((dev = l2arc_dev_get_next()) == NULL) 4515 continue; 4516 4517 spa = dev->l2ad_spa; 4518 ASSERT(spa != NULL); 4519 4520 /* 4521 * If the pool is read-only then force the feed thread to 4522 * sleep a little longer. 4523 */ 4524 if (!spa_writeable(spa)) { 4525 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; 4526 spa_config_exit(spa, SCL_L2ARC, dev); 4527 continue; 4528 } 4529 4530 /* 4531 * Avoid contributing to memory pressure. 4532 */ 4533 if (arc_reclaim_needed()) { 4534 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 4535 spa_config_exit(spa, SCL_L2ARC, dev); 4536 continue; 4537 } 4538 4539 ARCSTAT_BUMP(arcstat_l2_feeds); 4540 4541 size = l2arc_write_size(dev); 4542 4543 /* 4544 * Evict L2ARC buffers that will be overwritten. 4545 */ 4546 l2arc_evict(dev, size, B_FALSE); 4547 4548 /* 4549 * Write ARC buffers. 4550 */ 4551 wrote = l2arc_write_buffers(spa, dev, size); 4552 4553 /* 4554 * Calculate interval between writes. 4555 */ 4556 next = l2arc_write_interval(begin, size, wrote); 4557 spa_config_exit(spa, SCL_L2ARC, dev); 4558 } 4559 4560 l2arc_thread_exit = 0; 4561 cv_broadcast(&l2arc_feed_thr_cv); 4562 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 4563 thread_exit(); 4564 } 4565 4566 boolean_t 4567 l2arc_vdev_present(vdev_t *vd) 4568 { 4569 l2arc_dev_t *dev; 4570 4571 mutex_enter(&l2arc_dev_mtx); 4572 for (dev = list_head(l2arc_dev_list); dev != NULL; 4573 dev = list_next(l2arc_dev_list, dev)) { 4574 if (dev->l2ad_vdev == vd) 4575 break; 4576 } 4577 mutex_exit(&l2arc_dev_mtx); 4578 4579 return (dev != NULL); 4580 } 4581 4582 /* 4583 * Add a vdev for use by the L2ARC. By this point the spa has already 4584 * validated the vdev and opened it. 4585 */ 4586 void 4587 l2arc_add_vdev(spa_t *spa, vdev_t *vd) 4588 { 4589 l2arc_dev_t *adddev; 4590 4591 ASSERT(!l2arc_vdev_present(vd)); 4592 4593 /* 4594 * Create a new l2arc device entry. 4595 */ 4596 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 4597 adddev->l2ad_spa = spa; 4598 adddev->l2ad_vdev = vd; 4599 adddev->l2ad_write = l2arc_write_max; 4600 adddev->l2ad_boost = l2arc_write_boost; 4601 adddev->l2ad_start = VDEV_LABEL_START_SIZE; 4602 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); 4603 adddev->l2ad_hand = adddev->l2ad_start; 4604 adddev->l2ad_evict = adddev->l2ad_start; 4605 adddev->l2ad_first = B_TRUE; 4606 adddev->l2ad_writing = B_FALSE; 4607 ASSERT3U(adddev->l2ad_write, >, 0); 4608 4609 /* 4610 * This is a list of all ARC buffers that are still valid on the 4611 * device. 4612 */ 4613 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 4614 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 4615 offsetof(arc_buf_hdr_t, b_l2node)); 4616 4617 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); 4618 4619 /* 4620 * Add device to global list 4621 */ 4622 mutex_enter(&l2arc_dev_mtx); 4623 list_insert_head(l2arc_dev_list, adddev); 4624 atomic_inc_64(&l2arc_ndev); 4625 mutex_exit(&l2arc_dev_mtx); 4626 } 4627 4628 /* 4629 * Remove a vdev from the L2ARC. 4630 */ 4631 void 4632 l2arc_remove_vdev(vdev_t *vd) 4633 { 4634 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 4635 4636 /* 4637 * Find the device by vdev 4638 */ 4639 mutex_enter(&l2arc_dev_mtx); 4640 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 4641 nextdev = list_next(l2arc_dev_list, dev); 4642 if (vd == dev->l2ad_vdev) { 4643 remdev = dev; 4644 break; 4645 } 4646 } 4647 ASSERT(remdev != NULL); 4648 4649 /* 4650 * Remove device from global list 4651 */ 4652 list_remove(l2arc_dev_list, remdev); 4653 l2arc_dev_last = NULL; /* may have been invalidated */ 4654 atomic_dec_64(&l2arc_ndev); 4655 mutex_exit(&l2arc_dev_mtx); 4656 4657 /* 4658 * Clear all buflists and ARC references. L2ARC device flush. 4659 */ 4660 l2arc_evict(remdev, 0, B_TRUE); 4661 list_destroy(remdev->l2ad_buflist); 4662 kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 4663 kmem_free(remdev, sizeof (l2arc_dev_t)); 4664 } 4665 4666 void 4667 l2arc_init(void) 4668 { 4669 l2arc_thread_exit = 0; 4670 l2arc_ndev = 0; 4671 l2arc_writes_sent = 0; 4672 l2arc_writes_done = 0; 4673 4674 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 4675 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 4676 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 4677 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 4678 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 4679 4680 l2arc_dev_list = &L2ARC_dev_list; 4681 l2arc_free_on_write = &L2ARC_free_on_write; 4682 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 4683 offsetof(l2arc_dev_t, l2ad_node)); 4684 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 4685 offsetof(l2arc_data_free_t, l2df_list_node)); 4686 } 4687 4688 void 4689 l2arc_fini(void) 4690 { 4691 /* 4692 * This is called from dmu_fini(), which is called from spa_fini(); 4693 * Because of this, we can assume that all l2arc devices have 4694 * already been removed when the pools themselves were removed. 4695 */ 4696 4697 l2arc_do_free_on_write(); 4698 4699 mutex_destroy(&l2arc_feed_thr_lock); 4700 cv_destroy(&l2arc_feed_thr_cv); 4701 mutex_destroy(&l2arc_dev_mtx); 4702 mutex_destroy(&l2arc_buflist_mtx); 4703 mutex_destroy(&l2arc_free_on_write_mtx); 4704 4705 list_destroy(l2arc_dev_list); 4706 list_destroy(l2arc_free_on_write); 4707 } 4708 4709 void 4710 l2arc_start(void) 4711 { 4712 if (!(spa_mode_global & FWRITE)) 4713 return; 4714 4715 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 4716 TS_RUN, minclsyspri); 4717 } 4718 4719 void 4720 l2arc_stop(void) 4721 { 4722 if (!(spa_mode_global & FWRITE)) 4723 return; 4724 4725 mutex_enter(&l2arc_feed_thr_lock); 4726 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 4727 l2arc_thread_exit = 1; 4728 while (l2arc_thread_exit != 0) 4729 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 4730 mutex_exit(&l2arc_feed_thr_lock); 4731 } 4732