/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes; rather, they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;

/*
 * Note that buffers can be in one of 5 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 * When there are no active references to a buffer, it is
 * linked onto one of the lists in arc.  These are the
 * only buffers that can be evicted or deleted.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 */

typedef struct arc_state {
	list_t		arcs_list;	/* linked list of evictable buffers in state */
	uint64_t	arcs_lsize;	/* total size of buffers in the linked list */
	uint64_t	arcs_size;	/* total size of all buffers in this state */
	kmutex_t	arcs_mtx;
} arc_state_t;

/* The 5 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;

typedef struct arc_stats {
	kstat_named_t	arcstat_hits;
	kstat_named_t	arcstat_misses;
	kstat_named_t	arcstat_demand_data_hits;
	kstat_named_t	arcstat_demand_data_misses;
	kstat_named_t	arcstat_demand_metadata_hits;
	kstat_named_t	arcstat_demand_metadata_misses;
	kstat_named_t	arcstat_prefetch_data_hits;
	kstat_named_t	arcstat_prefetch_data_misses;
	kstat_named_t	arcstat_prefetch_metadata_hits;
	kstat_named_t	arcstat_prefetch_metadata_misses;
	kstat_named_t	arcstat_mru_hits;
	kstat_named_t	arcstat_mru_ghost_hits;
	kstat_named_t	arcstat_mfu_hits;
	kstat_named_t	arcstat_mfu_ghost_hits;
	kstat_named_t	arcstat_deleted;
	kstat_named_t	arcstat_recycle_miss;
	kstat_named_t	arcstat_mutex_miss;
	kstat_named_t	arcstat_evict_skip;
	kstat_named_t	arcstat_hash_elements;
	kstat_named_t	arcstat_hash_elements_max;
	kstat_named_t	arcstat_hash_collisions;
	kstat_named_t	arcstat_hash_chains;
	kstat_named_t	arcstat_hash_chain_max;
	kstat_named_t	arcstat_p;
	kstat_named_t	arcstat_c;
	kstat_named_t	arcstat_c_min;
	kstat_named_t	arcstat_c_max;
	kstat_named_t	arcstat_size;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	arc_done_func_t		*acb_done;
	void			*acb_private;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost)

/*
 * Private ARC flags.  These flags are private, ARC-only flags that will show
 * up in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and
 * can be passed in as arc_flags in things like arc_read.
However, these flags 372 * should never be passed and should only be set by ARC code. When adding new 373 * public flags, make sure not to smash the private ones. 374 */ 375 376 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 377 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 378 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 379 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 380 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 381 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 382 383 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 384 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 385 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 386 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 387 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 388 389 /* 390 * Hash table routines 391 */ 392 393 #define HT_LOCK_PAD 64 394 395 struct ht_lock { 396 kmutex_t ht_lock; 397 #ifdef _KERNEL 398 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 399 #endif 400 }; 401 402 #define BUF_LOCKS 256 403 typedef struct buf_hash_table { 404 uint64_t ht_mask; 405 arc_buf_hdr_t **ht_table; 406 struct ht_lock ht_locks[BUF_LOCKS]; 407 } buf_hash_table_t; 408 409 static buf_hash_table_t buf_hash_table; 410 411 #define BUF_HASH_INDEX(spa, dva, birth) \ 412 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 413 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 414 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 415 #define HDR_LOCK(buf) \ 416 (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) 417 418 uint64_t zfs_crc64_table[256]; 419 420 static uint64_t 421 buf_hash(spa_t *spa, dva_t *dva, uint64_t birth) 422 { 423 uintptr_t spav = (uintptr_t)spa; 424 uint8_t *vdva = (uint8_t *)dva; 425 uint64_t crc = -1ULL; 426 int i; 427 428 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 429 430 for (i = 0; i < sizeof (dva_t); i++) 431 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 432 433 crc ^= (spav>>8) ^ birth; 434 435 return (crc); 436 } 437 438 #define BUF_EMPTY(buf) \ 439 ((buf)->b_dva.dva_word[0] == 0 && \ 440 (buf)->b_dva.dva_word[1] == 0 && \ 441 (buf)->b_birth == 0) 442 443 #define BUF_EQUAL(spa, dva, birth, buf) \ 444 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 445 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 446 ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 447 448 static arc_buf_hdr_t * 449 buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp) 450 { 451 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 452 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 453 arc_buf_hdr_t *buf; 454 455 mutex_enter(hash_lock); 456 for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 457 buf = buf->b_hash_next) { 458 if (BUF_EQUAL(spa, dva, birth, buf)) { 459 *lockp = hash_lock; 460 return (buf); 461 } 462 } 463 mutex_exit(hash_lock); 464 *lockp = NULL; 465 return (NULL); 466 } 467 468 /* 469 * Insert an entry into the hash table. If there is already an element 470 * equal to elem in the hash table, then the already existing element 471 * will be returned and the new element will not be inserted. 472 * Otherwise returns NULL. 
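 *
 * For illustration only (this sketch is not part of the original code; the
 * locals 'hdr' and 'hash_lock' are just example names): a caller might use
 * the return value like this, remembering that the hash lock is returned
 * held through *lockp in either case:
 *
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... an identical block is already cached; use 'exists' ...
 *	}
 *	mutex_exit(hash_lock);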
473 */ 474 static arc_buf_hdr_t * 475 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 476 { 477 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 478 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 479 arc_buf_hdr_t *fbuf; 480 uint32_t i; 481 482 ASSERT(!HDR_IN_HASH_TABLE(buf)); 483 *lockp = hash_lock; 484 mutex_enter(hash_lock); 485 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 486 fbuf = fbuf->b_hash_next, i++) { 487 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 488 return (fbuf); 489 } 490 491 buf->b_hash_next = buf_hash_table.ht_table[idx]; 492 buf_hash_table.ht_table[idx] = buf; 493 buf->b_flags |= ARC_IN_HASH_TABLE; 494 495 /* collect some hash table performance data */ 496 if (i > 0) { 497 ARCSTAT_BUMP(arcstat_hash_collisions); 498 if (i == 1) 499 ARCSTAT_BUMP(arcstat_hash_chains); 500 501 ARCSTAT_MAX(arcstat_hash_chain_max, i); 502 } 503 504 ARCSTAT_BUMP(arcstat_hash_elements); 505 ARCSTAT_MAXSTAT(arcstat_hash_elements); 506 507 return (NULL); 508 } 509 510 static void 511 buf_hash_remove(arc_buf_hdr_t *buf) 512 { 513 arc_buf_hdr_t *fbuf, **bufp; 514 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 515 516 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 517 ASSERT(HDR_IN_HASH_TABLE(buf)); 518 519 bufp = &buf_hash_table.ht_table[idx]; 520 while ((fbuf = *bufp) != buf) { 521 ASSERT(fbuf != NULL); 522 bufp = &fbuf->b_hash_next; 523 } 524 *bufp = buf->b_hash_next; 525 buf->b_hash_next = NULL; 526 buf->b_flags &= ~ARC_IN_HASH_TABLE; 527 528 /* collect some hash table performance data */ 529 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 530 531 if (buf_hash_table.ht_table[idx] && 532 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 533 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 534 } 535 536 /* 537 * Global data structures and functions for the buf kmem cache. 538 */ 539 static kmem_cache_t *hdr_cache; 540 static kmem_cache_t *buf_cache; 541 542 static void 543 buf_fini(void) 544 { 545 int i; 546 547 kmem_free(buf_hash_table.ht_table, 548 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 549 for (i = 0; i < BUF_LOCKS; i++) 550 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 551 kmem_cache_destroy(hdr_cache); 552 kmem_cache_destroy(buf_cache); 553 } 554 555 /* 556 * Constructor callback - called when the cache is empty 557 * and a new buf is requested. 558 */ 559 /* ARGSUSED */ 560 static int 561 hdr_cons(void *vbuf, void *unused, int kmflag) 562 { 563 arc_buf_hdr_t *buf = vbuf; 564 565 bzero(buf, sizeof (arc_buf_hdr_t)); 566 refcount_create(&buf->b_refcnt); 567 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 568 return (0); 569 } 570 571 /* 572 * Destructor callback - called when a cached buf is 573 * no longer required. 574 */ 575 /* ARGSUSED */ 576 static void 577 hdr_dest(void *vbuf, void *unused) 578 { 579 arc_buf_hdr_t *buf = vbuf; 580 581 refcount_destroy(&buf->b_refcnt); 582 cv_destroy(&buf->b_cv); 583 } 584 585 /* 586 * Reclaim callback -- invoked when memory is low. 587 */ 588 /* ARGSUSED */ 589 static void 590 hdr_recl(void *unused) 591 { 592 dprintf("hdr_recl called\n"); 593 /* 594 * umem calls the reclaim func when we destroy the buf cache, 595 * which is after we do arc_fini(). 596 */ 597 if (!arc_dead) 598 cv_signal(&arc_reclaim_thr_cv); 599 } 600 601 static void 602 buf_init(void) 603 { 604 uint64_t *ct; 605 uint64_t hsize = 1ULL << 12; 606 int i, j; 607 608 /* 609 * The hash table is big enough to fill all of physical memory 610 * with an average 64K block size. 
The table will take up 611 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 612 */ 613 while (hsize * 65536 < physmem * PAGESIZE) 614 hsize <<= 1; 615 retry: 616 buf_hash_table.ht_mask = hsize - 1; 617 buf_hash_table.ht_table = 618 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 619 if (buf_hash_table.ht_table == NULL) { 620 ASSERT(hsize > (1ULL << 8)); 621 hsize >>= 1; 622 goto retry; 623 } 624 625 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 626 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 627 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 628 0, NULL, NULL, NULL, NULL, NULL, 0); 629 630 for (i = 0; i < 256; i++) 631 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 632 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 633 634 for (i = 0; i < BUF_LOCKS; i++) { 635 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 636 NULL, MUTEX_DEFAULT, NULL); 637 } 638 } 639 640 #define ARC_MINTIME (hz>>4) /* 62 ms */ 641 642 static void 643 arc_cksum_verify(arc_buf_t *buf) 644 { 645 zio_cksum_t zc; 646 647 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 648 return; 649 650 mutex_enter(&buf->b_hdr->b_freeze_lock); 651 if (buf->b_hdr->b_freeze_cksum == NULL || 652 (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 653 mutex_exit(&buf->b_hdr->b_freeze_lock); 654 return; 655 } 656 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 657 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 658 panic("buffer modified while frozen!"); 659 mutex_exit(&buf->b_hdr->b_freeze_lock); 660 } 661 662 static void 663 arc_cksum_compute(arc_buf_t *buf) 664 { 665 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 666 return; 667 668 mutex_enter(&buf->b_hdr->b_freeze_lock); 669 if (buf->b_hdr->b_freeze_cksum != NULL) { 670 mutex_exit(&buf->b_hdr->b_freeze_lock); 671 return; 672 } 673 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 674 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 675 buf->b_hdr->b_freeze_cksum); 676 mutex_exit(&buf->b_hdr->b_freeze_lock); 677 } 678 679 void 680 arc_buf_thaw(arc_buf_t *buf) 681 { 682 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 683 return; 684 685 if (buf->b_hdr->b_state != arc_anon) 686 panic("modifying non-anon buffer!"); 687 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) 688 panic("modifying buffer while i/o in progress!"); 689 arc_cksum_verify(buf); 690 mutex_enter(&buf->b_hdr->b_freeze_lock); 691 if (buf->b_hdr->b_freeze_cksum != NULL) { 692 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 693 buf->b_hdr->b_freeze_cksum = NULL; 694 } 695 mutex_exit(&buf->b_hdr->b_freeze_lock); 696 } 697 698 void 699 arc_buf_freeze(arc_buf_t *buf) 700 { 701 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 702 return; 703 704 ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 705 buf->b_hdr->b_state == arc_anon); 706 arc_cksum_compute(buf); 707 } 708 709 static void 710 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 711 { 712 ASSERT(MUTEX_HELD(hash_lock)); 713 714 if ((refcount_add(&ab->b_refcnt, tag) == 1) && 715 (ab->b_state != arc_anon)) { 716 int delta = ab->b_size * ab->b_datacnt; 717 718 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 719 mutex_enter(&ab->b_state->arcs_mtx); 720 ASSERT(list_link_active(&ab->b_arc_node)); 721 list_remove(&ab->b_state->arcs_list, ab); 722 if (GHOST_STATE(ab->b_state)) { 723 ASSERT3U(ab->b_datacnt, ==, 0); 724 ASSERT3P(ab->b_buf, ==, NULL); 725 delta = ab->b_size; 726 } 727 ASSERT(delta > 0); 728 ASSERT3U(ab->b_state->arcs_lsize, >=, delta); 729 atomic_add_64(&ab->b_state->arcs_lsize, -delta); 
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list, ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(&state->arcs_lsize, ab->b_size * ab->b_datacnt);
		ASSERT3U(state->arcs_size, >=, state->arcs_lsize);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int refcnt = refcount_count(&ab->b_refcnt);
	int from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
795 */ 796 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 797 /* ghost elements have a ghost size */ 798 ASSERT(ab->b_buf == NULL); 799 from_delta = ab->b_size; 800 } 801 ASSERT3U(old_state->arcs_lsize, >=, from_delta); 802 atomic_add_64(&old_state->arcs_lsize, -from_delta); 803 804 if (use_mutex) 805 mutex_exit(&old_state->arcs_mtx); 806 } 807 if (new_state != arc_anon) { 808 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); 809 810 if (use_mutex) 811 mutex_enter(&new_state->arcs_mtx); 812 813 list_insert_head(&new_state->arcs_list, ab); 814 815 /* ghost elements have a ghost size */ 816 if (GHOST_STATE(new_state)) { 817 ASSERT(ab->b_datacnt == 0); 818 ASSERT(ab->b_buf == NULL); 819 to_delta = ab->b_size; 820 } 821 atomic_add_64(&new_state->arcs_lsize, to_delta); 822 ASSERT3U(new_state->arcs_size + to_delta, >=, 823 new_state->arcs_lsize); 824 825 if (use_mutex) 826 mutex_exit(&new_state->arcs_mtx); 827 } 828 } 829 830 ASSERT(!BUF_EMPTY(ab)); 831 if (new_state == arc_anon && old_state != arc_anon) { 832 buf_hash_remove(ab); 833 } 834 835 /* adjust state sizes */ 836 if (to_delta) 837 atomic_add_64(&new_state->arcs_size, to_delta); 838 if (from_delta) { 839 ASSERT3U(old_state->arcs_size, >=, from_delta); 840 atomic_add_64(&old_state->arcs_size, -from_delta); 841 } 842 ab->b_state = new_state; 843 } 844 845 arc_buf_t * 846 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 847 { 848 arc_buf_hdr_t *hdr; 849 arc_buf_t *buf; 850 851 ASSERT3U(size, >, 0); 852 hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP); 853 ASSERT(BUF_EMPTY(hdr)); 854 hdr->b_size = size; 855 hdr->b_type = type; 856 hdr->b_spa = spa; 857 hdr->b_state = arc_anon; 858 hdr->b_arc_access = 0; 859 buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 860 buf->b_hdr = hdr; 861 buf->b_data = NULL; 862 buf->b_efunc = NULL; 863 buf->b_private = NULL; 864 buf->b_next = NULL; 865 hdr->b_buf = buf; 866 arc_get_data_buf(buf); 867 hdr->b_datacnt = 1; 868 hdr->b_flags = 0; 869 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 870 (void) refcount_add(&hdr->b_refcnt, tag); 871 872 return (buf); 873 } 874 875 static arc_buf_t * 876 arc_buf_clone(arc_buf_t *from) 877 { 878 arc_buf_t *buf; 879 arc_buf_hdr_t *hdr = from->b_hdr; 880 uint64_t size = hdr->b_size; 881 882 buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 883 buf->b_hdr = hdr; 884 buf->b_data = NULL; 885 buf->b_efunc = NULL; 886 buf->b_private = NULL; 887 buf->b_next = hdr->b_buf; 888 hdr->b_buf = buf; 889 arc_get_data_buf(buf); 890 bcopy(from->b_data, buf->b_data, size); 891 hdr->b_datacnt += 1; 892 return (buf); 893 } 894 895 void 896 arc_buf_add_ref(arc_buf_t *buf, void* tag) 897 { 898 arc_buf_hdr_t *hdr; 899 kmutex_t *hash_lock; 900 901 /* 902 * Check to see if this buffer is currently being evicted via 903 * arc_do_user_evicts(). 904 */ 905 mutex_enter(&arc_eviction_mtx); 906 hdr = buf->b_hdr; 907 if (hdr == NULL) { 908 mutex_exit(&arc_eviction_mtx); 909 return; 910 } 911 hash_lock = HDR_LOCK(hdr); 912 mutex_exit(&arc_eviction_mtx); 913 914 mutex_enter(hash_lock); 915 if (buf->b_data == NULL) { 916 /* 917 * This buffer is evicted. 
918 */ 919 mutex_exit(hash_lock); 920 return; 921 } 922 923 ASSERT(buf->b_hdr == hdr); 924 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 925 add_reference(hdr, hash_lock, tag); 926 arc_access(hdr, hash_lock); 927 mutex_exit(hash_lock); 928 ARCSTAT_BUMP(arcstat_hits); 929 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 930 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 931 data, metadata, hits); 932 } 933 934 static void 935 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 936 { 937 arc_buf_t **bufp; 938 939 /* free up data associated with the buf */ 940 if (buf->b_data) { 941 arc_state_t *state = buf->b_hdr->b_state; 942 uint64_t size = buf->b_hdr->b_size; 943 arc_buf_contents_t type = buf->b_hdr->b_type; 944 945 arc_cksum_verify(buf); 946 if (!recycle) { 947 if (type == ARC_BUFC_METADATA) { 948 zio_buf_free(buf->b_data, size); 949 } else { 950 ASSERT(type == ARC_BUFC_DATA); 951 zio_data_buf_free(buf->b_data, size); 952 } 953 atomic_add_64(&arc_size, -size); 954 } 955 if (list_link_active(&buf->b_hdr->b_arc_node)) { 956 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 957 ASSERT(state != arc_anon); 958 ASSERT3U(state->arcs_lsize, >=, size); 959 atomic_add_64(&state->arcs_lsize, -size); 960 } 961 ASSERT3U(state->arcs_size, >=, size); 962 atomic_add_64(&state->arcs_size, -size); 963 buf->b_data = NULL; 964 ASSERT(buf->b_hdr->b_datacnt > 0); 965 buf->b_hdr->b_datacnt -= 1; 966 } 967 968 /* only remove the buf if requested */ 969 if (!all) 970 return; 971 972 /* remove the buf from the hdr list */ 973 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 974 continue; 975 *bufp = buf->b_next; 976 977 ASSERT(buf->b_efunc == NULL); 978 979 /* clean up the buf */ 980 buf->b_hdr = NULL; 981 kmem_cache_free(buf_cache, buf); 982 } 983 984 static void 985 arc_hdr_destroy(arc_buf_hdr_t *hdr) 986 { 987 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 988 ASSERT3P(hdr->b_state, ==, arc_anon); 989 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 990 991 if (!BUF_EMPTY(hdr)) { 992 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 993 bzero(&hdr->b_dva, sizeof (dva_t)); 994 hdr->b_birth = 0; 995 hdr->b_cksum0 = 0; 996 } 997 while (hdr->b_buf) { 998 arc_buf_t *buf = hdr->b_buf; 999 1000 if (buf->b_efunc) { 1001 mutex_enter(&arc_eviction_mtx); 1002 ASSERT(buf->b_hdr != NULL); 1003 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1004 hdr->b_buf = buf->b_next; 1005 buf->b_hdr = &arc_eviction_hdr; 1006 buf->b_next = arc_eviction_list; 1007 arc_eviction_list = buf; 1008 mutex_exit(&arc_eviction_mtx); 1009 } else { 1010 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1011 } 1012 } 1013 if (hdr->b_freeze_cksum != NULL) { 1014 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1015 hdr->b_freeze_cksum = NULL; 1016 } 1017 1018 ASSERT(!list_link_active(&hdr->b_arc_node)); 1019 ASSERT3P(hdr->b_hash_next, ==, NULL); 1020 ASSERT3P(hdr->b_acb, ==, NULL); 1021 kmem_cache_free(hdr_cache, hdr); 1022 } 1023 1024 void 1025 arc_buf_free(arc_buf_t *buf, void *tag) 1026 { 1027 arc_buf_hdr_t *hdr = buf->b_hdr; 1028 int hashed = hdr->b_state != arc_anon; 1029 1030 ASSERT(buf->b_efunc == NULL); 1031 ASSERT(buf->b_data != NULL); 1032 1033 if (hashed) { 1034 kmutex_t *hash_lock = HDR_LOCK(hdr); 1035 1036 mutex_enter(hash_lock); 1037 (void) remove_reference(hdr, hash_lock, tag); 1038 if (hdr->b_datacnt > 1) 1039 arc_buf_destroy(buf, FALSE, TRUE); 1040 else 1041 hdr->b_flags |= ARC_BUF_AVAILABLE; 1042 mutex_exit(hash_lock); 1043 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1044 int destroy_hdr; 1045 /* 1046 * We are in the middle of an 
async write. Don't destroy 1047 * this buffer unless the write completes before we finish 1048 * decrementing the reference count. 1049 */ 1050 mutex_enter(&arc_eviction_mtx); 1051 (void) remove_reference(hdr, NULL, tag); 1052 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1053 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1054 mutex_exit(&arc_eviction_mtx); 1055 if (destroy_hdr) 1056 arc_hdr_destroy(hdr); 1057 } else { 1058 if (remove_reference(hdr, NULL, tag) > 0) { 1059 ASSERT(HDR_IO_ERROR(hdr)); 1060 arc_buf_destroy(buf, FALSE, TRUE); 1061 } else { 1062 arc_hdr_destroy(hdr); 1063 } 1064 } 1065 } 1066 1067 int 1068 arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1069 { 1070 arc_buf_hdr_t *hdr = buf->b_hdr; 1071 kmutex_t *hash_lock = HDR_LOCK(hdr); 1072 int no_callback = (buf->b_efunc == NULL); 1073 1074 if (hdr->b_state == arc_anon) { 1075 arc_buf_free(buf, tag); 1076 return (no_callback); 1077 } 1078 1079 mutex_enter(hash_lock); 1080 ASSERT(hdr->b_state != arc_anon); 1081 ASSERT(buf->b_data != NULL); 1082 1083 (void) remove_reference(hdr, hash_lock, tag); 1084 if (hdr->b_datacnt > 1) { 1085 if (no_callback) 1086 arc_buf_destroy(buf, FALSE, TRUE); 1087 } else if (no_callback) { 1088 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1089 hdr->b_flags |= ARC_BUF_AVAILABLE; 1090 } 1091 ASSERT(no_callback || hdr->b_datacnt > 1 || 1092 refcount_is_zero(&hdr->b_refcnt)); 1093 mutex_exit(hash_lock); 1094 return (no_callback); 1095 } 1096 1097 int 1098 arc_buf_size(arc_buf_t *buf) 1099 { 1100 return (buf->b_hdr->b_size); 1101 } 1102 1103 /* 1104 * Evict buffers from list until we've removed the specified number of 1105 * bytes. Move the removed buffers to the appropriate evict state. 1106 * If the recycle flag is set, then attempt to "recycle" a buffer: 1107 * - look for a buffer to evict that is `bytes' long. 1108 * - return the data block from this buffer rather than freeing it. 1109 * This flag is used by callers that are trying to make space for a 1110 * new buffer in a full arc cache. 1111 */ 1112 static void * 1113 arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle, 1114 arc_buf_contents_t type) 1115 { 1116 arc_state_t *evicted_state; 1117 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1118 arc_buf_hdr_t *ab, *ab_prev = NULL; 1119 kmutex_t *hash_lock; 1120 boolean_t have_lock; 1121 void *stolen = NULL; 1122 1123 ASSERT(state == arc_mru || state == arc_mfu); 1124 1125 evicted_state = (state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 1126 1127 mutex_enter(&state->arcs_mtx); 1128 mutex_enter(&evicted_state->arcs_mtx); 1129 1130 for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) { 1131 ab_prev = list_prev(&state->arcs_list, ab); 1132 /* prefetch buffers have a minimum lifespan */ 1133 if (HDR_IO_IN_PROGRESS(ab) || 1134 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1135 lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { 1136 skipped++; 1137 continue; 1138 } 1139 /* "lookahead" for better eviction candidate */ 1140 if (recycle && ab->b_size != bytes && 1141 ab_prev && ab_prev->b_size == bytes) 1142 continue; 1143 hash_lock = HDR_LOCK(ab); 1144 have_lock = MUTEX_HELD(hash_lock); 1145 if (have_lock || mutex_tryenter(hash_lock)) { 1146 ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 1147 ASSERT(ab->b_datacnt > 0); 1148 while (ab->b_buf) { 1149 arc_buf_t *buf = ab->b_buf; 1150 if (buf->b_data) { 1151 bytes_evicted += ab->b_size; 1152 if (recycle && ab->b_type == type && 1153 ab->b_size == bytes) { 1154 stolen = buf->b_data; 1155 recycle = FALSE; 1156 } 1157 } 1158 if (buf->b_efunc) { 1159 mutex_enter(&arc_eviction_mtx); 1160 arc_buf_destroy(buf, 1161 buf->b_data == stolen, FALSE); 1162 ab->b_buf = buf->b_next; 1163 buf->b_hdr = &arc_eviction_hdr; 1164 buf->b_next = arc_eviction_list; 1165 arc_eviction_list = buf; 1166 mutex_exit(&arc_eviction_mtx); 1167 } else { 1168 arc_buf_destroy(buf, 1169 buf->b_data == stolen, TRUE); 1170 } 1171 } 1172 ASSERT(ab->b_datacnt == 0); 1173 arc_change_state(evicted_state, ab, hash_lock); 1174 ASSERT(HDR_IN_HASH_TABLE(ab)); 1175 ab->b_flags = ARC_IN_HASH_TABLE; 1176 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 1177 if (!have_lock) 1178 mutex_exit(hash_lock); 1179 if (bytes >= 0 && bytes_evicted >= bytes) 1180 break; 1181 } else { 1182 missed += 1; 1183 } 1184 } 1185 1186 mutex_exit(&evicted_state->arcs_mtx); 1187 mutex_exit(&state->arcs_mtx); 1188 1189 if (bytes_evicted < bytes) 1190 dprintf("only evicted %lld bytes from %x", 1191 (longlong_t)bytes_evicted, state); 1192 1193 if (skipped) 1194 ARCSTAT_INCR(arcstat_evict_skip, skipped); 1195 1196 if (missed) 1197 ARCSTAT_INCR(arcstat_mutex_miss, missed); 1198 1199 return (stolen); 1200 } 1201 1202 /* 1203 * Remove buffers from list until we've removed the specified number of 1204 * bytes. Destroy the buffers that are removed. 
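 *
 * Illustrative use (a sketch based on the callers below, not new behavior):
 * arc_adjust() trims a ghost list when the MRU plus its ghost list outgrow
 * the target, e.g.
 *
 *	todelete = MIN(arc_mru_ghost->arcs_lsize, mru_over);
 *	arc_evict_ghost(arc_mru_ghost, todelete);
 *
 * and arc_flush() passes a negative byte count to empty a ghost list
 * entirely.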
1205 */ 1206 static void 1207 arc_evict_ghost(arc_state_t *state, int64_t bytes) 1208 { 1209 arc_buf_hdr_t *ab, *ab_prev; 1210 kmutex_t *hash_lock; 1211 uint64_t bytes_deleted = 0; 1212 uint_t bufs_skipped = 0; 1213 1214 ASSERT(GHOST_STATE(state)); 1215 top: 1216 mutex_enter(&state->arcs_mtx); 1217 for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) { 1218 ab_prev = list_prev(&state->arcs_list, ab); 1219 hash_lock = HDR_LOCK(ab); 1220 if (mutex_tryenter(hash_lock)) { 1221 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1222 ASSERT(ab->b_buf == NULL); 1223 arc_change_state(arc_anon, ab, hash_lock); 1224 mutex_exit(hash_lock); 1225 ARCSTAT_BUMP(arcstat_deleted); 1226 bytes_deleted += ab->b_size; 1227 arc_hdr_destroy(ab); 1228 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1229 if (bytes >= 0 && bytes_deleted >= bytes) 1230 break; 1231 } else { 1232 if (bytes < 0) { 1233 mutex_exit(&state->arcs_mtx); 1234 mutex_enter(hash_lock); 1235 mutex_exit(hash_lock); 1236 goto top; 1237 } 1238 bufs_skipped += 1; 1239 } 1240 } 1241 mutex_exit(&state->arcs_mtx); 1242 1243 if (bufs_skipped) { 1244 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1245 ASSERT(bytes >= 0); 1246 } 1247 1248 if (bytes_deleted < bytes) 1249 dprintf("only deleted %lld bytes from %p", 1250 (longlong_t)bytes_deleted, state); 1251 } 1252 1253 static void 1254 arc_adjust(void) 1255 { 1256 int64_t top_sz, mru_over, arc_over, todelete; 1257 1258 top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1259 1260 if (top_sz > arc_p && arc_mru->arcs_lsize > 0) { 1261 int64_t toevict = MIN(arc_mru->arcs_lsize, top_sz - arc_p); 1262 (void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_UNDEF); 1263 top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1264 } 1265 1266 mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; 1267 1268 if (mru_over > 0) { 1269 if (arc_mru_ghost->arcs_lsize > 0) { 1270 todelete = MIN(arc_mru_ghost->arcs_lsize, mru_over); 1271 arc_evict_ghost(arc_mru_ghost, todelete); 1272 } 1273 } 1274 1275 if ((arc_over = arc_size - arc_c) > 0) { 1276 int64_t tbl_over; 1277 1278 if (arc_mfu->arcs_lsize > 0) { 1279 int64_t toevict = MIN(arc_mfu->arcs_lsize, arc_over); 1280 (void) arc_evict(arc_mfu, toevict, FALSE, 1281 ARC_BUFC_UNDEF); 1282 } 1283 1284 tbl_over = arc_size + arc_mru_ghost->arcs_lsize + 1285 arc_mfu_ghost->arcs_lsize - arc_c*2; 1286 1287 if (tbl_over > 0 && arc_mfu_ghost->arcs_lsize > 0) { 1288 todelete = MIN(arc_mfu_ghost->arcs_lsize, tbl_over); 1289 arc_evict_ghost(arc_mfu_ghost, todelete); 1290 } 1291 } 1292 } 1293 1294 static void 1295 arc_do_user_evicts(void) 1296 { 1297 mutex_enter(&arc_eviction_mtx); 1298 while (arc_eviction_list != NULL) { 1299 arc_buf_t *buf = arc_eviction_list; 1300 arc_eviction_list = buf->b_next; 1301 buf->b_hdr = NULL; 1302 mutex_exit(&arc_eviction_mtx); 1303 1304 if (buf->b_efunc != NULL) 1305 VERIFY(buf->b_efunc(buf) == 0); 1306 1307 buf->b_efunc = NULL; 1308 buf->b_private = NULL; 1309 kmem_cache_free(buf_cache, buf); 1310 mutex_enter(&arc_eviction_mtx); 1311 } 1312 mutex_exit(&arc_eviction_mtx); 1313 } 1314 1315 /* 1316 * Flush all *evictable* data from the cache. 1317 * NOTE: this will not touch "active" (i.e. referenced) data. 
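 *
 * Put differently: once arc_flush() returns, only buffers with outstanding
 * references remain cached, both ghost lists are empty, and any pending
 * user eviction callbacks have been invoked (arc_eviction_list is NULL).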
 */
void
arc_flush(void)
{
	while (list_head(&arc_mru->arcs_list))
		(void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_UNDEF);
	while (list_head(&arc_mfu->arcs_list))
		(void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_UNDEF);

	arc_evict_ghost(arc_mru_ghost, -1);
	arc_evict_ghost(arc_mfu_ghost, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(arc_eviction_list == NULL);
}

int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */

void
arc_shrink(void)
{
	if (arc_c > arc_c_min) {
		uint64_t to_free;

#ifdef _KERNEL
		to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
#else
		to_free = arc_c >> arc_shrink_shift;
#endif
		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (arc_size > arc_c)
		arc_adjust();
}

static int
arc_reclaim_needed(void)
{
	uint64_t extra;

#ifdef _KERNEL

	if (needfree)
		return (1);

	/*
	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
	 */
	extra = desfree;

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	if (freemem < lotsfree + needfree + extra)
		return (1);

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed.  anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages.  We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
		return (1);

	/*
	 * If zio data pages are being allocated out of a separate heap segment,
	 * then check that the size of available vmem for this area remains
	 * above 1/4th free.  This needs to be done since the size of the
	 * non-default segment is smaller than physical memory, so we could
	 * conceivably run out of VA in that segment before running out of
	 * physical memory.
	 */
	if ((zio_arena != NULL) && (btop(vmem_size(zio_arena, VMEM_FREE)) <
	    (btop(vmem_size(zio_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)))
		return (1);

#if defined(__i386)
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25 pages
	 * which is so low that it's useless.  In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
		return (1);
#endif

#else
	if (spa_get_random(100) == 0)
		return (1);
#endif
	return (0);
}

static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat)
{
	size_t			i;
	kmem_cache_t		*prev_cache = NULL;
	kmem_cache_t		*prev_data_cache = NULL;
	extern kmem_cache_t	*zio_buf_cache[];
	extern kmem_cache_t	*zio_data_buf_cache[];

#ifdef _KERNEL
	/*
	 * First purge some DNLC entries, in case the DNLC is using
	 * up too much memory.
	 */
	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);

#if defined(__i386)
	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	kmem_reap();
#endif
#endif

	/*
	 * An aggressive reclamation will shrink the cache size as well as
	 * reap free buffers from the arc kmem caches.
	 */
	if (strat == ARC_RECLAIM_AGGR)
		arc_shrink();

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
		if (zio_data_buf_cache[i] != prev_data_cache) {
			prev_data_cache = zio_data_buf_cache[i];
			kmem_cache_reap_now(zio_data_buf_cache[i]);
		}
	}
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_cache);
}

static void
arc_reclaim_thread(void)
{
	clock_t			growtime = 0;
	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
	callb_cpr_t		cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_thr_lock);
	while (arc_thread_exit == 0) {
		if (arc_reclaim_needed()) {

			if (arc_no_grow) {
				if (last_reclaim == ARC_RECLAIM_CONS) {
					last_reclaim = ARC_RECLAIM_AGGR;
				} else {
					last_reclaim = ARC_RECLAIM_CONS;
				}
			} else {
				arc_no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
				membar_producer();
			}

			/* reset the growth delay for every reclaim */
			growtime = lbolt + (arc_grow_retry * hz);
			ASSERT(growtime > 0);

			arc_kmem_reap_now(last_reclaim);

		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
			arc_no_grow = FALSE;
		}

		if (2 * arc_c < arc_size +
		    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)
			arc_adjust();

		if (arc_eviction_list != NULL)
			arc_do_user_evicts();

		/* block until needed, or one second, whichever is shorter */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&arc_reclaim_thr_cv,
		    &arc_reclaim_thr_lock, (lbolt + hz));
		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
	}

	arc_thread_exit = 0;
	cv_broadcast(&arc_reclaim_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
	thread_exit();
}

/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from.  This function is only called
 * when we are adding new content to the cache.
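 *
 * A worked example (illustrative numbers only): if arc_mru_ghost currently
 * holds 1GB and arc_mfu_ghost holds 3GB, then a hit in the MRU ghost list
 * grows the MRU target by 3x the block size (arc_p += 3 * bytes, capped at
 * arc_c), while a hit in the MFU ghost list shrinks it by only 1x
 * (arc_p -= bytes, floored at 0); hits in the smaller ghost list are
 * amplified by the ratio of the two ghost list sizes.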
1542 */ 1543 static void 1544 arc_adapt(int bytes, arc_state_t *state) 1545 { 1546 int mult; 1547 1548 ASSERT(bytes > 0); 1549 /* 1550 * Adapt the target size of the MRU list: 1551 * - if we just hit in the MRU ghost list, then increase 1552 * the target size of the MRU list. 1553 * - if we just hit in the MFU ghost list, then increase 1554 * the target size of the MFU list by decreasing the 1555 * target size of the MRU list. 1556 */ 1557 if (state == arc_mru_ghost) { 1558 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 1559 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 1560 1561 arc_p = MIN(arc_c, arc_p + bytes * mult); 1562 } else if (state == arc_mfu_ghost) { 1563 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 1564 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 1565 1566 arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 1567 } 1568 ASSERT((int64_t)arc_p >= 0); 1569 1570 if (arc_reclaim_needed()) { 1571 cv_signal(&arc_reclaim_thr_cv); 1572 return; 1573 } 1574 1575 if (arc_no_grow) 1576 return; 1577 1578 if (arc_c >= arc_c_max) 1579 return; 1580 1581 /* 1582 * If we're within (2 * maxblocksize) bytes of the target 1583 * cache size, increment the target cache size 1584 */ 1585 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 1586 atomic_add_64(&arc_c, (int64_t)bytes); 1587 if (arc_c > arc_c_max) 1588 arc_c = arc_c_max; 1589 else if (state == arc_anon) 1590 atomic_add_64(&arc_p, (int64_t)bytes); 1591 if (arc_p > arc_c) 1592 arc_p = arc_c; 1593 } 1594 ASSERT((int64_t)arc_p >= 0); 1595 } 1596 1597 /* 1598 * Check if the cache has reached its limits and eviction is required 1599 * prior to insert. 1600 */ 1601 static int 1602 arc_evict_needed() 1603 { 1604 if (arc_reclaim_needed()) 1605 return (1); 1606 1607 return (arc_size > arc_c); 1608 } 1609 1610 /* 1611 * The buffer, supplied as the first argument, needs a data block. 1612 * So, if we are at cache max, determine which cache should be victimized. 1613 * We have the following cases: 1614 * 1615 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 1616 * In this situation if we're out of space, but the resident size of the MFU is 1617 * under the limit, victimize the MFU cache to satisfy this insertion request. 1618 * 1619 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 1620 * Here, we've used up all of the available space for the MRU, so we need to 1621 * evict from our own cache instead. Evict from the set of resident MRU 1622 * entries. 1623 * 1624 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 1625 * c minus p represents the MFU space in the cache, since p is the size of the 1626 * cache that is dedicated to the MRU. In this situation there's still space on 1627 * the MFU side, so the MRU side needs to be victimized. 1628 * 1629 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 1630 * MFU's resident set is consuming more space than it has been allotted. In 1631 * this situation, we must victimize our own cache, the MFU, for this insertion. 1632 */ 1633 static void 1634 arc_get_data_buf(arc_buf_t *buf) 1635 { 1636 arc_state_t *state = buf->b_hdr->b_state; 1637 uint64_t size = buf->b_hdr->b_size; 1638 arc_buf_contents_t type = buf->b_hdr->b_type; 1639 1640 arc_adapt(size, state); 1641 1642 /* 1643 * We have not yet reached cache maximum size, 1644 * just allocate a new buffer. 
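	 *
	 * Otherwise we fall through to the case analysis above: a victim
	 * state is chosen and arc_evict(state, size, TRUE, type) tries to
	 * recycle an evictable buffer of exactly this size; a NULL return
	 * (no same-sized buffer found) means we allocate anyway and bump
	 * arcstat_recycle_miss.  (Clarifying note, not a change in behavior.)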
1645 */ 1646 if (!arc_evict_needed()) { 1647 if (type == ARC_BUFC_METADATA) { 1648 buf->b_data = zio_buf_alloc(size); 1649 } else { 1650 ASSERT(type == ARC_BUFC_DATA); 1651 buf->b_data = zio_data_buf_alloc(size); 1652 } 1653 atomic_add_64(&arc_size, size); 1654 goto out; 1655 } 1656 1657 /* 1658 * If we are prefetching from the mfu ghost list, this buffer 1659 * will end up on the mru list; so steal space from there. 1660 */ 1661 if (state == arc_mfu_ghost) 1662 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 1663 else if (state == arc_mru_ghost) 1664 state = arc_mru; 1665 1666 if (state == arc_mru || state == arc_anon) { 1667 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 1668 state = (arc_p > mru_used) ? arc_mfu : arc_mru; 1669 } else { 1670 /* MFU cases */ 1671 uint64_t mfu_space = arc_c - arc_p; 1672 state = (mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 1673 } 1674 if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) { 1675 if (type == ARC_BUFC_METADATA) { 1676 buf->b_data = zio_buf_alloc(size); 1677 } else { 1678 ASSERT(type == ARC_BUFC_DATA); 1679 buf->b_data = zio_data_buf_alloc(size); 1680 } 1681 atomic_add_64(&arc_size, size); 1682 ARCSTAT_BUMP(arcstat_recycle_miss); 1683 } 1684 ASSERT(buf->b_data != NULL); 1685 out: 1686 /* 1687 * Update the state size. Note that ghost states have a 1688 * "ghost size" and so don't need to be updated. 1689 */ 1690 if (!GHOST_STATE(buf->b_hdr->b_state)) { 1691 arc_buf_hdr_t *hdr = buf->b_hdr; 1692 1693 atomic_add_64(&hdr->b_state->arcs_size, size); 1694 if (list_link_active(&hdr->b_arc_node)) { 1695 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1696 atomic_add_64(&hdr->b_state->arcs_lsize, size); 1697 } 1698 /* 1699 * If we are growing the cache, and we are adding anonymous 1700 * data, and we have outgrown arc_p, update arc_p 1701 */ 1702 if (arc_size < arc_c && hdr->b_state == arc_anon && 1703 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 1704 arc_p = MIN(arc_c, arc_p + size); 1705 } 1706 } 1707 1708 /* 1709 * This routine is called whenever a buffer is accessed. 1710 * NOTE: the hash lock is dropped in this function. 1711 */ 1712 static void 1713 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 1714 { 1715 ASSERT(MUTEX_HELD(hash_lock)); 1716 1717 if (buf->b_state == arc_anon) { 1718 /* 1719 * This buffer is not in the cache, and does not 1720 * appear in our "ghost" list. Add the new buffer 1721 * to the MRU state. 1722 */ 1723 1724 ASSERT(buf->b_arc_access == 0); 1725 buf->b_arc_access = lbolt; 1726 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 1727 arc_change_state(arc_mru, buf, hash_lock); 1728 1729 } else if (buf->b_state == arc_mru) { 1730 /* 1731 * If this buffer is here because of a prefetch, then either: 1732 * - clear the flag if this is a "referencing" read 1733 * (any subsequent access will bump this into the MFU state). 1734 * or 1735 * - move the buffer to the head of the list if this is 1736 * another prefetch (to make it less likely to be evicted). 
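		 *
		 * A lifecycle sketch (illustrative, summarizing the code
		 * below): a prefetch read lands the block in arc_mru with
		 * ARC_PREFETCH set; the first demand read (which holds a
		 * reference) clears the flag but leaves the block in the MRU;
		 * a later demand read arriving more than ARC_MINTIME ticks
		 * after b_arc_access was set promotes it to arc_mfu.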
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			if (refcount_count(&buf->b_refcnt) == 0) {
				ASSERT(list_link_active(&buf->b_arc_node));
				mutex_enter(&arc_mru->arcs_mtx);
				list_remove(&arc_mru->arcs_list, buf);
				list_insert_head(&arc_mru->arcs_list, buf);
				mutex_exit(&arc_mru->arcs_mtx);
			} else {
				buf->b_flags &= ~ARC_PREFETCH;
				ARCSTAT_BUMP(arcstat_mru_hits);
			}
			buf->b_arc_access = lbolt;
			return;
		}

		/*
		 * This buffer has been "accessed" only once so far,
		 * but it is still in the cache. Move it to the MFU
		 * state.
		 */
		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
			/*
			 * More than ARC_MINTIME (~62ms) has passed since we
			 * instantiated this buffer.  Move it to the
			 * most frequently used state.
			 */
			buf->b_arc_access = lbolt;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
			arc_change_state(arc_mfu, buf, hash_lock);
		}
		ARCSTAT_BUMP(arcstat_mru_hits);
	} else if (buf->b_state == arc_mru_ghost) {
		arc_state_t	*new_state;
		/*
		 * This buffer has been "accessed" recently, but
		 * was evicted from the cache.  Move it to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			new_state = arc_mru;
			if (refcount_count(&buf->b_refcnt) > 0)
				buf->b_flags &= ~ARC_PREFETCH;
			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		} else {
			new_state = arc_mfu;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		}

		buf->b_arc_access = lbolt;
		arc_change_state(new_state, buf, hash_lock);

		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
	} else if (buf->b_state == arc_mfu) {
		/*
		 * This buffer has been accessed more than once and is
		 * still in the cache.  Keep it in the MFU state.
		 *
		 * NOTE: an add_reference() that occurred when we did
		 * the arc_read() will have kicked this off the list.
		 * If it was a prefetch, we will explicitly move it to
		 * the head of the list now.
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			ASSERT(refcount_count(&buf->b_refcnt) == 0);
			ASSERT(list_link_active(&buf->b_arc_node));
			mutex_enter(&arc_mfu->arcs_mtx);
			list_remove(&arc_mfu->arcs_list, buf);
			list_insert_head(&arc_mfu->arcs_list, buf);
			mutex_exit(&arc_mfu->arcs_mtx);
		}
		ARCSTAT_BUMP(arcstat_mfu_hits);
		buf->b_arc_access = lbolt;
	} else if (buf->b_state == arc_mfu_ghost) {
		arc_state_t	*new_state = arc_mfu;
		/*
		 * This buffer has been accessed more than once but has
		 * been evicted from the cache.  Move it back to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			/*
			 * This is a prefetch access...
			 * move this block back to the MRU state.

/* a generic arc_done_func_t which you can use */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
}

/* a generic arc_done_func_t which you can use */
void
arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	arc_buf_t **bufp = arg;
	if (zio && zio->io_error) {
		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
		*bufp = NULL;
	} else {
		*bufp = buf;
	}
}

static void
arc_read_done(zio_t *zio)
{
	arc_buf_hdr_t *hdr, *found;
	arc_buf_t *buf;
	arc_buf_t *abuf;	/* buffer we're assigning to callback */
	kmutex_t *hash_lock;
	arc_callback_t *callback_list, *acb;
	int freeable = FALSE;

	buf = zio->io_private;
	hdr = buf->b_hdr;

	/*
	 * The hdr was inserted into hash-table and removed from lists
	 * prior to starting I/O.  We should find this header, since
	 * it's in the hash table, and it should be legit since it's
	 * not possible to evict it during the I/O.  The only possible
	 * reason for it not to be found is if we were freed during the
	 * read.
	 */
	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
	    &hash_lock);

	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));

	/* byteswap if necessary */
	callback_list = hdr->b_acb;
	ASSERT(callback_list != NULL);
	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
		callback_list->acb_byteswap(buf->b_data, hdr->b_size);

	arc_cksum_compute(buf);

	/* create copies of the data buffer for the callers */
	abuf = buf;
	for (acb = callback_list; acb; acb = acb->acb_next) {
		if (acb->acb_done) {
			if (abuf == NULL)
				abuf = arc_buf_clone(buf);
			acb->acb_buf = abuf;
			abuf = NULL;
		}
	}
	hdr->b_acb = NULL;
	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	ASSERT(!HDR_BUF_AVAILABLE(hdr));
	if (abuf == buf)
		hdr->b_flags |= ARC_BUF_AVAILABLE;

	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);

	if (zio->io_error != 0) {
		hdr->b_flags |= ARC_IO_ERROR;
		if (hdr->b_state != arc_anon)
			arc_change_state(arc_anon, hdr, hash_lock);
		if (HDR_IN_HASH_TABLE(hdr))
			buf_hash_remove(hdr);
		freeable = refcount_is_zero(&hdr->b_refcnt);
		/* convert checksum errors into IO errors */
		if (zio->io_error == ECKSUM)
			zio->io_error = EIO;
	}

	/*
	 * Broadcast before we drop the hash_lock to avoid the possibility
	 * that the hdr (and hence the cv) might be freed before we get to
	 * the cv_broadcast().
	 */
	cv_broadcast(&hdr->b_cv);

	if (hash_lock) {
		/*
		 * Only call arc_access on anonymous buffers.  This is because
		 * if we've issued an I/O for an evicted buffer, we've already
		 * called arc_access (to prevent any simultaneous readers from
		 * getting confused).
		 */
		if (zio->io_error == 0 && hdr->b_state == arc_anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else {
		/*
		 * This block was freed while we waited for the read to
		 * complete.  It has been removed from the hash table and
		 * moved to the anonymous state (so that it won't show up
		 * in the cache).
		 */
		ASSERT3P(hdr->b_state, ==, arc_anon);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/* execute each callback and free its structure */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done)
			acb->acb_done(zio, acb->acb_buf, acb->acb_private);

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_next;
		kmem_free(acb, sizeof (arc_callback_t));
	}

	if (freeable)
		arc_hdr_destroy(hdr);
}

/*
 * "Read" the block at the specified DVA (in bp) via the
 * cache.  If the block is found in the cache, invoke the provided
 * callback immediately and return.  Note that the `zio' parameter
 * in the callback will be NULL in this case, since no IO was
 * required.  If the block is not in the cache, pass the read request
 * on to the spa with a substitute callback function, so that the
 * requested block will be added to the cache.
 *
 * If a read request arrives for a block that has a read in-progress,
 * either wait for the in-progress read to complete (and return the
 * results); or, if this is a read with a "done" func, add a record
 * to the read to invoke the "done" func when the read completes,
 * and return; or just return.
 *
 * arc_read_done() will invoke all the requested "done" functions
 * for readers of this block.
 */
int
arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
    arc_done_func_t *done, void *private, int priority, int flags,
    uint32_t *arc_flags, zbookmark_t *zb)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	kmutex_t *hash_lock;
	zio_t *rzio;

top:
	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
	if (hdr && hdr->b_datacnt > 0) {

		*arc_flags |= ARC_CACHED;

		if (HDR_IO_IN_PROGRESS(hdr)) {

			if (*arc_flags & ARC_WAIT) {
				cv_wait(&hdr->b_cv, hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			ASSERT(*arc_flags & ARC_NOWAIT);

			if (done) {
				arc_callback_t *acb = NULL;

				acb = kmem_zalloc(sizeof (arc_callback_t),
				    KM_SLEEP);
				acb->acb_done = done;
				acb->acb_private = private;
				acb->acb_byteswap = swap;
				if (pio != NULL)
					acb->acb_zio_dummy = zio_null(pio,
					    spa, NULL, NULL, flags);

				ASSERT(acb->acb_done != NULL);
				acb->acb_next = hdr->b_acb;
				hdr->b_acb = acb;
				add_reference(hdr, hash_lock, private);
				mutex_exit(hash_lock);
				return (0);
			}
			mutex_exit(hash_lock);
			return (0);
		}

		ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

		if (done) {
			add_reference(hdr, hash_lock, private);
			/*
			 * If this block is already in use, create a new
			 * copy of the data so that we will be guaranteed
			 * that arc_release() will always succeed.
			 */
			buf = hdr->b_buf;
			ASSERT(buf);
			ASSERT(buf->b_data);
			if (HDR_BUF_AVAILABLE(hdr)) {
				ASSERT(buf->b_efunc == NULL);
				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
			} else {
				buf = arc_buf_clone(buf);
			}
		} else if (*arc_flags & ARC_PREFETCH &&
		    refcount_count(&hdr->b_refcnt) == 0) {
			hdr->b_flags |= ARC_PREFETCH;
		}
		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
		arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
		ARCSTAT_BUMP(arcstat_hits);
		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
		    data, metadata, hits);

		if (done)
			done(NULL, buf, private);
	} else {
		uint64_t size = BP_GET_LSIZE(bp);
		arc_callback_t *acb;

		if (hdr == NULL) {
			/* this block is not in the cache */
			arc_buf_hdr_t *exists;
			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
			buf = arc_buf_alloc(spa, size, private, type);
			hdr = buf->b_hdr;
			hdr->b_dva = *BP_IDENTITY(bp);
			hdr->b_birth = bp->blk_birth;
			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
			exists = buf_hash_insert(hdr, &hash_lock);
			if (exists) {
				/* somebody beat us to the hash insert */
				mutex_exit(hash_lock);
				bzero(&hdr->b_dva, sizeof (dva_t));
				hdr->b_birth = 0;
				hdr->b_cksum0 = 0;
				(void) arc_buf_remove_ref(buf, private);
				goto top; /* restart the IO request */
			}
			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH) {
				(void) remove_reference(hdr, hash_lock,
				    private);
				hdr->b_flags |= ARC_PREFETCH;
			}
			if (BP_GET_LEVEL(bp) > 0)
				hdr->b_flags |= ARC_INDIRECT;
		} else {
			/* this block is in the ghost cache */
			ASSERT(GHOST_STATE(hdr->b_state));
			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
			ASSERT(hdr->b_buf == NULL);

			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH)
				hdr->b_flags |= ARC_PREFETCH;
			else
				add_reference(hdr, hash_lock, private);
			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
			buf->b_hdr = hdr;
			buf->b_data = NULL;
			buf->b_efunc = NULL;
			buf->b_private = NULL;
			buf->b_next = NULL;
			hdr->b_buf = buf;
			arc_get_data_buf(buf);
			ASSERT(hdr->b_datacnt == 0);
			hdr->b_datacnt = 1;

		}

		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
		acb->acb_done = done;
		acb->acb_private = private;
		acb->acb_byteswap = swap;

		ASSERT(hdr->b_acb == NULL);
		hdr->b_acb = acb;
		hdr->b_flags |= ARC_IO_IN_PROGRESS;

		/*
		 * If the buffer has been evicted, migrate it to a present state
		 * before issuing the I/O.  Once we drop the hash-table lock,
		 * the header will be marked as I/O in progress and have an
		 * attached buffer.  At this point, anybody who finds this
		 * buffer ought to notice that it's legit but has a pending I/O.
		 */

		if (GHOST_STATE(hdr->b_state))
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);

		ASSERT3U(hdr->b_size, ==, size);
		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
		    zbookmark_t *, zb);
		ARCSTAT_BUMP(arcstat_misses);
		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
		    data, metadata, misses);

		rzio = zio_read(pio, spa, bp, buf->b_data, size,
		    arc_read_done, buf, priority, flags, zb);

		if (*arc_flags & ARC_WAIT)
			return (zio_wait(rzio));

		ASSERT(*arc_flags & ARC_NOWAIT);
		zio_nowait(rzio);
	}
	return (0);
}
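
/*
 * Illustrative example (not code from this file): a caller that wants a
 * synchronous, cached read of a single block can combine arc_read() with
 * the generic arc_getbuf_func() callback above.  The byteswap function and
 * bookmark below are placeholders for whatever the caller would supply.
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *	int err;
 *
 *	err = arc_read(NULL, spa, bp, byteswap_func, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
 *	if (err == 0 && abuf != NULL) {
 *		... use abuf->b_data ...
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}
 *
 * With ARC_WAIT, arc_read() returns the result of zio_wait() on a miss and
 * 0 on a hit; on success arc_getbuf_func() has stored the buffer in abuf,
 * with a reference held under the tag &abuf (the 'private' argument).
 */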

/*
 * arc_read() variant to support pool traversal.  If the block is already
 * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
 * The idea is that we don't want pool traversal filling up memory, but
 * if the ARC already has the data anyway, we shouldn't pay for the I/O.
 */
int
arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_mtx;
	int rc = 0;

	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);

	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
		arc_buf_t *buf = hdr->b_buf;

		ASSERT(buf);
		while (buf->b_data == NULL) {
			buf = buf->b_next;
			ASSERT(buf);
		}
		bcopy(buf->b_data, data, hdr->b_size);
	} else {
		rc = ENOENT;
	}

	if (hash_mtx)
		mutex_exit(hash_mtx);

	return (rc);
}
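
/*
 * Register an eviction callback and its private argument on a buffer.
 * The callback (b_efunc) is what arc_buf_evict() and arc_do_user_evicts()
 * invoke when the buffer is evicted; the ASSERTs below require the buffer
 * to be cached (not anonymous) and, if a callback is being set, to have
 * at least one active reference.
 */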
void
arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
{
	ASSERT(buf->b_hdr != NULL);
	ASSERT(buf->b_hdr->b_state != arc_anon);
	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
	buf->b_efunc = func;
	buf->b_private = private;
}

/*
 * This is used by the DMU to let the ARC know that a buffer is
 * being evicted, so the ARC should clean up.  If this arc buf
 * is not yet in the evicted state, it will be put there.
 */
int
arc_buf_evict(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	arc_buf_t **bufp;

	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		/*
		 * We are in arc_do_user_evicts().
		 */
		ASSERT(buf->b_data == NULL);
		mutex_exit(&arc_eviction_mtx);
		return (0);
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);

	if (buf->b_data == NULL) {
		/*
		 * We are on the eviction list.
		 */
		mutex_exit(hash_lock);
		mutex_enter(&arc_eviction_mtx);
		if (buf->b_hdr == NULL) {
			/*
			 * We are already in arc_do_user_evicts().
			 */
			mutex_exit(&arc_eviction_mtx);
			return (0);
		} else {
			arc_buf_t copy = *buf; /* structure assignment */
			/*
			 * Process this buffer now
			 * but let arc_do_user_evicts() do the reaping.
			 */
			buf->b_efunc = NULL;
			mutex_exit(&arc_eviction_mtx);
			VERIFY(copy.b_efunc(&copy) == 0);
			return (1);
		}
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

	/*
	 * Pull this buffer off of the hdr
	 */
	bufp = &hdr->b_buf;
	while (*bufp != buf)
		bufp = &(*bufp)->b_next;
	*bufp = buf->b_next;

	ASSERT(buf->b_data != NULL);
	arc_buf_destroy(buf, FALSE, FALSE);

	if (hdr->b_datacnt == 0) {
		arc_state_t *old_state = hdr->b_state;
		arc_state_t *evicted_state;

		ASSERT(refcount_is_zero(&hdr->b_refcnt));

		evicted_state =
		    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

		mutex_enter(&old_state->arcs_mtx);
		mutex_enter(&evicted_state->arcs_mtx);

		arc_change_state(evicted_state, hdr, hash_lock);
		ASSERT(HDR_IN_HASH_TABLE(hdr));
		hdr->b_flags = ARC_IN_HASH_TABLE;

		mutex_exit(&evicted_state->arcs_mtx);
		mutex_exit(&old_state->arcs_mtx);
	}
	mutex_exit(hash_lock);

	VERIFY(buf->b_efunc(buf) == 0);
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
	return (1);
}

/*
 * Release this buffer from the cache.  This must be done
 * after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);

	/* this buffer is not on any list */
	ASSERT(refcount_count(&hdr->b_refcnt) > 0);

	if (hdr->b_state == arc_anon) {
		/* this buffer is already released */
		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
		ASSERT(BUF_EMPTY(hdr));
		ASSERT(buf->b_efunc == NULL);
		arc_buf_thaw(buf);
		return;
	}

	mutex_enter(hash_lock);

	/*
	 * Do we have more than one buf?
	 */
	if (hdr->b_buf != buf || buf->b_next != NULL) {
		arc_buf_hdr_t *nhdr;
		arc_buf_t **bufp;
		uint64_t blksz = hdr->b_size;
		spa_t *spa = hdr->b_spa;
		arc_buf_contents_t type = hdr->b_type;

		ASSERT(hdr->b_datacnt > 1);
		/*
		 * Pull the data off of this buf and attach it to
		 * a new anonymous buf.
		 */
		(void) remove_reference(hdr, hash_lock, tag);
		bufp = &hdr->b_buf;
		while (*bufp != buf)
			bufp = &(*bufp)->b_next;
		*bufp = (*bufp)->b_next;

		ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
		atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
		if (refcount_is_zero(&hdr->b_refcnt)) {
			ASSERT3U(hdr->b_state->arcs_lsize, >=, hdr->b_size);
			atomic_add_64(&hdr->b_state->arcs_lsize, -hdr->b_size);
		}
		hdr->b_datacnt -= 1;

		mutex_exit(hash_lock);

		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
		nhdr->b_size = blksz;
		nhdr->b_spa = spa;
		nhdr->b_type = type;
		nhdr->b_buf = buf;
		nhdr->b_state = arc_anon;
		nhdr->b_arc_access = 0;
		nhdr->b_flags = 0;
		nhdr->b_datacnt = 1;
		if (hdr->b_freeze_cksum != NULL) {
			nhdr->b_freeze_cksum =
			    kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
			*nhdr->b_freeze_cksum = *hdr->b_freeze_cksum;
		}
		buf->b_hdr = nhdr;
		buf->b_next = NULL;
		(void) refcount_add(&nhdr->b_refcnt, tag);
		atomic_add_64(&arc_anon->arcs_size, blksz);

		hdr = nhdr;
	} else {
		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
		ASSERT(!list_link_active(&hdr->b_arc_node));
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		arc_change_state(arc_anon, hdr, hash_lock);
		hdr->b_arc_access = 0;
		mutex_exit(hash_lock);
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	arc_buf_thaw(buf);
}
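
/*
 * Illustrative pattern (not code from this file): a caller that read a
 * buffer through the cache and now intends to modify it in place first
 * detaches it from the cache, after which the predicate below holds:
 *
 *	arc_release(buf, tag);
 *	ASSERT(arc_released(buf));
 *	... scribble on buf->b_data ...
 *
 * A released buffer is anonymous, which is what arc_write() below requires
 * of the buffers it is given.
 */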

int
arc_released(arc_buf_t *buf)
{
	return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
}

int
arc_has_callback(arc_buf_t *buf)
{
	return (buf->b_efunc != NULL);
}

#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
	return (refcount_count(&buf->b_hdr->b_refcnt));
}
#endif

static void
arc_write_done(zio_t *zio)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr;
	arc_callback_t *acb;

	buf = zio->io_private;
	hdr = buf->b_hdr;
	acb = hdr->b_acb;
	hdr->b_acb = NULL;
	ASSERT(acb != NULL);

	/* this buffer is on no lists and is not in the hash table */
	ASSERT3P(hdr->b_state, ==, arc_anon);

	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
	hdr->b_birth = zio->io_bp->blk_birth;
	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
	/*
	 * If the block to be written was all-zero, we may have
	 * compressed it away.  In this case no write was performed
	 * so there will be no dva/birth-date/checksum.  The buffer
	 * must therefore remain anonymous (and uncached).
	 */
	if (!BUF_EMPTY(hdr)) {
		arc_buf_hdr_t *exists;
		kmutex_t *hash_lock;

		arc_cksum_verify(buf);

		exists = buf_hash_insert(hdr, &hash_lock);
		if (exists) {
			/*
			 * This can only happen if we overwrite for
			 * sync-to-convergence, because we remove
			 * buffers from the hash table when we arc_free().
			 */
			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
			    BP_IDENTITY(zio->io_bp)));
			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
			    zio->io_bp->blk_birth);

			ASSERT(refcount_is_zero(&exists->b_refcnt));
			arc_change_state(arc_anon, exists, hash_lock);
			mutex_exit(hash_lock);
			arc_hdr_destroy(exists);
			exists = buf_hash_insert(hdr, &hash_lock);
			ASSERT3P(exists, ==, NULL);
		}
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
		arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else if (acb->acb_done == NULL) {
		int destroy_hdr;
		/*
		 * This is an anonymous buffer with no user callback,
		 * destroy it if there are no active references.
		 */
		mutex_enter(&arc_eviction_mtx);
		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	}

	if (acb->acb_done) {
		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
		acb->acb_done(zio, buf, acb->acb_private);
	}

	kmem_free(acb, sizeof (arc_callback_t));
}
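
/*
 * Write the given buffer out via zio_write().  The buffer must be
 * anonymous (private to the caller, released from the cache) and must
 * not already have an I/O in progress; see the ASSERTs below.  When the
 * write completes, arc_write_done() copies the resulting DVA, birth txg,
 * and checksum word into the header and inserts it into the hash table,
 * so the newly written block becomes readable through the cache.
 */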
int
arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
    uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
    arc_done_func_t *done, void *private, int priority, int flags,
    uint32_t arc_flags, zbookmark_t *zb)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	arc_callback_t *acb;
	zio_t *rzio;

	/* this is a private buffer - no locking required */
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(BUF_EMPTY(hdr));
	ASSERT(!HDR_IO_ERROR(hdr));
	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
	ASSERT(hdr->b_acb == 0);
	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
	acb->acb_done = done;
	acb->acb_private = private;
	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
	hdr->b_acb = acb;
	hdr->b_flags |= ARC_IO_IN_PROGRESS;
	arc_cksum_compute(buf);
	rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb);

	if (arc_flags & ARC_WAIT)
		return (zio_wait(rzio));

	ASSERT(arc_flags & ARC_NOWAIT);
	zio_nowait(rzio);

	return (0);
}

int
arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
	arc_buf_hdr_t *ab;
	kmutex_t *hash_lock;
	zio_t *zio;

	/*
	 * If this buffer is in the cache, release it, so it
	 * can be re-used.
	 */
	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
	if (ab != NULL) {
		/*
		 * The checksum of blocks to free is not always
		 * preserved (e.g. on the deadlist).  However, if it is
		 * nonzero, it should match what we have in the cache.
		 */
		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
		if (ab->b_state != arc_anon)
			arc_change_state(arc_anon, ab, hash_lock);
		if (HDR_IO_IN_PROGRESS(ab)) {
			/*
			 * This should only happen when we prefetch.
			 */
			ASSERT(ab->b_flags & ARC_PREFETCH);
			ASSERT3U(ab->b_datacnt, ==, 1);
			ab->b_flags |= ARC_FREED_IN_READ;
			if (HDR_IN_HASH_TABLE(ab))
				buf_hash_remove(ab);
			ab->b_arc_access = 0;
			bzero(&ab->b_dva, sizeof (dva_t));
			ab->b_birth = 0;
			ab->b_cksum0 = 0;
			ab->b_buf->b_efunc = NULL;
			ab->b_buf->b_private = NULL;
			mutex_exit(hash_lock);
		} else if (refcount_is_zero(&ab->b_refcnt)) {
			mutex_exit(hash_lock);
			arc_hdr_destroy(ab);
			ARCSTAT_BUMP(arcstat_deleted);
		} else {
			/*
			 * We still have an active reference on this
			 * buffer.  This can happen, e.g., from
			 * dbuf_unoverride().
			 */
			ASSERT(!HDR_IN_HASH_TABLE(ab));
			ab->b_arc_access = 0;
			bzero(&ab->b_dva, sizeof (dva_t));
			ab->b_birth = 0;
			ab->b_cksum0 = 0;
			ab->b_buf->b_efunc = NULL;
			ab->b_buf->b_private = NULL;
			mutex_exit(hash_lock);
		}
	}

	zio = zio_free(pio, spa, txg, bp, done, private);

	if (arc_flags & ARC_WAIT)
		return (zio_wait(zio));

	ASSERT(arc_flags & ARC_NOWAIT);
	zio_nowait(zio);

	return (0);
}
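
/*
 * The next two functions implement a simple throttle on dirty (anonymous)
 * data entering the ARC.  Callers that are about to dirty data (typically
 * the DMU transaction code) reserve space with arc_tempreserve_space(),
 * which can fail with ERESTART (asking the caller to back off and retry)
 * when too much anonymous plus reserved data is already outstanding, or
 * with ENOMEM when the reservation alone exceeds the cache size.  The
 * reservation is later returned with arc_tempreserve_clear().
 */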
void
arc_tempreserve_clear(uint64_t tempreserve)
{
	atomic_add_64(&arc_tempreserve, -tempreserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(uint64_t tempreserve)
{
#ifdef ZFS_DEBUG
	/*
	 * Once in a while, fail for no reason.  Everything should cope.
	 */
	if (spa_get_random(10000) == 0) {
		dprintf("forcing random failure\n");
		return (ERESTART);
	}
#endif
	if (tempreserve > arc_c/4 && !arc_no_grow)
		arc_c = MIN(arc_c_max, tempreserve * 4);
	if (tempreserve > arc_c)
		return (ENOMEM);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large.  We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail.  Not a huge deal.
	 *
	 * XXX The limit should be adjusted dynamically to keep the time
	 * to sync a dataset fixed (around 1-5 seconds?).
	 */

	if (tempreserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 &&
	    arc_tempreserve + arc_anon->arcs_size > arc_c / 4) {
		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
		    "tempreserve=%lluK arc_c=%lluK\n",
		    arc_tempreserve>>10, arc_anon->arcs_lsize>>10,
		    tempreserve>>10, arc_c>>10);
		return (ERESTART);
	}
	atomic_add_64(&arc_tempreserve, tempreserve);
	return (0);
}
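
/*
 * Worked example of the default sizing computed in arc_init() below
 * (ignoring the 32-bit VM clamp and the zfs_arc_min/zfs_arc_max tunables):
 * on a machine with 4GB of physical memory, arc_c starts at 1/8 of
 * memory = 512MB, so arc_c_min = MAX(512MB / 4, 64MB) = 128MB (1/32 of
 * memory) and arc_c_max = MAX(512MB * 6, 512MB * 8 - 1GB) = 3GB (3/4 of
 * memory).  arc_c then starts out at arc_c_max, with arc_p at half of it.
 */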
void
arc_init(void)
{
	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);

	/* Convert seconds to clock ticks */
	arc_min_prefetch_lifespan = 1 * hz;

	/* Start out with 1/8 of all memory */
	arc_c = physmem * PAGESIZE / 8;

#ifdef _KERNEL
	/*
	 * On architectures where the physical memory can be larger
	 * than the addressable space (Intel in 32-bit mode), we may
	 * need to limit the cache to 1/8 of VM size.
	 */
	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
#endif

	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
	arc_c_min = MAX(arc_c / 4, 64<<20);
	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
	if (arc_c * 8 >= 1<<30)
		arc_c_max = (arc_c * 8) - (1<<30);
	else
		arc_c_max = arc_c_min;
	arc_c_max = MAX(arc_c * 6, arc_c_max);

	/*
	 * Allow the tunables to override our calculations if they are
	 * reasonable (i.e. over 64MB)
	 */
	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
		arc_c_max = zfs_arc_max;
	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
		arc_c_min = zfs_arc_min;

	arc_c = arc_c_max;
	arc_p = (arc_c >> 1);

	/* if kmem_flags are set, let's try to use less memory */
	if (kmem_debugging())
		arc_c = arc_c / 2;
	if (arc_c < arc_c_min)
		arc_c = arc_c_min;

	arc_anon = &ARC_anon;
	arc_mru = &ARC_mru;
	arc_mru_ghost = &ARC_mru_ghost;
	arc_mfu = &ARC_mfu;
	arc_mfu_ghost = &ARC_mfu_ghost;
	arc_size = 0;

	mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);

	list_create(&arc_mru->arcs_list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mru_ghost->arcs_list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu->arcs_list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu_ghost->arcs_list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));

	buf_init();

	arc_thread_exit = 0;
	arc_eviction_list = NULL;
	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));

	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

	if (arc_ksp != NULL) {
		arc_ksp->ks_data = &arc_stats;
		kstat_install(arc_ksp);
	}

	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);

	arc_dead = FALSE;
}

void
arc_fini(void)
{
	mutex_enter(&arc_reclaim_thr_lock);
	arc_thread_exit = 1;
	while (arc_thread_exit != 0)
		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
	mutex_exit(&arc_reclaim_thr_lock);

	arc_flush();

	arc_dead = TRUE;

	if (arc_ksp != NULL) {
		kstat_delete(arc_ksp);
		arc_ksp = NULL;
	}

	mutex_destroy(&arc_eviction_mtx);
	mutex_destroy(&arc_reclaim_thr_lock);
	cv_destroy(&arc_reclaim_thr_cv);

	list_destroy(&arc_mru->arcs_list);
	list_destroy(&arc_mru_ghost->arcs_list);
	list_destroy(&arc_mfu->arcs_list);
	list_destroy(&arc_mfu_ghost->arcs_list);

	mutex_destroy(&arc_anon->arcs_mtx);
	mutex_destroy(&arc_mru->arcs_mtx);
	mutex_destroy(&arc_mru_ghost->arcs_mtx);
	mutex_destroy(&arc_mfu->arcs_mtx);
	mutex_destroy(&arc_mfu_ghost->arcs_mtx);

	buf_fini();
}