1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * DVA-based Adjustable Replacement Cache 30 * 31 * While much of the theory of operation used here is 32 * based on the self-tuning, low overhead replacement cache 33 * presented by Megiddo and Modha at FAST 2003, there are some 34 * significant differences: 35 * 36 * 1. The Megiddo and Modha model assumes any page is evictable. 37 * Pages in its cache cannot be "locked" into memory. This makes 38 * the eviction algorithm simple: evict the last page in the list. 39 * This also makes the performance characteristics easy to reason 40 * about. Our cache is not so simple. At any given moment, some 41 * subset of the blocks in the cache are un-evictable because we 42 * have handed out a reference to them. Blocks are only evictable 43 * when there are no external references active. This makes 44 * eviction far more problematic: we choose to evict the evictable 45 * blocks that are the "lowest" in the list. 46 * 47 * There are times when it is not possible to evict the requested 48 * space. In these circumstances we are unable to adjust the cache 49 * size. To prevent the cache growing unbounded at these times we 50 * implement a "cache throttle" that slows the flow of new data 51 * into the cache until we can make space available. 52 * 53 * 2. The Megiddo and Modha model assumes a fixed cache size. 54 * Pages are evicted when the cache is full and there is a cache 55 * miss. Our model has a variable sized cache. It grows with 56 * high use, but also tries to react to memory pressure from the 57 * operating system: decreasing its size when system memory is 58 * tight. 59 * 60 * 3. The Megiddo and Modha model assumes a fixed page size. All 61 * elements of the cache are therefore exactly the same size. So 62 * when adjusting the cache size following a cache miss, it's simply 63 * a matter of choosing a single page to evict. In our model, we 64 * have variable sized cache blocks (ranging from 512 bytes to 65 * 128K bytes). We therefore choose a set of blocks to evict to make 66 * space for a cache miss that approximates as closely as possible 67 * the space used by the new block. 68 * 69 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache" 70 * by N. Megiddo & D. Modha, FAST 2003 71 */ 72 73 /* 74 * The locking model: 75 * 76 * A new reference to a cache buffer can be obtained in two 77 * ways: 1) via a hash table lookup using the DVA as a key, 78 * or 2) via one of the ARC lists.
The arc_read() interface 79 * uses method 1, while the internal arc algorithms for 80 * adjusting the cache use method 2. We therefore provide two 81 * types of locks: 1) the hash table lock array, and 2) the 82 * arc list locks. 83 * 84 * Buffers do not have their own mutexes; rather, they rely on the 85 * hash table mutexes for the bulk of their protection (i.e. most 86 * fields in the arc_buf_hdr_t are protected by these mutexes). 87 * 88 * buf_hash_find() returns the appropriate mutex (held) when it 89 * locates the requested buffer in the hash table. It returns 90 * NULL for the mutex if the buffer was not in the table. 91 * 92 * buf_hash_remove() expects the appropriate hash mutex to be 93 * already held before it is invoked. 94 * 95 * Each arc state also has a mutex which is used to protect the 96 * buffer list associated with the state. When attempting to 97 * obtain a hash table lock while holding an arc list lock you 98 * must use mutex_tryenter() to avoid deadlock. Also note that 99 * the active state mutex must be held before the ghost state mutex. 100 * 101 * Arc buffers may have an associated eviction callback function. 102 * This function will be invoked prior to removing the buffer (e.g. 103 * in arc_do_user_evicts()). Note however that the data associated 104 * with the buffer may be evicted prior to the callback. The callback 105 * must be made with *no locks held* (to prevent deadlock). Additionally, 106 * the users of callbacks must ensure that their private data is 107 * protected from simultaneous callbacks from arc_buf_evict() 108 * and arc_do_user_evicts(). 109 * 110 * Note that the majority of the performance stats are manipulated 111 * with atomic operations. 112 */ 113 114 #include <sys/spa.h> 115 #include <sys/zio.h> 116 #include <sys/zio_checksum.h> 117 #include <sys/zfs_context.h> 118 #include <sys/arc.h> 119 #include <sys/refcount.h> 120 #ifdef _KERNEL 121 #include <sys/vmsystm.h> 122 #include <vm/anon.h> 123 #include <sys/fs/swapnode.h> 124 #include <sys/dnlc.h> 125 #endif 126 #include <sys/callb.h> 127 #include <sys/kstat.h> 128 129 static kmutex_t arc_reclaim_thr_lock; 130 static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */ 131 static uint8_t arc_thread_exit; 132 133 #define ARC_REDUCE_DNLC_PERCENT 3 134 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT; 135 136 typedef enum arc_reclaim_strategy { 137 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */ 138 ARC_RECLAIM_CONS /* Conservative reclaim strategy */ 139 } arc_reclaim_strategy_t; 140 141 /* number of seconds before growing cache again */ 142 static int arc_grow_retry = 60; 143 144 /* 145 * minimum lifespan of a prefetch block in clock ticks 146 * (initialized in arc_init()) 147 */ 148 static int arc_min_prefetch_lifespan; 149 150 static int arc_dead; 151 152 /* 153 * These tunables are for performance analysis. 154 */ 155 uint64_t zfs_arc_max; 156 uint64_t zfs_arc_min; 157 158 /* 159 * Note that buffers can be in one of 5 states: 160 * ARC_anon - anonymous (discussed below) 161 * ARC_mru - recently used, currently cached 162 * ARC_mru_ghost - recently used, no longer in cache 163 * ARC_mfu - frequently used, currently cached 164 * ARC_mfu_ghost - frequently used, no longer in cache 165 * When there are no active references to a buffer, it is 166 * linked onto one of the lists in the arc. These are the 167 * only buffers that can be evicted or deleted. 168 * 169 * Anonymous buffers are buffers that are not associated with 170 * a DVA.
These are buffers that hold dirty block copies 171 * before they are written to stable storage. By definition, 172 * they are "ref'd" and are considered part of arc_mru 173 * that cannot be freed. Generally, they will acquire a DVA 174 * as they are written and migrate onto the arc_mru list. 175 */ 176 177 typedef struct arc_state { 178 list_t arcs_list; /* linked list of evictable buffers in state */ 179 uint64_t arcs_lsize; /* total size of buffers in the linked list */ 180 uint64_t arcs_size; /* total size of all buffers in this state */ 181 kmutex_t arcs_mtx; 182 } arc_state_t; 183 184 /* The 5 states: */ 185 static arc_state_t ARC_anon; 186 static arc_state_t ARC_mru; 187 static arc_state_t ARC_mru_ghost; 188 static arc_state_t ARC_mfu; 189 static arc_state_t ARC_mfu_ghost; 190 191 typedef struct arc_stats { 192 kstat_named_t arcstat_hits; 193 kstat_named_t arcstat_misses; 194 kstat_named_t arcstat_demand_data_hits; 195 kstat_named_t arcstat_demand_data_misses; 196 kstat_named_t arcstat_demand_metadata_hits; 197 kstat_named_t arcstat_demand_metadata_misses; 198 kstat_named_t arcstat_prefetch_data_hits; 199 kstat_named_t arcstat_prefetch_data_misses; 200 kstat_named_t arcstat_prefetch_metadata_hits; 201 kstat_named_t arcstat_prefetch_metadata_misses; 202 kstat_named_t arcstat_mru_hits; 203 kstat_named_t arcstat_mru_ghost_hits; 204 kstat_named_t arcstat_mfu_hits; 205 kstat_named_t arcstat_mfu_ghost_hits; 206 kstat_named_t arcstat_deleted; 207 kstat_named_t arcstat_recycle_miss; 208 kstat_named_t arcstat_mutex_miss; 209 kstat_named_t arcstat_evict_skip; 210 kstat_named_t arcstat_hash_elements; 211 kstat_named_t arcstat_hash_elements_max; 212 kstat_named_t arcstat_hash_collisions; 213 kstat_named_t arcstat_hash_chains; 214 kstat_named_t arcstat_hash_chain_max; 215 kstat_named_t arcstat_p; 216 kstat_named_t arcstat_c; 217 kstat_named_t arcstat_c_min; 218 kstat_named_t arcstat_c_max; 219 kstat_named_t arcstat_size; 220 } arc_stats_t; 221 222 static arc_stats_t arc_stats = { 223 { "hits", KSTAT_DATA_UINT64 }, 224 { "misses", KSTAT_DATA_UINT64 }, 225 { "demand_data_hits", KSTAT_DATA_UINT64 }, 226 { "demand_data_misses", KSTAT_DATA_UINT64 }, 227 { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 228 { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 229 { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 230 { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 231 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 232 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 233 { "mru_hits", KSTAT_DATA_UINT64 }, 234 { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 235 { "mfu_hits", KSTAT_DATA_UINT64 }, 236 { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 237 { "deleted", KSTAT_DATA_UINT64 }, 238 { "recycle_miss", KSTAT_DATA_UINT64 }, 239 { "mutex_miss", KSTAT_DATA_UINT64 }, 240 { "evict_skip", KSTAT_DATA_UINT64 }, 241 { "hash_elements", KSTAT_DATA_UINT64 }, 242 { "hash_elements_max", KSTAT_DATA_UINT64 }, 243 { "hash_collisions", KSTAT_DATA_UINT64 }, 244 { "hash_chains", KSTAT_DATA_UINT64 }, 245 { "hash_chain_max", KSTAT_DATA_UINT64 }, 246 { "p", KSTAT_DATA_UINT64 }, 247 { "c", KSTAT_DATA_UINT64 }, 248 { "c_min", KSTAT_DATA_UINT64 }, 249 { "c_max", KSTAT_DATA_UINT64 }, 250 { "size", KSTAT_DATA_UINT64 } 251 }; 252 253 #define ARCSTAT(stat) (arc_stats.stat.value.ui64) 254 255 #define ARCSTAT_INCR(stat, val) \ 256 atomic_add_64(&arc_stats.stat.value.ui64, (val)); 257 258 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 259 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 260 261 #define ARCSTAT_MAX(stat, val) { \ 262 uint64_t m; \ 263
while ((val) > (m = arc_stats.stat.value.ui64) && \ 264 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 265 continue; \ 266 } 267 268 #define ARCSTAT_MAXSTAT(stat) \ 269 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 270 271 /* 272 * We define a macro to allow ARC hits/misses to be easily broken down by 273 * two separate conditions, giving a total of four different subtypes for 274 * each of hits and misses (so eight statistics total). 275 */ 276 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 277 if (cond1) { \ 278 if (cond2) { \ 279 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 280 } else { \ 281 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 282 } \ 283 } else { \ 284 if (cond2) { \ 285 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 286 } else { \ 287 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 288 } \ 289 } 290 291 kstat_t *arc_ksp; 292 static arc_state_t *arc_anon; 293 static arc_state_t *arc_mru; 294 static arc_state_t *arc_mru_ghost; 295 static arc_state_t *arc_mfu; 296 static arc_state_t *arc_mfu_ghost; 297 298 /* 299 * There are several ARC variables that are critical to export as kstats -- 300 * but we don't want to have to grovel around in the kstat whenever we wish to 301 * manipulate them. For these variables, we therefore define them to be in 302 * terms of the statistic variable. This assures that we are not introducing 303 * the possibility of inconsistency by having shadow copies of the variables, 304 * while still allowing the code to be readable. 305 */ 306 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ 307 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 308 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 309 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 310 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 311 312 static int arc_no_grow; /* Don't try to grow cache size */ 313 static uint64_t arc_tempreserve; 314 315 typedef struct arc_callback arc_callback_t; 316 317 struct arc_callback { 318 void *acb_private; 319 arc_done_func_t *acb_done; 320 arc_byteswap_func_t *acb_byteswap; 321 arc_buf_t *acb_buf; 322 zio_t *acb_zio_dummy; 323 arc_callback_t *acb_next; 324 }; 325 326 typedef struct arc_write_callback arc_write_callback_t; 327 328 struct arc_write_callback { 329 void *awcb_private; 330 arc_done_func_t *awcb_ready; 331 arc_done_func_t *awcb_done; 332 arc_buf_t *awcb_buf; 333 }; 334 335 struct arc_buf_hdr { 336 /* protected by hash lock */ 337 dva_t b_dva; 338 uint64_t b_birth; 339 uint64_t b_cksum0; 340 341 kmutex_t b_freeze_lock; 342 zio_cksum_t *b_freeze_cksum; 343 344 arc_buf_hdr_t *b_hash_next; 345 arc_buf_t *b_buf; 346 uint32_t b_flags; 347 uint32_t b_datacnt; 348 349 arc_callback_t *b_acb; 350 kcondvar_t b_cv; 351 352 /* immutable */ 353 arc_buf_contents_t b_type; 354 uint64_t b_size; 355 spa_t *b_spa; 356 357 /* protected by arc state mutex */ 358 arc_state_t *b_state; 359 list_node_t b_arc_node; 360 361 /* updated atomically */ 362 clock_t b_arc_access; 363 364 /* self protecting */ 365 refcount_t b_refcnt; 366 }; 367 368 static arc_buf_t *arc_eviction_list; 369 static kmutex_t arc_eviction_mtx; 370 static arc_buf_hdr_t arc_eviction_hdr; 371 static void arc_get_data_buf(arc_buf_t *buf); 372 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock); 373 374 #define GHOST_STATE(state) \ 375 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost) 376 377 /* 378 * Private ARC flags. 
These flags are private, ARC-only flags that will show up 379 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can 380 * be passed in as arc_flags in things like arc_read. However, these flags 381 * should never be passed and should only be set by ARC code. When adding new 382 * public flags, make sure not to smash the private ones. 383 */ 384 385 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 386 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 387 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 388 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 389 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 390 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 391 392 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 393 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 394 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 395 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 396 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 397 398 /* 399 * Hash table routines 400 */ 401 402 #define HT_LOCK_PAD 64 403 404 struct ht_lock { 405 kmutex_t ht_lock; 406 #ifdef _KERNEL 407 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 408 #endif 409 }; 410 411 #define BUF_LOCKS 256 412 typedef struct buf_hash_table { 413 uint64_t ht_mask; 414 arc_buf_hdr_t **ht_table; 415 struct ht_lock ht_locks[BUF_LOCKS]; 416 } buf_hash_table_t; 417 418 static buf_hash_table_t buf_hash_table; 419 420 #define BUF_HASH_INDEX(spa, dva, birth) \ 421 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 422 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 423 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 424 #define HDR_LOCK(buf) \ 425 (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) 426 427 uint64_t zfs_crc64_table[256]; 428 429 static uint64_t 430 buf_hash(spa_t *spa, dva_t *dva, uint64_t birth) 431 { 432 uintptr_t spav = (uintptr_t)spa; 433 uint8_t *vdva = (uint8_t *)dva; 434 uint64_t crc = -1ULL; 435 int i; 436 437 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 438 439 for (i = 0; i < sizeof (dva_t); i++) 440 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 441 442 crc ^= (spav>>8) ^ birth; 443 444 return (crc); 445 } 446 447 #define BUF_EMPTY(buf) \ 448 ((buf)->b_dva.dva_word[0] == 0 && \ 449 (buf)->b_dva.dva_word[1] == 0 && \ 450 (buf)->b_birth == 0) 451 452 #define BUF_EQUAL(spa, dva, birth, buf) \ 453 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 454 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 455 ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 456 457 static arc_buf_hdr_t * 458 buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp) 459 { 460 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 461 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 462 arc_buf_hdr_t *buf; 463 464 mutex_enter(hash_lock); 465 for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 466 buf = buf->b_hash_next) { 467 if (BUF_EQUAL(spa, dva, birth, buf)) { 468 *lockp = hash_lock; 469 return (buf); 470 } 471 } 472 mutex_exit(hash_lock); 473 *lockp = NULL; 474 return (NULL); 475 } 476 477 /* 478 * Insert an entry into the hash table. If there is already an element 479 * equal to elem in the hash table, then the already existing element 480 * will be returned and the new element will not be inserted. 481 * Otherwise returns NULL.
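 *
 * In either case the hash chain's mutex is returned held through *lockp,
 * so the caller must eventually drop it with mutex_exit().  A minimal,
 * illustrative caller sketch (variable names here are hypothetical, not
 * taken from this file):
 *
 *	arc_buf_hdr_t *exists;
 *	kmutex_t *hash_lock;
 *
 *	exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... an equal header won the race; use it instead of hdr ...
 *	}
 *	mutex_exit(hash_lock);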
482 */ 483 static arc_buf_hdr_t * 484 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 485 { 486 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 487 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 488 arc_buf_hdr_t *fbuf; 489 uint32_t i; 490 491 ASSERT(!HDR_IN_HASH_TABLE(buf)); 492 *lockp = hash_lock; 493 mutex_enter(hash_lock); 494 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 495 fbuf = fbuf->b_hash_next, i++) { 496 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 497 return (fbuf); 498 } 499 500 buf->b_hash_next = buf_hash_table.ht_table[idx]; 501 buf_hash_table.ht_table[idx] = buf; 502 buf->b_flags |= ARC_IN_HASH_TABLE; 503 504 /* collect some hash table performance data */ 505 if (i > 0) { 506 ARCSTAT_BUMP(arcstat_hash_collisions); 507 if (i == 1) 508 ARCSTAT_BUMP(arcstat_hash_chains); 509 510 ARCSTAT_MAX(arcstat_hash_chain_max, i); 511 } 512 513 ARCSTAT_BUMP(arcstat_hash_elements); 514 ARCSTAT_MAXSTAT(arcstat_hash_elements); 515 516 return (NULL); 517 } 518 519 static void 520 buf_hash_remove(arc_buf_hdr_t *buf) 521 { 522 arc_buf_hdr_t *fbuf, **bufp; 523 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 524 525 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 526 ASSERT(HDR_IN_HASH_TABLE(buf)); 527 528 bufp = &buf_hash_table.ht_table[idx]; 529 while ((fbuf = *bufp) != buf) { 530 ASSERT(fbuf != NULL); 531 bufp = &fbuf->b_hash_next; 532 } 533 *bufp = buf->b_hash_next; 534 buf->b_hash_next = NULL; 535 buf->b_flags &= ~ARC_IN_HASH_TABLE; 536 537 /* collect some hash table performance data */ 538 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 539 540 if (buf_hash_table.ht_table[idx] && 541 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 542 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 543 } 544 545 /* 546 * Global data structures and functions for the buf kmem cache. 547 */ 548 static kmem_cache_t *hdr_cache; 549 static kmem_cache_t *buf_cache; 550 551 static void 552 buf_fini(void) 553 { 554 int i; 555 556 kmem_free(buf_hash_table.ht_table, 557 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 558 for (i = 0; i < BUF_LOCKS; i++) 559 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 560 kmem_cache_destroy(hdr_cache); 561 kmem_cache_destroy(buf_cache); 562 } 563 564 /* 565 * Constructor callback - called when the cache is empty 566 * and a new buf is requested. 567 */ 568 /* ARGSUSED */ 569 static int 570 hdr_cons(void *vbuf, void *unused, int kmflag) 571 { 572 arc_buf_hdr_t *buf = vbuf; 573 574 bzero(buf, sizeof (arc_buf_hdr_t)); 575 refcount_create(&buf->b_refcnt); 576 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 577 return (0); 578 } 579 580 /* 581 * Destructor callback - called when a cached buf is 582 * no longer required. 583 */ 584 /* ARGSUSED */ 585 static void 586 hdr_dest(void *vbuf, void *unused) 587 { 588 arc_buf_hdr_t *buf = vbuf; 589 590 refcount_destroy(&buf->b_refcnt); 591 cv_destroy(&buf->b_cv); 592 } 593 594 /* 595 * Reclaim callback -- invoked when memory is low. 596 */ 597 /* ARGSUSED */ 598 static void 599 hdr_recl(void *unused) 600 { 601 dprintf("hdr_recl called\n"); 602 /* 603 * umem calls the reclaim func when we destroy the buf cache, 604 * which is after we do arc_fini(). 605 */ 606 if (!arc_dead) 607 cv_signal(&arc_reclaim_thr_cv); 608 } 609 610 static void 611 buf_init(void) 612 { 613 uint64_t *ct; 614 uint64_t hsize = 1ULL << 12; 615 int i, j; 616 617 /* 618 * The hash table is big enough to fill all of physical memory 619 * with an average 64K block size. 
The table will take up 620 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 621 */ 622 while (hsize * 65536 < physmem * PAGESIZE) 623 hsize <<= 1; 624 retry: 625 buf_hash_table.ht_mask = hsize - 1; 626 buf_hash_table.ht_table = 627 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 628 if (buf_hash_table.ht_table == NULL) { 629 ASSERT(hsize > (1ULL << 8)); 630 hsize >>= 1; 631 goto retry; 632 } 633 634 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 635 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 636 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 637 0, NULL, NULL, NULL, NULL, NULL, 0); 638 639 for (i = 0; i < 256; i++) 640 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 641 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 642 643 for (i = 0; i < BUF_LOCKS; i++) { 644 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 645 NULL, MUTEX_DEFAULT, NULL); 646 } 647 } 648 649 #define ARC_MINTIME (hz>>4) /* 62 ms */ 650 651 static void 652 arc_cksum_verify(arc_buf_t *buf) 653 { 654 zio_cksum_t zc; 655 656 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 657 return; 658 659 mutex_enter(&buf->b_hdr->b_freeze_lock); 660 if (buf->b_hdr->b_freeze_cksum == NULL || 661 (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 662 mutex_exit(&buf->b_hdr->b_freeze_lock); 663 return; 664 } 665 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 666 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 667 panic("buffer modified while frozen!"); 668 mutex_exit(&buf->b_hdr->b_freeze_lock); 669 } 670 671 static void 672 arc_cksum_compute(arc_buf_t *buf) 673 { 674 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 675 return; 676 677 mutex_enter(&buf->b_hdr->b_freeze_lock); 678 if (buf->b_hdr->b_freeze_cksum != NULL) { 679 mutex_exit(&buf->b_hdr->b_freeze_lock); 680 return; 681 } 682 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 683 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 684 buf->b_hdr->b_freeze_cksum); 685 mutex_exit(&buf->b_hdr->b_freeze_lock); 686 } 687 688 void 689 arc_buf_thaw(arc_buf_t *buf) 690 { 691 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 692 return; 693 694 if (buf->b_hdr->b_state != arc_anon) 695 panic("modifying non-anon buffer!"); 696 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) 697 panic("modifying buffer while i/o in progress!"); 698 arc_cksum_verify(buf); 699 mutex_enter(&buf->b_hdr->b_freeze_lock); 700 if (buf->b_hdr->b_freeze_cksum != NULL) { 701 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 702 buf->b_hdr->b_freeze_cksum = NULL; 703 } 704 mutex_exit(&buf->b_hdr->b_freeze_lock); 705 } 706 707 void 708 arc_buf_freeze(arc_buf_t *buf) 709 { 710 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 711 return; 712 713 ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 714 buf->b_hdr->b_state == arc_anon); 715 arc_cksum_compute(buf); 716 } 717 718 static void 719 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 720 { 721 ASSERT(MUTEX_HELD(hash_lock)); 722 723 if ((refcount_add(&ab->b_refcnt, tag) == 1) && 724 (ab->b_state != arc_anon)) { 725 uint64_t delta = ab->b_size * ab->b_datacnt; 726 727 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 728 mutex_enter(&ab->b_state->arcs_mtx); 729 ASSERT(list_link_active(&ab->b_arc_node)); 730 list_remove(&ab->b_state->arcs_list, ab); 731 if (GHOST_STATE(ab->b_state)) { 732 ASSERT3U(ab->b_datacnt, ==, 0); 733 ASSERT3P(ab->b_buf, ==, NULL); 734 delta = ab->b_size; 735 } 736 ASSERT(delta > 0); 737 ASSERT3U(ab->b_state->arcs_lsize, >=, delta); 738 atomic_add_64(&ab->b_state->arcs_lsize, 
-delta); 739 mutex_exit(&ab->b_state->arcs_mtx); 740 /* remove the prefetch flag if we get a reference */ 741 if (ab->b_flags & ARC_PREFETCH) 742 ab->b_flags &= ~ARC_PREFETCH; 743 } 744 } 745 746 static int 747 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 748 { 749 int cnt; 750 arc_state_t *state = ab->b_state; 751 752 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 753 ASSERT(!GHOST_STATE(state)); 754 755 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 756 (state != arc_anon)) { 757 ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 758 mutex_enter(&state->arcs_mtx); 759 ASSERT(!list_link_active(&ab->b_arc_node)); 760 list_insert_head(&state->arcs_list, ab); 761 ASSERT(ab->b_datacnt > 0); 762 atomic_add_64(&state->arcs_lsize, ab->b_size * ab->b_datacnt); 763 ASSERT3U(state->arcs_size, >=, state->arcs_lsize); 764 mutex_exit(&state->arcs_mtx); 765 } 766 return (cnt); 767 } 768 769 /* 770 * Move the supplied buffer to the indicated state. The mutex 771 * for the buffer must be held by the caller. 772 */ 773 static void 774 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 775 { 776 arc_state_t *old_state = ab->b_state; 777 int64_t refcnt = refcount_count(&ab->b_refcnt); 778 uint64_t from_delta, to_delta; 779 780 ASSERT(MUTEX_HELD(hash_lock)); 781 ASSERT(new_state != old_state); 782 ASSERT(refcnt == 0 || ab->b_datacnt > 0); 783 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 784 785 from_delta = to_delta = ab->b_datacnt * ab->b_size; 786 787 /* 788 * If this buffer is evictable, transfer it from the 789 * old state list to the new state list. 790 */ 791 if (refcnt == 0) { 792 if (old_state != arc_anon) { 793 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx); 794 795 if (use_mutex) 796 mutex_enter(&old_state->arcs_mtx); 797 798 ASSERT(list_link_active(&ab->b_arc_node)); 799 list_remove(&old_state->arcs_list, ab); 800 801 /* 802 * If prefetching out of the ghost cache, 803 * we will have a non-null datacnt.
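 * Otherwise (the normal ghost case, b_datacnt == 0) the header carries
 * no data buffers, so only its b_size, the "ghost size", is charged
 * against the old state below.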
804 */ 805 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 806 /* ghost elements have a ghost size */ 807 ASSERT(ab->b_buf == NULL); 808 from_delta = ab->b_size; 809 } 810 ASSERT3U(old_state->arcs_lsize, >=, from_delta); 811 atomic_add_64(&old_state->arcs_lsize, -from_delta); 812 813 if (use_mutex) 814 mutex_exit(&old_state->arcs_mtx); 815 } 816 if (new_state != arc_anon) { 817 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); 818 819 if (use_mutex) 820 mutex_enter(&new_state->arcs_mtx); 821 822 list_insert_head(&new_state->arcs_list, ab); 823 824 /* ghost elements have a ghost size */ 825 if (GHOST_STATE(new_state)) { 826 ASSERT(ab->b_datacnt == 0); 827 ASSERT(ab->b_buf == NULL); 828 to_delta = ab->b_size; 829 } 830 atomic_add_64(&new_state->arcs_lsize, to_delta); 831 ASSERT3U(new_state->arcs_size + to_delta, >=, 832 new_state->arcs_lsize); 833 834 if (use_mutex) 835 mutex_exit(&new_state->arcs_mtx); 836 } 837 } 838 839 ASSERT(!BUF_EMPTY(ab)); 840 if (new_state == arc_anon && old_state != arc_anon) { 841 buf_hash_remove(ab); 842 } 843 844 /* adjust state sizes */ 845 if (to_delta) 846 atomic_add_64(&new_state->arcs_size, to_delta); 847 if (from_delta) { 848 ASSERT3U(old_state->arcs_size, >=, from_delta); 849 atomic_add_64(&old_state->arcs_size, -from_delta); 850 } 851 ab->b_state = new_state; 852 } 853 854 arc_buf_t * 855 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 856 { 857 arc_buf_hdr_t *hdr; 858 arc_buf_t *buf; 859 860 ASSERT3U(size, >, 0); 861 hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP); 862 ASSERT(BUF_EMPTY(hdr)); 863 hdr->b_size = size; 864 hdr->b_type = type; 865 hdr->b_spa = spa; 866 hdr->b_state = arc_anon; 867 hdr->b_arc_access = 0; 868 buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 869 buf->b_hdr = hdr; 870 buf->b_data = NULL; 871 buf->b_efunc = NULL; 872 buf->b_private = NULL; 873 buf->b_next = NULL; 874 hdr->b_buf = buf; 875 arc_get_data_buf(buf); 876 hdr->b_datacnt = 1; 877 hdr->b_flags = 0; 878 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 879 (void) refcount_add(&hdr->b_refcnt, tag); 880 881 return (buf); 882 } 883 884 static arc_buf_t * 885 arc_buf_clone(arc_buf_t *from) 886 { 887 arc_buf_t *buf; 888 arc_buf_hdr_t *hdr = from->b_hdr; 889 uint64_t size = hdr->b_size; 890 891 buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 892 buf->b_hdr = hdr; 893 buf->b_data = NULL; 894 buf->b_efunc = NULL; 895 buf->b_private = NULL; 896 buf->b_next = hdr->b_buf; 897 hdr->b_buf = buf; 898 arc_get_data_buf(buf); 899 bcopy(from->b_data, buf->b_data, size); 900 hdr->b_datacnt += 1; 901 return (buf); 902 } 903 904 void 905 arc_buf_add_ref(arc_buf_t *buf, void* tag) 906 { 907 arc_buf_hdr_t *hdr; 908 kmutex_t *hash_lock; 909 910 /* 911 * Check to see if this buffer is currently being evicted via 912 * arc_do_user_evicts(). 913 */ 914 mutex_enter(&arc_eviction_mtx); 915 hdr = buf->b_hdr; 916 if (hdr == NULL) { 917 mutex_exit(&arc_eviction_mtx); 918 return; 919 } 920 hash_lock = HDR_LOCK(hdr); 921 mutex_exit(&arc_eviction_mtx); 922 923 mutex_enter(hash_lock); 924 if (buf->b_data == NULL) { 925 /* 926 * This buffer is evicted. 
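 * Its data has already been released by the eviction path and the buf
 * is only waiting for its user eviction callback, so there is nothing
 * to take a reference on; just drop the hash lock and return.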
927 */ 928 mutex_exit(hash_lock); 929 return; 930 } 931 932 ASSERT(buf->b_hdr == hdr); 933 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 934 add_reference(hdr, hash_lock, tag); 935 arc_access(hdr, hash_lock); 936 mutex_exit(hash_lock); 937 ARCSTAT_BUMP(arcstat_hits); 938 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 939 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 940 data, metadata, hits); 941 } 942 943 static void 944 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 945 { 946 arc_buf_t **bufp; 947 948 /* free up data associated with the buf */ 949 if (buf->b_data) { 950 arc_state_t *state = buf->b_hdr->b_state; 951 uint64_t size = buf->b_hdr->b_size; 952 arc_buf_contents_t type = buf->b_hdr->b_type; 953 954 arc_cksum_verify(buf); 955 if (!recycle) { 956 if (type == ARC_BUFC_METADATA) { 957 zio_buf_free(buf->b_data, size); 958 } else { 959 ASSERT(type == ARC_BUFC_DATA); 960 zio_data_buf_free(buf->b_data, size); 961 } 962 atomic_add_64(&arc_size, -size); 963 } 964 if (list_link_active(&buf->b_hdr->b_arc_node)) { 965 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 966 ASSERT(state != arc_anon); 967 ASSERT3U(state->arcs_lsize, >=, size); 968 atomic_add_64(&state->arcs_lsize, -size); 969 } 970 ASSERT3U(state->arcs_size, >=, size); 971 atomic_add_64(&state->arcs_size, -size); 972 buf->b_data = NULL; 973 ASSERT(buf->b_hdr->b_datacnt > 0); 974 buf->b_hdr->b_datacnt -= 1; 975 } 976 977 /* only remove the buf if requested */ 978 if (!all) 979 return; 980 981 /* remove the buf from the hdr list */ 982 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 983 continue; 984 *bufp = buf->b_next; 985 986 ASSERT(buf->b_efunc == NULL); 987 988 /* clean up the buf */ 989 buf->b_hdr = NULL; 990 kmem_cache_free(buf_cache, buf); 991 } 992 993 static void 994 arc_hdr_destroy(arc_buf_hdr_t *hdr) 995 { 996 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 997 ASSERT3P(hdr->b_state, ==, arc_anon); 998 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 999 1000 if (!BUF_EMPTY(hdr)) { 1001 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1002 bzero(&hdr->b_dva, sizeof (dva_t)); 1003 hdr->b_birth = 0; 1004 hdr->b_cksum0 = 0; 1005 } 1006 while (hdr->b_buf) { 1007 arc_buf_t *buf = hdr->b_buf; 1008 1009 if (buf->b_efunc) { 1010 mutex_enter(&arc_eviction_mtx); 1011 ASSERT(buf->b_hdr != NULL); 1012 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1013 hdr->b_buf = buf->b_next; 1014 buf->b_hdr = &arc_eviction_hdr; 1015 buf->b_next = arc_eviction_list; 1016 arc_eviction_list = buf; 1017 mutex_exit(&arc_eviction_mtx); 1018 } else { 1019 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1020 } 1021 } 1022 if (hdr->b_freeze_cksum != NULL) { 1023 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1024 hdr->b_freeze_cksum = NULL; 1025 } 1026 1027 ASSERT(!list_link_active(&hdr->b_arc_node)); 1028 ASSERT3P(hdr->b_hash_next, ==, NULL); 1029 ASSERT3P(hdr->b_acb, ==, NULL); 1030 kmem_cache_free(hdr_cache, hdr); 1031 } 1032 1033 void 1034 arc_buf_free(arc_buf_t *buf, void *tag) 1035 { 1036 arc_buf_hdr_t *hdr = buf->b_hdr; 1037 int hashed = hdr->b_state != arc_anon; 1038 1039 ASSERT(buf->b_efunc == NULL); 1040 ASSERT(buf->b_data != NULL); 1041 1042 if (hashed) { 1043 kmutex_t *hash_lock = HDR_LOCK(hdr); 1044 1045 mutex_enter(hash_lock); 1046 (void) remove_reference(hdr, hash_lock, tag); 1047 if (hdr->b_datacnt > 1) 1048 arc_buf_destroy(buf, FALSE, TRUE); 1049 else 1050 hdr->b_flags |= ARC_BUF_AVAILABLE; 1051 mutex_exit(hash_lock); 1052 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1053 int destroy_hdr; 1054 /* 1055 * We are in the 
middle of an async write. Don't destroy 1056 * this buffer unless the write completes before we finish 1057 * decrementing the reference count. 1058 */ 1059 mutex_enter(&arc_eviction_mtx); 1060 (void) remove_reference(hdr, NULL, tag); 1061 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1062 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1063 mutex_exit(&arc_eviction_mtx); 1064 if (destroy_hdr) 1065 arc_hdr_destroy(hdr); 1066 } else { 1067 if (remove_reference(hdr, NULL, tag) > 0) { 1068 ASSERT(HDR_IO_ERROR(hdr)); 1069 arc_buf_destroy(buf, FALSE, TRUE); 1070 } else { 1071 arc_hdr_destroy(hdr); 1072 } 1073 } 1074 } 1075 1076 int 1077 arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1078 { 1079 arc_buf_hdr_t *hdr = buf->b_hdr; 1080 kmutex_t *hash_lock = HDR_LOCK(hdr); 1081 int no_callback = (buf->b_efunc == NULL); 1082 1083 if (hdr->b_state == arc_anon) { 1084 arc_buf_free(buf, tag); 1085 return (no_callback); 1086 } 1087 1088 mutex_enter(hash_lock); 1089 ASSERT(hdr->b_state != arc_anon); 1090 ASSERT(buf->b_data != NULL); 1091 1092 (void) remove_reference(hdr, hash_lock, tag); 1093 if (hdr->b_datacnt > 1) { 1094 if (no_callback) 1095 arc_buf_destroy(buf, FALSE, TRUE); 1096 } else if (no_callback) { 1097 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1098 hdr->b_flags |= ARC_BUF_AVAILABLE; 1099 } 1100 ASSERT(no_callback || hdr->b_datacnt > 1 || 1101 refcount_is_zero(&hdr->b_refcnt)); 1102 mutex_exit(hash_lock); 1103 return (no_callback); 1104 } 1105 1106 int 1107 arc_buf_size(arc_buf_t *buf) 1108 { 1109 return (buf->b_hdr->b_size); 1110 } 1111 1112 /* 1113 * Evict buffers from list until we've removed the specified number of 1114 * bytes. Move the removed buffers to the appropriate evict state. 1115 * If the recycle flag is set, then attempt to "recycle" a buffer: 1116 * - look for a buffer to evict that is `bytes' long. 1117 * - return the data block from this buffer rather than freeing it. 1118 * This flag is used by callers that are trying to make space for a 1119 * new buffer in a full arc cache. 1120 */ 1121 static void * 1122 arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle, 1123 arc_buf_contents_t type) 1124 { 1125 arc_state_t *evicted_state; 1126 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1127 arc_buf_hdr_t *ab, *ab_prev = NULL; 1128 kmutex_t *hash_lock; 1129 boolean_t have_lock; 1130 void *stolen = NULL; 1131 1132 ASSERT(state == arc_mru || state == arc_mfu); 1133 1134 evicted_state = (state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 1135 1136 mutex_enter(&state->arcs_mtx); 1137 mutex_enter(&evicted_state->arcs_mtx); 1138 1139 for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) { 1140 ab_prev = list_prev(&state->arcs_list, ab); 1141 /* prefetch buffers have a minimum lifespan */ 1142 if (HDR_IO_IN_PROGRESS(ab) || 1143 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1144 lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { 1145 skipped++; 1146 continue; 1147 } 1148 /* "lookahead" for better eviction candidate */ 1149 if (recycle && ab->b_size != bytes && 1150 ab_prev && ab_prev->b_size == bytes) 1151 continue; 1152 hash_lock = HDR_LOCK(ab); 1153 have_lock = MUTEX_HELD(hash_lock); 1154 if (have_lock || mutex_tryenter(hash_lock)) { 1155 ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 1156 ASSERT(ab->b_datacnt > 0); 1157 while (ab->b_buf) { 1158 arc_buf_t *buf = ab->b_buf; 1159 if (buf->b_data) { 1160 bytes_evicted += ab->b_size; 1161 if (recycle && ab->b_type == type && 1162 ab->b_size == bytes) { 1163 stolen = buf->b_data; 1164 recycle = FALSE; 1165 } 1166 } 1167 if (buf->b_efunc) { 1168 mutex_enter(&arc_eviction_mtx); 1169 arc_buf_destroy(buf, 1170 buf->b_data == stolen, FALSE); 1171 ab->b_buf = buf->b_next; 1172 buf->b_hdr = &arc_eviction_hdr; 1173 buf->b_next = arc_eviction_list; 1174 arc_eviction_list = buf; 1175 mutex_exit(&arc_eviction_mtx); 1176 } else { 1177 arc_buf_destroy(buf, 1178 buf->b_data == stolen, TRUE); 1179 } 1180 } 1181 ASSERT(ab->b_datacnt == 0); 1182 arc_change_state(evicted_state, ab, hash_lock); 1183 ASSERT(HDR_IN_HASH_TABLE(ab)); 1184 ab->b_flags = ARC_IN_HASH_TABLE; 1185 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 1186 if (!have_lock) 1187 mutex_exit(hash_lock); 1188 if (bytes >= 0 && bytes_evicted >= bytes) 1189 break; 1190 } else { 1191 missed += 1; 1192 } 1193 } 1194 1195 mutex_exit(&evicted_state->arcs_mtx); 1196 mutex_exit(&state->arcs_mtx); 1197 1198 if (bytes_evicted < bytes) 1199 dprintf("only evicted %lld bytes from %x", 1200 (longlong_t)bytes_evicted, state); 1201 1202 if (skipped) 1203 ARCSTAT_INCR(arcstat_evict_skip, skipped); 1204 1205 if (missed) 1206 ARCSTAT_INCR(arcstat_mutex_miss, missed); 1207 1208 return (stolen); 1209 } 1210 1211 /* 1212 * Remove buffers from list until we've removed the specified number of 1213 * bytes. Destroy the buffers that are removed. 
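 * A negative byte count means "delete everything": the loop's exit test
 * (bytes >= 0 && bytes_deleted >= bytes) never fires, which is how
 * arc_flush() empties the ghost lists with a byte count of -1.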
1214 */ 1215 static void 1216 arc_evict_ghost(arc_state_t *state, int64_t bytes) 1217 { 1218 arc_buf_hdr_t *ab, *ab_prev; 1219 kmutex_t *hash_lock; 1220 uint64_t bytes_deleted = 0; 1221 uint64_t bufs_skipped = 0; 1222 1223 ASSERT(GHOST_STATE(state)); 1224 top: 1225 mutex_enter(&state->arcs_mtx); 1226 for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) { 1227 ab_prev = list_prev(&state->arcs_list, ab); 1228 hash_lock = HDR_LOCK(ab); 1229 if (mutex_tryenter(hash_lock)) { 1230 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1231 ASSERT(ab->b_buf == NULL); 1232 arc_change_state(arc_anon, ab, hash_lock); 1233 mutex_exit(hash_lock); 1234 ARCSTAT_BUMP(arcstat_deleted); 1235 bytes_deleted += ab->b_size; 1236 arc_hdr_destroy(ab); 1237 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1238 if (bytes >= 0 && bytes_deleted >= bytes) 1239 break; 1240 } else { 1241 if (bytes < 0) { 1242 mutex_exit(&state->arcs_mtx); 1243 mutex_enter(hash_lock); 1244 mutex_exit(hash_lock); 1245 goto top; 1246 } 1247 bufs_skipped += 1; 1248 } 1249 } 1250 mutex_exit(&state->arcs_mtx); 1251 1252 if (bufs_skipped) { 1253 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1254 ASSERT(bytes >= 0); 1255 } 1256 1257 if (bytes_deleted < bytes) 1258 dprintf("only deleted %lld bytes from %p", 1259 (longlong_t)bytes_deleted, state); 1260 } 1261 1262 static void 1263 arc_adjust(void) 1264 { 1265 int64_t top_sz, mru_over, arc_over, todelete; 1266 1267 top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1268 1269 if (top_sz > arc_p && arc_mru->arcs_lsize > 0) { 1270 int64_t toevict = MIN(arc_mru->arcs_lsize, top_sz - arc_p); 1271 (void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_UNDEF); 1272 top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1273 } 1274 1275 mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; 1276 1277 if (mru_over > 0) { 1278 if (arc_mru_ghost->arcs_lsize > 0) { 1279 todelete = MIN(arc_mru_ghost->arcs_lsize, mru_over); 1280 arc_evict_ghost(arc_mru_ghost, todelete); 1281 } 1282 } 1283 1284 if ((arc_over = arc_size - arc_c) > 0) { 1285 int64_t tbl_over; 1286 1287 if (arc_mfu->arcs_lsize > 0) { 1288 int64_t toevict = MIN(arc_mfu->arcs_lsize, arc_over); 1289 (void) arc_evict(arc_mfu, toevict, FALSE, 1290 ARC_BUFC_UNDEF); 1291 } 1292 1293 tbl_over = arc_size + arc_mru_ghost->arcs_lsize + 1294 arc_mfu_ghost->arcs_lsize - arc_c*2; 1295 1296 if (tbl_over > 0 && arc_mfu_ghost->arcs_lsize > 0) { 1297 todelete = MIN(arc_mfu_ghost->arcs_lsize, tbl_over); 1298 arc_evict_ghost(arc_mfu_ghost, todelete); 1299 } 1300 } 1301 } 1302 1303 static void 1304 arc_do_user_evicts(void) 1305 { 1306 mutex_enter(&arc_eviction_mtx); 1307 while (arc_eviction_list != NULL) { 1308 arc_buf_t *buf = arc_eviction_list; 1309 arc_eviction_list = buf->b_next; 1310 buf->b_hdr = NULL; 1311 mutex_exit(&arc_eviction_mtx); 1312 1313 if (buf->b_efunc != NULL) 1314 VERIFY(buf->b_efunc(buf) == 0); 1315 1316 buf->b_efunc = NULL; 1317 buf->b_private = NULL; 1318 kmem_cache_free(buf_cache, buf); 1319 mutex_enter(&arc_eviction_mtx); 1320 } 1321 mutex_exit(&arc_eviction_mtx); 1322 } 1323 1324 /* 1325 * Flush all *evictable* data from the cache. 1326 * NOTE: this will not touch "active" (i.e. referenced) data. 
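 * The body below first drains the evictable mru and mfu lists, then
 * deletes both ghost lists, and finally runs any pending user eviction
 * callbacks under arc_reclaim_thr_lock.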
1327 */ 1328 void 1329 arc_flush(void) 1330 { 1331 while (list_head(&arc_mru->arcs_list)) 1332 (void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_UNDEF); 1333 while (list_head(&arc_mfu->arcs_list)) 1334 (void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_UNDEF); 1335 1336 arc_evict_ghost(arc_mru_ghost, -1); 1337 arc_evict_ghost(arc_mfu_ghost, -1); 1338 1339 mutex_enter(&arc_reclaim_thr_lock); 1340 arc_do_user_evicts(); 1341 mutex_exit(&arc_reclaim_thr_lock); 1342 ASSERT(arc_eviction_list == NULL); 1343 } 1344 1345 int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ 1346 1347 void 1348 arc_shrink(void) 1349 { 1350 if (arc_c > arc_c_min) { 1351 uint64_t to_free; 1352 1353 #ifdef _KERNEL 1354 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 1355 #else 1356 to_free = arc_c >> arc_shrink_shift; 1357 #endif 1358 if (arc_c > arc_c_min + to_free) 1359 atomic_add_64(&arc_c, -to_free); 1360 else 1361 arc_c = arc_c_min; 1362 1363 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 1364 if (arc_c > arc_size) 1365 arc_c = MAX(arc_size, arc_c_min); 1366 if (arc_p > arc_c) 1367 arc_p = (arc_c >> 1); 1368 ASSERT(arc_c >= arc_c_min); 1369 ASSERT((int64_t)arc_p >= 0); 1370 } 1371 1372 if (arc_size > arc_c) 1373 arc_adjust(); 1374 } 1375 1376 static int 1377 arc_reclaim_needed(void) 1378 { 1379 uint64_t extra; 1380 1381 #ifdef _KERNEL 1382 1383 if (needfree) 1384 return (1); 1385 1386 /* 1387 * take 'desfree' extra pages, so we reclaim sooner, rather than later 1388 */ 1389 extra = desfree; 1390 1391 /* 1392 * check that we're out of range of the pageout scanner. It starts to 1393 * schedule paging if freemem is less than lotsfree and needfree. 1394 * lotsfree is the high-water mark for pageout, and needfree is the 1395 * number of needed free pages. We add extra pages here to make sure 1396 * the scanner doesn't start up while we're freeing memory. 1397 */ 1398 if (freemem < lotsfree + needfree + extra) 1399 return (1); 1400 1401 /* 1402 * check to make sure that swapfs has enough space so that anon 1403 * reservations can still succeed. anon_resvmem() checks that the 1404 * availrmem is greater than swapfs_minfree, and the number of reserved 1405 * swap pages. We also add a bit of extra here just to prevent 1406 * circumstances from getting really dire. 1407 */ 1408 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 1409 return (1); 1410 1411 /* 1412 * If zio data pages are being allocated out of a separate heap segment, 1413 * then check that the size of available vmem for this area remains 1414 * above 1/4th free. This needs to be done when the size of the 1415 * non-default segment is smaller than physical memory, so we could 1416 * conceivably run out of VA in that segment before running out of 1417 * physical memory. 1418 */ 1419 if (zio_arena != NULL) { 1420 size_t arc_ziosize = 1421 btop(vmem_size(zio_arena, VMEM_FREE | VMEM_ALLOC)); 1422 1423 if ((physmem > arc_ziosize) && 1424 (btop(vmem_size(zio_arena, VMEM_FREE)) < arc_ziosize >> 2)) 1425 return (1); 1426 } 1427 1428 #if defined(__i386) 1429 /* 1430 * If we're on an i386 platform, it's possible that we'll exhaust the 1431 * kernel heap space before we ever run out of available physical 1432 * memory. Most checks of the size of the heap_area compare against 1433 * tune.t_minarmem, which is the minimum available real memory that we 1434 * can have in the system. However, this is generally fixed at 25 pages 1435 * which is so low that it's useless.
In this comparison, we seek to 1436 * calculate the total heap-size, and reclaim if more than 3/4ths of the 1437 * heap is allocated. (Or, in the calculation, if less than 1/4th is 1438 * free) 1439 */ 1440 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 1441 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 1442 return (1); 1443 #endif 1444 1445 #else 1446 if (spa_get_random(100) == 0) 1447 return (1); 1448 #endif 1449 return (0); 1450 } 1451 1452 static void 1453 arc_kmem_reap_now(arc_reclaim_strategy_t strat) 1454 { 1455 size_t i; 1456 kmem_cache_t *prev_cache = NULL; 1457 kmem_cache_t *prev_data_cache = NULL; 1458 extern kmem_cache_t *zio_buf_cache[]; 1459 extern kmem_cache_t *zio_data_buf_cache[]; 1460 1461 #ifdef _KERNEL 1462 /* 1463 * First purge some DNLC entries, in case the DNLC is using 1464 * up too much memory. 1465 */ 1466 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 1467 1468 #if defined(__i386) 1469 /* 1470 * Reclaim unused memory from all kmem caches. 1471 */ 1472 kmem_reap(); 1473 #endif 1474 #endif 1475 1476 /* 1477 * An aggressive reclamation will shrink the cache size as well as 1478 * reap free buffers from the arc kmem caches. 1479 */ 1480 if (strat == ARC_RECLAIM_AGGR) 1481 arc_shrink(); 1482 1483 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1484 if (zio_buf_cache[i] != prev_cache) { 1485 prev_cache = zio_buf_cache[i]; 1486 kmem_cache_reap_now(zio_buf_cache[i]); 1487 } 1488 if (zio_data_buf_cache[i] != prev_data_cache) { 1489 prev_data_cache = zio_data_buf_cache[i]; 1490 kmem_cache_reap_now(zio_data_buf_cache[i]); 1491 } 1492 } 1493 kmem_cache_reap_now(buf_cache); 1494 kmem_cache_reap_now(hdr_cache); 1495 } 1496 1497 static void 1498 arc_reclaim_thread(void) 1499 { 1500 clock_t growtime = 0; 1501 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1502 callb_cpr_t cpr; 1503 1504 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1505 1506 mutex_enter(&arc_reclaim_thr_lock); 1507 while (arc_thread_exit == 0) { 1508 if (arc_reclaim_needed()) { 1509 1510 if (arc_no_grow) { 1511 if (last_reclaim == ARC_RECLAIM_CONS) { 1512 last_reclaim = ARC_RECLAIM_AGGR; 1513 } else { 1514 last_reclaim = ARC_RECLAIM_CONS; 1515 } 1516 } else { 1517 arc_no_grow = TRUE; 1518 last_reclaim = ARC_RECLAIM_AGGR; 1519 membar_producer(); 1520 } 1521 1522 /* reset the growth delay for every reclaim */ 1523 growtime = lbolt + (arc_grow_retry * hz); 1524 ASSERT(growtime > 0); 1525 1526 arc_kmem_reap_now(last_reclaim); 1527 1528 } else if ((growtime > 0) && ((growtime - lbolt) <= 0)) { 1529 arc_no_grow = FALSE; 1530 } 1531 1532 if (2 * arc_c < arc_size + 1533 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) 1534 arc_adjust(); 1535 1536 if (arc_eviction_list != NULL) 1537 arc_do_user_evicts(); 1538 1539 /* block until needed, or one second, whichever is shorter */ 1540 CALLB_CPR_SAFE_BEGIN(&cpr); 1541 (void) cv_timedwait(&arc_reclaim_thr_cv, 1542 &arc_reclaim_thr_lock, (lbolt + hz)); 1543 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 1544 } 1545 1546 arc_thread_exit = 0; 1547 cv_broadcast(&arc_reclaim_thr_cv); 1548 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 1549 thread_exit(); 1550 } 1551 1552 /* 1553 * Adapt arc info given the number of bytes we are trying to add and 1554 * the state that we are coming from. This function is only called 1555 * when we are adding new content to the cache.
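 *
 * Illustrative example (hypothetical numbers): with 100MB on the MRU
 * ghost list and 400MB on the MFU ghost list, a hit in the MRU ghost
 * list uses mult = 400/100 = 4, so a 128K block grows arc_p (the MRU
 * target) by 512K, clamped to arc_c.  A hit in the MFU ghost list in
 * the mirror-image situation shrinks arc_p by the same amount, clamped
 * at zero.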
1556 */ 1557 static void 1558 arc_adapt(int bytes, arc_state_t *state) 1559 { 1560 int mult; 1561 1562 ASSERT(bytes > 0); 1563 /* 1564 * Adapt the target size of the MRU list: 1565 * - if we just hit in the MRU ghost list, then increase 1566 * the target size of the MRU list. 1567 * - if we just hit in the MFU ghost list, then increase 1568 * the target size of the MFU list by decreasing the 1569 * target size of the MRU list. 1570 */ 1571 if (state == arc_mru_ghost) { 1572 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 1573 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 1574 1575 arc_p = MIN(arc_c, arc_p + bytes * mult); 1576 } else if (state == arc_mfu_ghost) { 1577 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 1578 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 1579 1580 arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 1581 } 1582 ASSERT((int64_t)arc_p >= 0); 1583 1584 if (arc_reclaim_needed()) { 1585 cv_signal(&arc_reclaim_thr_cv); 1586 return; 1587 } 1588 1589 if (arc_no_grow) 1590 return; 1591 1592 if (arc_c >= arc_c_max) 1593 return; 1594 1595 /* 1596 * If we're within (2 * maxblocksize) bytes of the target 1597 * cache size, increment the target cache size 1598 */ 1599 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 1600 atomic_add_64(&arc_c, (int64_t)bytes); 1601 if (arc_c > arc_c_max) 1602 arc_c = arc_c_max; 1603 else if (state == arc_anon) 1604 atomic_add_64(&arc_p, (int64_t)bytes); 1605 if (arc_p > arc_c) 1606 arc_p = arc_c; 1607 } 1608 ASSERT((int64_t)arc_p >= 0); 1609 } 1610 1611 /* 1612 * Check if the cache has reached its limits and eviction is required 1613 * prior to insert. 1614 */ 1615 static int 1616 arc_evict_needed() 1617 { 1618 if (arc_reclaim_needed()) 1619 return (1); 1620 1621 return (arc_size > arc_c); 1622 } 1623 1624 /* 1625 * The buffer, supplied as the first argument, needs a data block. 1626 * So, if we are at cache max, determine which cache should be victimized. 1627 * We have the following cases: 1628 * 1629 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 1630 * In this situation if we're out of space, but the resident size of the MFU is 1631 * under the limit, victimize the MFU cache to satisfy this insertion request. 1632 * 1633 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 1634 * Here, we've used up all of the available space for the MRU, so we need to 1635 * evict from our own cache instead. Evict from the set of resident MRU 1636 * entries. 1637 * 1638 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 1639 * c minus p represents the MFU space in the cache, since p is the size of the 1640 * cache that is dedicated to the MRU. In this situation there's still space on 1641 * the MFU side, so the MRU side needs to be victimized. 1642 * 1643 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 1644 * MFU's resident set is consuming more space than it has been allotted. In 1645 * this situation, we must victimize our own cache, the MFU, for this insertion. 1646 */ 1647 static void 1648 arc_get_data_buf(arc_buf_t *buf) 1649 { 1650 arc_state_t *state = buf->b_hdr->b_state; 1651 uint64_t size = buf->b_hdr->b_size; 1652 arc_buf_contents_t type = buf->b_hdr->b_type; 1653 1654 arc_adapt(size, state); 1655 1656 /* 1657 * We have not yet reached cache maximum size, 1658 * just allocate a new buffer. 
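 * Otherwise we fall through below: pick a victim state according to
 * the case analysis above and try to recycle an evictable buffer of
 * the same size, falling back to a fresh allocation if that fails.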
1659 */ 1660 if (!arc_evict_needed()) { 1661 if (type == ARC_BUFC_METADATA) { 1662 buf->b_data = zio_buf_alloc(size); 1663 } else { 1664 ASSERT(type == ARC_BUFC_DATA); 1665 buf->b_data = zio_data_buf_alloc(size); 1666 } 1667 atomic_add_64(&arc_size, size); 1668 goto out; 1669 } 1670 1671 /* 1672 * If we are prefetching from the mfu ghost list, this buffer 1673 * will end up on the mru list; so steal space from there. 1674 */ 1675 if (state == arc_mfu_ghost) 1676 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 1677 else if (state == arc_mru_ghost) 1678 state = arc_mru; 1679 1680 if (state == arc_mru || state == arc_anon) { 1681 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 1682 state = (arc_p > mru_used) ? arc_mfu : arc_mru; 1683 } else { 1684 /* MFU cases */ 1685 uint64_t mfu_space = arc_c - arc_p; 1686 state = (mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 1687 } 1688 if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) { 1689 if (type == ARC_BUFC_METADATA) { 1690 buf->b_data = zio_buf_alloc(size); 1691 } else { 1692 ASSERT(type == ARC_BUFC_DATA); 1693 buf->b_data = zio_data_buf_alloc(size); 1694 } 1695 atomic_add_64(&arc_size, size); 1696 ARCSTAT_BUMP(arcstat_recycle_miss); 1697 } 1698 ASSERT(buf->b_data != NULL); 1699 out: 1700 /* 1701 * Update the state size. Note that ghost states have a 1702 * "ghost size" and so don't need to be updated. 1703 */ 1704 if (!GHOST_STATE(buf->b_hdr->b_state)) { 1705 arc_buf_hdr_t *hdr = buf->b_hdr; 1706 1707 atomic_add_64(&hdr->b_state->arcs_size, size); 1708 if (list_link_active(&hdr->b_arc_node)) { 1709 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1710 atomic_add_64(&hdr->b_state->arcs_lsize, size); 1711 } 1712 /* 1713 * If we are growing the cache, and we are adding anonymous 1714 * data, and we have outgrown arc_p, update arc_p 1715 */ 1716 if (arc_size < arc_c && hdr->b_state == arc_anon && 1717 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 1718 arc_p = MIN(arc_c, arc_p + size); 1719 } 1720 } 1721 1722 /* 1723 * This routine is called whenever a buffer is accessed. 1724 * NOTE: the hash lock is dropped in this function. 1725 */ 1726 static void 1727 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 1728 { 1729 ASSERT(MUTEX_HELD(hash_lock)); 1730 1731 if (buf->b_state == arc_anon) { 1732 /* 1733 * This buffer is not in the cache, and does not 1734 * appear in our "ghost" list. Add the new buffer 1735 * to the MRU state. 1736 */ 1737 1738 ASSERT(buf->b_arc_access == 0); 1739 buf->b_arc_access = lbolt; 1740 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 1741 arc_change_state(arc_mru, buf, hash_lock); 1742 1743 } else if (buf->b_state == arc_mru) { 1744 /* 1745 * If this buffer is here because of a prefetch, then either: 1746 * - clear the flag if this is a "referencing" read 1747 * (any subsequent access will bump this into the MFU state). 1748 * or 1749 * - move the buffer to the head of the list if this is 1750 * another prefetch (to make it less likely to be evicted). 
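 * (Illustrative walk-through of the code below: a demand read that
 * finds a prefetched buffer here clears ARC_PREFETCH so a later access
 * can promote it to the MFU once ARC_MINTIME has elapsed, while a
 * second prefetch of the same block simply re-inserts it at the head
 * of the MRU list.)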
1751 */ 1752 if ((buf->b_flags & ARC_PREFETCH) != 0) { 1753 if (refcount_count(&buf->b_refcnt) == 0) { 1754 ASSERT(list_link_active(&buf->b_arc_node)); 1755 mutex_enter(&arc_mru->arcs_mtx); 1756 list_remove(&arc_mru->arcs_list, buf); 1757 list_insert_head(&arc_mru->arcs_list, buf); 1758 mutex_exit(&arc_mru->arcs_mtx); 1759 } else { 1760 buf->b_flags &= ~ARC_PREFETCH; 1761 ARCSTAT_BUMP(arcstat_mru_hits); 1762 } 1763 buf->b_arc_access = lbolt; 1764 return; 1765 } 1766 1767 /* 1768 * This buffer has been "accessed" only once so far, 1769 * but it is still in the cache. Move it to the MFU 1770 * state. 1771 */ 1772 if (lbolt > buf->b_arc_access + ARC_MINTIME) { 1773 /* 1774 * More than 125ms have passed since we 1775 * instantiated this buffer. Move it to the 1776 * most frequently used state. 1777 */ 1778 buf->b_arc_access = lbolt; 1779 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1780 arc_change_state(arc_mfu, buf, hash_lock); 1781 } 1782 ARCSTAT_BUMP(arcstat_mru_hits); 1783 } else if (buf->b_state == arc_mru_ghost) { 1784 arc_state_t *new_state; 1785 /* 1786 * This buffer has been "accessed" recently, but 1787 * was evicted from the cache. Move it to the 1788 * MFU state. 1789 */ 1790 1791 if (buf->b_flags & ARC_PREFETCH) { 1792 new_state = arc_mru; 1793 if (refcount_count(&buf->b_refcnt) > 0) 1794 buf->b_flags &= ~ARC_PREFETCH; 1795 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 1796 } else { 1797 new_state = arc_mfu; 1798 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1799 } 1800 1801 buf->b_arc_access = lbolt; 1802 arc_change_state(new_state, buf, hash_lock); 1803 1804 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 1805 } else if (buf->b_state == arc_mfu) { 1806 /* 1807 * This buffer has been accessed more than once and is 1808 * still in the cache. Keep it in the MFU state. 1809 * 1810 * NOTE: an add_reference() that occurred when we did 1811 * the arc_read() will have kicked this off the list. 1812 * If it was a prefetch, we will explicitly move it to 1813 * the head of the list now. 1814 */ 1815 if ((buf->b_flags & ARC_PREFETCH) != 0) { 1816 ASSERT(refcount_count(&buf->b_refcnt) == 0); 1817 ASSERT(list_link_active(&buf->b_arc_node)); 1818 mutex_enter(&arc_mfu->arcs_mtx); 1819 list_remove(&arc_mfu->arcs_list, buf); 1820 list_insert_head(&arc_mfu->arcs_list, buf); 1821 mutex_exit(&arc_mfu->arcs_mtx); 1822 } 1823 ARCSTAT_BUMP(arcstat_mfu_hits); 1824 buf->b_arc_access = lbolt; 1825 } else if (buf->b_state == arc_mfu_ghost) { 1826 arc_state_t *new_state = arc_mfu; 1827 /* 1828 * This buffer has been accessed more than once but has 1829 * been evicted from the cache. Move it back to the 1830 * MFU state. 1831 */ 1832 1833 if (buf->b_flags & ARC_PREFETCH) { 1834 /* 1835 * This is a prefetch access... 1836 * move this block back to the MRU state. 
1837 */ 1838 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 1839 new_state = arc_mru; 1840 } 1841 1842 buf->b_arc_access = lbolt; 1843 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1844 arc_change_state(new_state, buf, hash_lock); 1845 1846 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 1847 } else { 1848 ASSERT(!"invalid arc state"); 1849 } 1850 } 1851 1852 /* a generic arc_done_func_t which you can use */ 1853 /* ARGSUSED */ 1854 void 1855 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 1856 { 1857 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 1858 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1859 } 1860 1861 /* a generic arc_done_func_t which you can use */ 1862 void 1863 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 1864 { 1865 arc_buf_t **bufp = arg; 1866 if (zio && zio->io_error) { 1867 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1868 *bufp = NULL; 1869 } else { 1870 *bufp = buf; 1871 } 1872 } 1873 1874 static void 1875 arc_read_done(zio_t *zio) 1876 { 1877 arc_buf_hdr_t *hdr, *found; 1878 arc_buf_t *buf; 1879 arc_buf_t *abuf; /* buffer we're assigning to callback */ 1880 kmutex_t *hash_lock; 1881 arc_callback_t *callback_list, *acb; 1882 int freeable = FALSE; 1883 1884 buf = zio->io_private; 1885 hdr = buf->b_hdr; 1886 1887 /* 1888 * The hdr was inserted into hash-table and removed from lists 1889 * prior to starting I/O. We should find this header, since 1890 * it's in the hash table, and it should be legit since it's 1891 * not possible to evict it during the I/O. The only possible 1892 * reason for it not to be found is if we were freed during the 1893 * read. 1894 */ 1895 found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 1896 &hash_lock); 1897 1898 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 1899 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp)))); 1900 1901 /* byteswap if necessary */ 1902 callback_list = hdr->b_acb; 1903 ASSERT(callback_list != NULL); 1904 if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 1905 callback_list->acb_byteswap(buf->b_data, hdr->b_size); 1906 1907 arc_cksum_compute(buf); 1908 1909 /* create copies of the data buffer for the callers */ 1910 abuf = buf; 1911 for (acb = callback_list; acb; acb = acb->acb_next) { 1912 if (acb->acb_done) { 1913 if (abuf == NULL) 1914 abuf = arc_buf_clone(buf); 1915 acb->acb_buf = abuf; 1916 abuf = NULL; 1917 } 1918 } 1919 hdr->b_acb = NULL; 1920 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 1921 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 1922 if (abuf == buf) 1923 hdr->b_flags |= ARC_BUF_AVAILABLE; 1924 1925 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 1926 1927 if (zio->io_error != 0) { 1928 hdr->b_flags |= ARC_IO_ERROR; 1929 if (hdr->b_state != arc_anon) 1930 arc_change_state(arc_anon, hdr, hash_lock); 1931 if (HDR_IN_HASH_TABLE(hdr)) 1932 buf_hash_remove(hdr); 1933 freeable = refcount_is_zero(&hdr->b_refcnt); 1934 /* convert checksum errors into IO errors */ 1935 if (zio->io_error == ECKSUM) 1936 zio->io_error = EIO; 1937 } 1938 1939 /* 1940 * Broadcast before we drop the hash_lock to avoid the possibility 1941 * that the hdr (and hence the cv) might be freed before we get to 1942 * the cv_broadcast(). 1943 */ 1944 cv_broadcast(&hdr->b_cv); 1945 1946 if (hash_lock) { 1947 /* 1948 * Only call arc_access on anonymous buffers. This is because 1949 * if we've issued an I/O for an evicted buffer, we've already 1950 * called arc_access (to prevent any simultaneous readers from 1951 * getting confused). 
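		 *
		 * A condensed sketch of the rest of this completion path
		 * (illustrative only; the code below is authoritative):
		 *
		 *	if (zio->io_error == 0 && hdr->b_state == arc_anon)
		 *		arc_access(hdr, hash_lock);	(hdr joins the MRU list)
		 *	mutex_exit(hash_lock);
		 *	for each acb on callback_list:
		 *		acb->acb_done(zio, acb->acb_buf, acb->acb_private);
		 *	if (freeable)
		 *		arc_hdr_destroy(hdr);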
1952		 */
1953		if (zio->io_error == 0 && hdr->b_state == arc_anon)
1954			arc_access(hdr, hash_lock);
1955		mutex_exit(hash_lock);
1956	} else {
1957		/*
1958		 * This block was freed while we waited for the read to
1959		 * complete. It has been removed from the hash table and
1960		 * moved to the anonymous state (so that it won't show up
1961		 * in the cache).
1962		 */
1963		ASSERT3P(hdr->b_state, ==, arc_anon);
1964		freeable = refcount_is_zero(&hdr->b_refcnt);
1965	}
1966
1967	/* execute each callback and free its structure */
1968	while ((acb = callback_list) != NULL) {
1969		if (acb->acb_done)
1970			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1971
1972		if (acb->acb_zio_dummy != NULL) {
1973			acb->acb_zio_dummy->io_error = zio->io_error;
1974			zio_nowait(acb->acb_zio_dummy);
1975		}
1976
1977		callback_list = acb->acb_next;
1978		kmem_free(acb, sizeof (arc_callback_t));
1979	}
1980
1981	if (freeable)
1982		arc_hdr_destroy(hdr);
1983 }
1984
1985 /*
1986  * "Read" the block at the specified DVA (in bp) via the
1987  * cache. If the block is found in the cache, invoke the provided
1988  * callback immediately and return. Note that the `zio' parameter
1989  * in the callback will be NULL in this case, since no IO was
1990  * required. If the block is not in the cache pass the read request
1991  * on to the spa with a substitute callback function, so that the
1992  * requested block will be added to the cache.
1993  *
1994  * If a read request arrives for a block that has a read in-progress,
1995  * either wait for the in-progress read to complete (and return the
1996  * results); or, if this is a read with a "done" func, add a record
1997  * to the read to invoke the "done" func when the read completes,
1998  * and return; or just return.
1999  *
2000  * arc_read_done() will invoke all the requested "done" functions
2001  * for readers of this block.
2002  */
2003 int
2004 arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
2005     arc_done_func_t *done, void *private, int priority, int flags,
2006     uint32_t *arc_flags, zbookmark_t *zb)
2007 {
2008	arc_buf_hdr_t *hdr;
2009	arc_buf_t *buf;
2010	kmutex_t *hash_lock;
2011	zio_t *rzio;
2012
2013 top:
2014	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2015	if (hdr && hdr->b_datacnt > 0) {
2016
2017		*arc_flags |= ARC_CACHED;
2018
2019		if (HDR_IO_IN_PROGRESS(hdr)) {
2020
2021			if (*arc_flags & ARC_WAIT) {
2022				cv_wait(&hdr->b_cv, hash_lock);
2023				mutex_exit(hash_lock);
2024				goto top;
2025			}
2026			ASSERT(*arc_flags & ARC_NOWAIT);
2027
2028			if (done) {
2029				arc_callback_t *acb = NULL;
2030
2031				acb = kmem_zalloc(sizeof (arc_callback_t),
2032				    KM_SLEEP);
2033				acb->acb_done = done;
2034				acb->acb_private = private;
2035				acb->acb_byteswap = swap;
2036				if (pio != NULL)
2037					acb->acb_zio_dummy = zio_null(pio,
2038					    spa, NULL, NULL, flags);
2039
2040				ASSERT(acb->acb_done != NULL);
2041				acb->acb_next = hdr->b_acb;
2042				hdr->b_acb = acb;
2043				add_reference(hdr, hash_lock, private);
2044				mutex_exit(hash_lock);
2045				return (0);
2046			}
2047			mutex_exit(hash_lock);
2048			return (0);
2049		}
2050
2051		ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2052
2053		if (done) {
2054			add_reference(hdr, hash_lock, private);
2055			/*
2056			 * If this block is already in use, create a new
2057			 * copy of the data so that we will be guaranteed
2058			 * that arc_release() will always succeed.
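			 *
			 * Sketch of the choice made below (illustrative only):
			 *
			 *	if (HDR_BUF_AVAILABLE(hdr))
			 *		hand out hdr->b_buf itself (and clear the flag);
			 *	else
			 *		buf = arc_buf_clone(buf);	(private copy of the data)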
2059 */ 2060 buf = hdr->b_buf; 2061 ASSERT(buf); 2062 ASSERT(buf->b_data); 2063 if (HDR_BUF_AVAILABLE(hdr)) { 2064 ASSERT(buf->b_efunc == NULL); 2065 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2066 } else { 2067 buf = arc_buf_clone(buf); 2068 } 2069 } else if (*arc_flags & ARC_PREFETCH && 2070 refcount_count(&hdr->b_refcnt) == 0) { 2071 hdr->b_flags |= ARC_PREFETCH; 2072 } 2073 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2074 arc_access(hdr, hash_lock); 2075 mutex_exit(hash_lock); 2076 ARCSTAT_BUMP(arcstat_hits); 2077 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2078 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2079 data, metadata, hits); 2080 2081 if (done) 2082 done(NULL, buf, private); 2083 } else { 2084 uint64_t size = BP_GET_LSIZE(bp); 2085 arc_callback_t *acb; 2086 2087 if (hdr == NULL) { 2088 /* this block is not in the cache */ 2089 arc_buf_hdr_t *exists; 2090 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2091 buf = arc_buf_alloc(spa, size, private, type); 2092 hdr = buf->b_hdr; 2093 hdr->b_dva = *BP_IDENTITY(bp); 2094 hdr->b_birth = bp->blk_birth; 2095 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2096 exists = buf_hash_insert(hdr, &hash_lock); 2097 if (exists) { 2098 /* somebody beat us to the hash insert */ 2099 mutex_exit(hash_lock); 2100 bzero(&hdr->b_dva, sizeof (dva_t)); 2101 hdr->b_birth = 0; 2102 hdr->b_cksum0 = 0; 2103 (void) arc_buf_remove_ref(buf, private); 2104 goto top; /* restart the IO request */ 2105 } 2106 /* if this is a prefetch, we don't have a reference */ 2107 if (*arc_flags & ARC_PREFETCH) { 2108 (void) remove_reference(hdr, hash_lock, 2109 private); 2110 hdr->b_flags |= ARC_PREFETCH; 2111 } 2112 if (BP_GET_LEVEL(bp) > 0) 2113 hdr->b_flags |= ARC_INDIRECT; 2114 } else { 2115 /* this block is in the ghost cache */ 2116 ASSERT(GHOST_STATE(hdr->b_state)); 2117 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2118 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2119 ASSERT(hdr->b_buf == NULL); 2120 2121 /* if this is a prefetch, we don't have a reference */ 2122 if (*arc_flags & ARC_PREFETCH) 2123 hdr->b_flags |= ARC_PREFETCH; 2124 else 2125 add_reference(hdr, hash_lock, private); 2126 buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 2127 buf->b_hdr = hdr; 2128 buf->b_data = NULL; 2129 buf->b_efunc = NULL; 2130 buf->b_private = NULL; 2131 buf->b_next = NULL; 2132 hdr->b_buf = buf; 2133 arc_get_data_buf(buf); 2134 ASSERT(hdr->b_datacnt == 0); 2135 hdr->b_datacnt = 1; 2136 2137 } 2138 2139 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2140 acb->acb_done = done; 2141 acb->acb_private = private; 2142 acb->acb_byteswap = swap; 2143 2144 ASSERT(hdr->b_acb == NULL); 2145 hdr->b_acb = acb; 2146 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2147 2148 /* 2149 * If the buffer has been evicted, migrate it to a present state 2150 * before issuing the I/O. Once we drop the hash-table lock, 2151 * the header will be marked as I/O in progress and have an 2152 * attached buffer. At this point, anybody who finds this 2153 * buffer ought to notice that it's legit but has a pending I/O. 
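 *
 * Concretely, a later arc_read() of the same block will find this
 * header, see HDR_IO_IN_PROGRESS, and (exactly as in the hit path
 * above) either block on hdr->b_cv or chain a callback onto
 * hdr->b_acb; sketch:
 *
 *	if (HDR_IO_IN_PROGRESS(hdr)) {
 *		if (*arc_flags & ARC_WAIT)
 *			cv_wait(&hdr->b_cv, hash_lock);	(then retry the lookup)
 *		else if (done != NULL)
 *			queue an arc_callback_t on hdr->b_acb;
 *	}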
2154 */ 2155 2156 if (GHOST_STATE(hdr->b_state)) 2157 arc_access(hdr, hash_lock); 2158 mutex_exit(hash_lock); 2159 2160 ASSERT3U(hdr->b_size, ==, size); 2161 DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 2162 zbookmark_t *, zb); 2163 ARCSTAT_BUMP(arcstat_misses); 2164 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2165 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2166 data, metadata, misses); 2167 2168 rzio = zio_read(pio, spa, bp, buf->b_data, size, 2169 arc_read_done, buf, priority, flags, zb); 2170 2171 if (*arc_flags & ARC_WAIT) 2172 return (zio_wait(rzio)); 2173 2174 ASSERT(*arc_flags & ARC_NOWAIT); 2175 zio_nowait(rzio); 2176 } 2177 return (0); 2178 } 2179 2180 /* 2181 * arc_read() variant to support pool traversal. If the block is already 2182 * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2183 * The idea is that we don't want pool traversal filling up memory, but 2184 * if the ARC already has the data anyway, we shouldn't pay for the I/O. 2185 */ 2186 int 2187 arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2188 { 2189 arc_buf_hdr_t *hdr; 2190 kmutex_t *hash_mtx; 2191 int rc = 0; 2192 2193 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2194 2195 if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 2196 arc_buf_t *buf = hdr->b_buf; 2197 2198 ASSERT(buf); 2199 while (buf->b_data == NULL) { 2200 buf = buf->b_next; 2201 ASSERT(buf); 2202 } 2203 bcopy(buf->b_data, data, hdr->b_size); 2204 } else { 2205 rc = ENOENT; 2206 } 2207 2208 if (hash_mtx) 2209 mutex_exit(hash_mtx); 2210 2211 return (rc); 2212 } 2213 2214 void 2215 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 2216 { 2217 ASSERT(buf->b_hdr != NULL); 2218 ASSERT(buf->b_hdr->b_state != arc_anon); 2219 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 2220 buf->b_efunc = func; 2221 buf->b_private = private; 2222 } 2223 2224 /* 2225 * This is used by the DMU to let the ARC know that a buffer is 2226 * being evicted, so the ARC should clean up. If this arc buf 2227 * is not yet in the evicted state, it will be put there. 2228 */ 2229 int 2230 arc_buf_evict(arc_buf_t *buf) 2231 { 2232 arc_buf_hdr_t *hdr; 2233 kmutex_t *hash_lock; 2234 arc_buf_t **bufp; 2235 2236 mutex_enter(&arc_eviction_mtx); 2237 hdr = buf->b_hdr; 2238 if (hdr == NULL) { 2239 /* 2240 * We are in arc_do_user_evicts(). 2241 */ 2242 ASSERT(buf->b_data == NULL); 2243 mutex_exit(&arc_eviction_mtx); 2244 return (0); 2245 } 2246 hash_lock = HDR_LOCK(hdr); 2247 mutex_exit(&arc_eviction_mtx); 2248 2249 mutex_enter(hash_lock); 2250 2251 if (buf->b_data == NULL) { 2252 /* 2253 * We are on the eviction list. 2254 */ 2255 mutex_exit(hash_lock); 2256 mutex_enter(&arc_eviction_mtx); 2257 if (buf->b_hdr == NULL) { 2258 /* 2259 * We are already in arc_do_user_evicts(). 2260 */ 2261 mutex_exit(&arc_eviction_mtx); 2262 return (0); 2263 } else { 2264 arc_buf_t copy = *buf; /* structure assignment */ 2265 /* 2266 * Process this buffer now 2267 * but let arc_do_user_evicts() do the reaping. 
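			 *
			 * The structure assignment above matters: once
			 * arc_eviction_mtx is dropped, arc_do_user_evicts()
			 * may reap *buf, so the callback is invoked on the
			 * private stack copy, and b_efunc is cleared first so
			 * the callback cannot be issued twice.  In outline:
			 *
			 *	arc_buf_t copy = *buf;		(snapshot b_efunc/b_private)
			 *	buf->b_efunc = NULL;		(disarm the original)
			 *	mutex_exit(&arc_eviction_mtx);
			 *	VERIFY(copy.b_efunc(&copy) == 0);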
2268			 */
2269			buf->b_efunc = NULL;
2270			mutex_exit(&arc_eviction_mtx);
2271			VERIFY(copy.b_efunc(&copy) == 0);
2272			return (1);
2273		}
2274	}
2275
2276	ASSERT(buf->b_hdr == hdr);
2277	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2278	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2279
2280	/*
2281	 * Pull this buffer off of the hdr
2282	 */
2283	bufp = &hdr->b_buf;
2284	while (*bufp != buf)
2285		bufp = &(*bufp)->b_next;
2286	*bufp = buf->b_next;
2287
2288	ASSERT(buf->b_data != NULL);
2289	arc_buf_destroy(buf, FALSE, FALSE);
2290
2291	if (hdr->b_datacnt == 0) {
2292		arc_state_t *old_state = hdr->b_state;
2293		arc_state_t *evicted_state;
2294
2295		ASSERT(refcount_is_zero(&hdr->b_refcnt));
2296
2297		evicted_state =
2298		    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
2299
2300		mutex_enter(&old_state->arcs_mtx);
2301		mutex_enter(&evicted_state->arcs_mtx);
2302
2303		arc_change_state(evicted_state, hdr, hash_lock);
2304		ASSERT(HDR_IN_HASH_TABLE(hdr));
2305		hdr->b_flags = ARC_IN_HASH_TABLE;
2306
2307		mutex_exit(&evicted_state->arcs_mtx);
2308		mutex_exit(&old_state->arcs_mtx);
2309	}
2310	mutex_exit(hash_lock);
2311
2312	VERIFY(buf->b_efunc(buf) == 0);
2313	buf->b_efunc = NULL;
2314	buf->b_private = NULL;
2315	buf->b_hdr = NULL;
2316	kmem_cache_free(buf_cache, buf);
2317	return (1);
2318 }
2319
2320 /*
2321  * Release this buffer from the cache. This must be done
2322  * after a read and prior to modifying the buffer contents.
2323  * If the buffer has more than one reference, we must make
2324  * a new hdr for the buffer.
2325  */
2326 void
2327 arc_release(arc_buf_t *buf, void *tag)
2328 {
2329	arc_buf_hdr_t *hdr = buf->b_hdr;
2330	kmutex_t *hash_lock = HDR_LOCK(hdr);
2331
2332	/* this buffer is not on any list */
2333	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2334
2335	if (hdr->b_state == arc_anon) {
2336		/* this buffer is already released */
2337		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2338		ASSERT(BUF_EMPTY(hdr));
2339		ASSERT(buf->b_efunc == NULL);
2340		arc_buf_thaw(buf);
2341		return;
2342	}
2343
2344	mutex_enter(hash_lock);
2345
2346	/*
2347	 * Do we have more than one buf?
2348	 */
2349	if (hdr->b_buf != buf || buf->b_next != NULL) {
2350		arc_buf_hdr_t *nhdr;
2351		arc_buf_t **bufp;
2352		uint64_t blksz = hdr->b_size;
2353		spa_t *spa = hdr->b_spa;
2354		arc_buf_contents_t type = hdr->b_type;
2355
2356		ASSERT(hdr->b_datacnt > 1);
2357		/*
2358		 * Pull the data off of this buf and attach it to
2359		 * a new anonymous buf.
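		 *
		 * In outline (the code below is authoritative):
		 *
		 *	unlink buf from hdr's b_buf list;
		 *	atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
		 *	hdr->b_datacnt -= 1;
		 *	nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);	(fresh header)
		 *	nhdr->b_state = arc_anon;
		 *	buf->b_hdr = nhdr;
		 *	atomic_add_64(&arc_anon->arcs_size, blksz);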
2360		 */
2361		(void) remove_reference(hdr, hash_lock, tag);
2362		bufp = &hdr->b_buf;
2363		while (*bufp != buf)
2364			bufp = &(*bufp)->b_next;
2365		*bufp = (*bufp)->b_next;
2366		buf->b_next = NULL;
2367
2368		ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
2369		atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
2370		if (refcount_is_zero(&hdr->b_refcnt)) {
2371			ASSERT3U(hdr->b_state->arcs_lsize, >=, hdr->b_size);
2372			atomic_add_64(&hdr->b_state->arcs_lsize, -hdr->b_size);
2373		}
2374		hdr->b_datacnt -= 1;
2375		arc_cksum_verify(buf);
2376
2377		mutex_exit(hash_lock);
2378
2379		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2380		nhdr->b_size = blksz;
2381		nhdr->b_spa = spa;
2382		nhdr->b_type = type;
2383		nhdr->b_buf = buf;
2384		nhdr->b_state = arc_anon;
2385		nhdr->b_arc_access = 0;
2386		nhdr->b_flags = 0;
2387		nhdr->b_datacnt = 1;
2388		nhdr->b_freeze_cksum = NULL;
2389		(void) refcount_add(&nhdr->b_refcnt, tag);
2390		buf->b_hdr = nhdr;
2391		atomic_add_64(&arc_anon->arcs_size, blksz);
2392
2393		hdr = nhdr;
2394	} else {
2395		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2396		ASSERT(!list_link_active(&hdr->b_arc_node));
2397		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2398		arc_change_state(arc_anon, hdr, hash_lock);
2399		hdr->b_arc_access = 0;
2400		mutex_exit(hash_lock);
2401		bzero(&hdr->b_dva, sizeof (dva_t));
2402		hdr->b_birth = 0;
2403		hdr->b_cksum0 = 0;
2404		arc_buf_thaw(buf);
2405	}
2406	buf->b_efunc = NULL;
2407	buf->b_private = NULL;
2408 }
2409
2410 int
2411 arc_released(arc_buf_t *buf)
2412 {
2413	return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
2414 }
2415
2416 int
2417 arc_has_callback(arc_buf_t *buf)
2418 {
2419	return (buf->b_efunc != NULL);
2420 }
2421
2422 #ifdef ZFS_DEBUG
2423 int
2424 arc_referenced(arc_buf_t *buf)
2425 {
2426	return (refcount_count(&buf->b_hdr->b_refcnt));
2427 }
2428 #endif
2429
2430 static void
2431 arc_write_ready(zio_t *zio)
2432 {
2433	arc_write_callback_t *callback = zio->io_private;
2434	arc_buf_t *buf = callback->awcb_buf;
2435
2436	if (callback->awcb_ready) {
2437		ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
2438		callback->awcb_ready(zio, buf, callback->awcb_private);
2439	}
2440	arc_cksum_compute(buf);
2441 }
2442
2443 static void
2444 arc_write_done(zio_t *zio)
2445 {
2446	arc_write_callback_t *callback = zio->io_private;
2447	arc_buf_t *buf = callback->awcb_buf;
2448	arc_buf_hdr_t *hdr = buf->b_hdr;
2449
2450	hdr->b_acb = NULL;
2451
2452	/* this buffer is on no lists and is not in the hash table */
2453	ASSERT3P(hdr->b_state, ==, arc_anon);
2454
2455	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2456	hdr->b_birth = zio->io_bp->blk_birth;
2457	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
2458	/*
2459	 * If the block to be written was all-zero, we may have
2460	 * compressed it away. In this case no write was performed
2461	 * so there will be no dva/birth-date/checksum. The buffer
2462	 * must therefore remain anonymous (and uncached).
2463	 */
2464	if (!BUF_EMPTY(hdr)) {
2465		arc_buf_hdr_t *exists;
2466		kmutex_t *hash_lock;
2467
2468		arc_cksum_verify(buf);
2469
2470		exists = buf_hash_insert(hdr, &hash_lock);
2471		if (exists) {
2472			/*
2473			 * This can only happen if we overwrite for
2474			 * sync-to-convergence, because we remove
2475			 * buffers from the hash table when we arc_free().
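			 *
			 * Recovery sketch for that case (illustrative; the
			 * code below is authoritative): verify the existing
			 * header describes the same DVA and birth txg, kick
			 * it back to the anonymous state, destroy it, and
			 * retry the insert, which must then succeed:
			 *
			 *	arc_change_state(arc_anon, exists, hash_lock);
			 *	arc_hdr_destroy(exists);
			 *	exists = buf_hash_insert(hdr, &hash_lock);	(now NULL)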
2476 */ 2477 ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 2478 BP_IDENTITY(zio->io_bp))); 2479 ASSERT3U(zio->io_bp_orig.blk_birth, ==, 2480 zio->io_bp->blk_birth); 2481 2482 ASSERT(refcount_is_zero(&exists->b_refcnt)); 2483 arc_change_state(arc_anon, exists, hash_lock); 2484 mutex_exit(hash_lock); 2485 arc_hdr_destroy(exists); 2486 exists = buf_hash_insert(hdr, &hash_lock); 2487 ASSERT3P(exists, ==, NULL); 2488 } 2489 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2490 arc_access(hdr, hash_lock); 2491 mutex_exit(hash_lock); 2492 } else if (callback->awcb_done == NULL) { 2493 int destroy_hdr; 2494 /* 2495 * This is an anonymous buffer with no user callback, 2496 * destroy it if there are no active references. 2497 */ 2498 mutex_enter(&arc_eviction_mtx); 2499 destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 2500 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2501 mutex_exit(&arc_eviction_mtx); 2502 if (destroy_hdr) 2503 arc_hdr_destroy(hdr); 2504 } else { 2505 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2506 } 2507 2508 if (callback->awcb_done) { 2509 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 2510 callback->awcb_done(zio, buf, callback->awcb_private); 2511 } 2512 2513 kmem_free(callback, sizeof (arc_write_callback_t)); 2514 } 2515 2516 zio_t * 2517 arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, 2518 uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 2519 arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 2520 int flags, zbookmark_t *zb) 2521 { 2522 arc_buf_hdr_t *hdr = buf->b_hdr; 2523 arc_write_callback_t *callback; 2524 zio_t *zio; 2525 2526 /* this is a private buffer - no locking required */ 2527 ASSERT3P(hdr->b_state, ==, arc_anon); 2528 ASSERT(BUF_EMPTY(hdr)); 2529 ASSERT(!HDR_IO_ERROR(hdr)); 2530 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 2531 ASSERT(hdr->b_acb == 0); 2532 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 2533 callback->awcb_ready = ready; 2534 callback->awcb_done = done; 2535 callback->awcb_private = private; 2536 callback->awcb_buf = buf; 2537 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2538 zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, 2539 buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback, 2540 priority, flags, zb); 2541 2542 return (zio); 2543 } 2544 2545 int 2546 arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 2547 zio_done_func_t *done, void *private, uint32_t arc_flags) 2548 { 2549 arc_buf_hdr_t *ab; 2550 kmutex_t *hash_lock; 2551 zio_t *zio; 2552 2553 /* 2554 * If this buffer is in the cache, release it, so it 2555 * can be re-used. 2556 */ 2557 ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2558 if (ab != NULL) { 2559 /* 2560 * The checksum of blocks to free is not always 2561 * preserved (eg. on the deadlist). However, if it is 2562 * nonzero, it should match what we have in the cache. 2563 */ 2564 ASSERT(bp->blk_cksum.zc_word[0] == 0 || 2565 ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 2566 if (ab->b_state != arc_anon) 2567 arc_change_state(arc_anon, ab, hash_lock); 2568 if (HDR_IO_IN_PROGRESS(ab)) { 2569 /* 2570 * This should only happen when we prefetch. 
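 *
 * For reference, the three cases handled for a cached header here
 * are roughly (summary sketch; the code is authoritative):
 *
 *	if (HDR_IO_IN_PROGRESS(ab))		(a prefetch still in flight)
 *		flag ARC_FREED_IN_READ, drop the identity, and let
 *		arc_read_done() discard the data when the read finishes;
 *	else if (refcount_is_zero(&ab->b_refcnt))
 *		arc_hdr_destroy(ab);		(gone immediately)
 *	else					(e.g. dbuf_unoverride())
 *		drop the identity; the header stays anonymous until its
 *		last reference is released.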
2571 */ 2572 ASSERT(ab->b_flags & ARC_PREFETCH); 2573 ASSERT3U(ab->b_datacnt, ==, 1); 2574 ab->b_flags |= ARC_FREED_IN_READ; 2575 if (HDR_IN_HASH_TABLE(ab)) 2576 buf_hash_remove(ab); 2577 ab->b_arc_access = 0; 2578 bzero(&ab->b_dva, sizeof (dva_t)); 2579 ab->b_birth = 0; 2580 ab->b_cksum0 = 0; 2581 ab->b_buf->b_efunc = NULL; 2582 ab->b_buf->b_private = NULL; 2583 mutex_exit(hash_lock); 2584 } else if (refcount_is_zero(&ab->b_refcnt)) { 2585 mutex_exit(hash_lock); 2586 arc_hdr_destroy(ab); 2587 ARCSTAT_BUMP(arcstat_deleted); 2588 } else { 2589 /* 2590 * We still have an active reference on this 2591 * buffer. This can happen, e.g., from 2592 * dbuf_unoverride(). 2593 */ 2594 ASSERT(!HDR_IN_HASH_TABLE(ab)); 2595 ab->b_arc_access = 0; 2596 bzero(&ab->b_dva, sizeof (dva_t)); 2597 ab->b_birth = 0; 2598 ab->b_cksum0 = 0; 2599 ab->b_buf->b_efunc = NULL; 2600 ab->b_buf->b_private = NULL; 2601 mutex_exit(hash_lock); 2602 } 2603 } 2604 2605 zio = zio_free(pio, spa, txg, bp, done, private); 2606 2607 if (arc_flags & ARC_WAIT) 2608 return (zio_wait(zio)); 2609 2610 ASSERT(arc_flags & ARC_NOWAIT); 2611 zio_nowait(zio); 2612 2613 return (0); 2614 } 2615 2616 void 2617 arc_tempreserve_clear(uint64_t tempreserve) 2618 { 2619 atomic_add_64(&arc_tempreserve, -tempreserve); 2620 ASSERT((int64_t)arc_tempreserve >= 0); 2621 } 2622 2623 int 2624 arc_tempreserve_space(uint64_t tempreserve) 2625 { 2626 #ifdef ZFS_DEBUG 2627 /* 2628 * Once in a while, fail for no reason. Everything should cope. 2629 */ 2630 if (spa_get_random(10000) == 0) { 2631 dprintf("forcing random failure\n"); 2632 return (ERESTART); 2633 } 2634 #endif 2635 if (tempreserve > arc_c/4 && !arc_no_grow) 2636 arc_c = MIN(arc_c_max, tempreserve * 4); 2637 if (tempreserve > arc_c) 2638 return (ENOMEM); 2639 2640 /* 2641 * Throttle writes when the amount of dirty data in the cache 2642 * gets too large. We try to keep the cache less than half full 2643 * of dirty blocks so that our sync times don't grow too large. 2644 * Note: if two requests come in concurrently, we might let them 2645 * both succeed, when one of them should fail. Not a huge deal. 2646 * 2647 * XXX The limit should be adjusted dynamically to keep the time 2648 * to sync a dataset fixed (around 1-5 seconds?). 2649 */ 2650 2651 if (tempreserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 2652 arc_tempreserve + arc_anon->arcs_size > arc_c / 4) { 2653 dprintf("failing, arc_tempreserve=%lluK anon=%lluK " 2654 "tempreserve=%lluK arc_c=%lluK\n", 2655 arc_tempreserve>>10, arc_anon->arcs_lsize>>10, 2656 tempreserve>>10, arc_c>>10); 2657 return (ERESTART); 2658 } 2659 atomic_add_64(&arc_tempreserve, tempreserve); 2660 return (0); 2661 } 2662 2663 void 2664 arc_init(void) 2665 { 2666 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 2667 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 2668 2669 /* Convert seconds to clock ticks */ 2670 arc_min_prefetch_lifespan = 1 * hz; 2671 2672 /* Start out with 1/8 of all memory */ 2673 arc_c = physmem * PAGESIZE / 8; 2674 2675 #ifdef _KERNEL 2676 /* 2677 * On architectures where the physical memory can be larger 2678 * than the addressable space (intel in 32-bit mode), we may 2679 * need to limit the cache to 1/8 of VM size. 
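 *
 * For a feel for the resulting defaults, consider (purely as an
 * illustration) a 64-bit machine with 4 GB of physical memory, no
 * tunable overrides and kmem debugging off, so the VM clamp described
 * here does not apply:
 *
 *	arc_c     = 4 GB / 8				= 512 MB
 *	arc_c_min = MAX(arc_c / 4, 64 MB)		= 128 MB
 *	arc_c_max = MAX(arc_c * 6, arc_c * 8 - 1 GB)	= 3 GB
 *	arc_c     = arc_c_max				= 3 GB
 *	arc_p     = arc_c / 2				= 1.5 GB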
2680 */ 2681 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 2682 #endif 2683 2684 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 2685 arc_c_min = MAX(arc_c / 4, 64<<20); 2686 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 2687 if (arc_c * 8 >= 1<<30) 2688 arc_c_max = (arc_c * 8) - (1<<30); 2689 else 2690 arc_c_max = arc_c_min; 2691 arc_c_max = MAX(arc_c * 6, arc_c_max); 2692 2693 /* 2694 * Allow the tunables to override our calculations if they are 2695 * reasonable (ie. over 64MB) 2696 */ 2697 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 2698 arc_c_max = zfs_arc_max; 2699 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 2700 arc_c_min = zfs_arc_min; 2701 2702 arc_c = arc_c_max; 2703 arc_p = (arc_c >> 1); 2704 2705 /* if kmem_flags are set, lets try to use less memory */ 2706 if (kmem_debugging()) 2707 arc_c = arc_c / 2; 2708 if (arc_c < arc_c_min) 2709 arc_c = arc_c_min; 2710 2711 arc_anon = &ARC_anon; 2712 arc_mru = &ARC_mru; 2713 arc_mru_ghost = &ARC_mru_ghost; 2714 arc_mfu = &ARC_mfu; 2715 arc_mfu_ghost = &ARC_mfu_ghost; 2716 arc_size = 0; 2717 2718 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 2719 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 2720 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 2721 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 2722 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 2723 2724 list_create(&arc_mru->arcs_list, sizeof (arc_buf_hdr_t), 2725 offsetof(arc_buf_hdr_t, b_arc_node)); 2726 list_create(&arc_mru_ghost->arcs_list, sizeof (arc_buf_hdr_t), 2727 offsetof(arc_buf_hdr_t, b_arc_node)); 2728 list_create(&arc_mfu->arcs_list, sizeof (arc_buf_hdr_t), 2729 offsetof(arc_buf_hdr_t, b_arc_node)); 2730 list_create(&arc_mfu_ghost->arcs_list, sizeof (arc_buf_hdr_t), 2731 offsetof(arc_buf_hdr_t, b_arc_node)); 2732 2733 buf_init(); 2734 2735 arc_thread_exit = 0; 2736 arc_eviction_list = NULL; 2737 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 2738 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 2739 2740 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 2741 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 2742 2743 if (arc_ksp != NULL) { 2744 arc_ksp->ks_data = &arc_stats; 2745 kstat_install(arc_ksp); 2746 } 2747 2748 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 2749 TS_RUN, minclsyspri); 2750 2751 arc_dead = FALSE; 2752 } 2753 2754 void 2755 arc_fini(void) 2756 { 2757 mutex_enter(&arc_reclaim_thr_lock); 2758 arc_thread_exit = 1; 2759 while (arc_thread_exit != 0) 2760 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 2761 mutex_exit(&arc_reclaim_thr_lock); 2762 2763 arc_flush(); 2764 2765 arc_dead = TRUE; 2766 2767 if (arc_ksp != NULL) { 2768 kstat_delete(arc_ksp); 2769 arc_ksp = NULL; 2770 } 2771 2772 mutex_destroy(&arc_eviction_mtx); 2773 mutex_destroy(&arc_reclaim_thr_lock); 2774 cv_destroy(&arc_reclaim_thr_cv); 2775 2776 list_destroy(&arc_mru->arcs_list); 2777 list_destroy(&arc_mru_ghost->arcs_list); 2778 list_destroy(&arc_mfu->arcs_list); 2779 list_destroy(&arc_mfu_ghost->arcs_list); 2780 2781 mutex_destroy(&arc_anon->arcs_mtx); 2782 mutex_destroy(&arc_mru->arcs_mtx); 2783 mutex_destroy(&arc_mru_ghost->arcs_mtx); 2784 mutex_destroy(&arc_mfu->arcs_mtx); 2785 mutex_destroy(&arc_mfu_ghost->arcs_mtx); 2786 2787 buf_fini(); 2788 } 2789
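
/*
 * An illustrative example (not part of the ARC itself) of how a caller
 * might perform a synchronous, cached read through arc_read(), modeled
 * on typical DMU usage.  The byteswap routine, priority and flag choices
 * below are assumptions made for the example; spa, bp and zb are assumed
 * to be in scope.
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *	int error;
 *
 *	error = arc_read(NULL, spa, bp, byteswap_uint64_array,
 *	    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
 *	    ZIO_FLAG_CANFAIL, &aflags, &zb);
 *	if (error == 0 && abuf != NULL) {
 *		(use abuf->b_data, abuf->b_hdr->b_size bytes)
 *		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
 *	}
 */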