/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 */
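
/*
 * Illustrative sketch (not part of the implementation): a synchronous,
 * cached read by a hypothetical caller, using the arc_read() interface
 * described above together with the generic arc_getbuf_func() callback
 * defined later in this file.  The spa/bp/zb arguments, the byteswap
 * function, the zio priority/flags, and consume() are placeholders
 * assumed to be supplied by the caller; error handling is omitted.
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *
 *	(void) arc_read(NULL, spa, bp, swapfunc, arc_getbuf_func, &abuf,
 *	    priority, zio_flags, &aflags, zb);
 *	if (abuf != NULL) {
 *		consume(abuf->b_data, arc_buf_size(abuf));
 *		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
 *	}
 *
 * On a cache hit the done callback runs immediately with a NULL zio;
 * on a miss it runs from arc_read_done() once the I/O completes, and
 * ARC_WAIT makes arc_read() block until that happens.
 */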

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static kmutex_t arc_reclaim_lock;
static int arc_dead;

/*
 * Note that buffers can be in one of 5 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 * When there are no active references to the buffer, they
 * are linked onto one of the lists in arc.  These are the
 * only buffers that can be evicted or deleted.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 */

typedef struct arc_state {
	list_t		list;	/* linked list of evictable buffers in state */
	uint64_t	lsize;	/* total size of buffers in the linked list */
	uint64_t	size;	/* total size of all buffers in this state */
	uint64_t	hits;
	kmutex_t	mtx;
} arc_state_t;

/* The 5 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;

static struct arc {
	arc_state_t	*anon;
	arc_state_t	*mru;
	arc_state_t	*mru_ghost;
	arc_state_t	*mfu;
	arc_state_t	*mfu_ghost;
	uint64_t	size;		/* Actual total arc size */
	uint64_t	p;		/* Target size (in bytes) of mru */
	uint64_t	c;		/* Target size of cache (in bytes) */
	uint64_t	c_min;		/* Minimum target cache size */
	uint64_t	c_max;		/* Maximum target cache size */

	/* performance stats */
	uint64_t	hits;
	uint64_t	misses;
	uint64_t	deleted;
	uint64_t	recycle_miss;
	uint64_t	mutex_miss;
	uint64_t	evict_skip;
	uint64_t	hash_elements;
	uint64_t	hash_elements_max;
	uint64_t	hash_collisions;
	uint64_t	hash_chains;
	uint32_t	hash_chain_max;

	int		no_grow;	/* Don't try to grow cache size */
} arc;

static uint64_t arc_tempreserve;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	arc_done_func_t		*acb_done;
	void			*acb_private;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

struct arc_buf_hdr {
	/* immutable */
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	kcondvar_t		b_cv;
	arc_callback_t		*b_acb;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);

#define	GHOST_STATE(state)	\
	((state) == arc.mru_ghost || (state) == arc.mfu_ghost)

/*
 * Private ARC flags.  These flags are private, ARC-only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
269 */ 270 271 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 272 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 273 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 274 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 275 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 276 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 277 278 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 279 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 280 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 281 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 282 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 283 284 /* 285 * Hash table routines 286 */ 287 288 #define HT_LOCK_PAD 64 289 290 struct ht_lock { 291 kmutex_t ht_lock; 292 #ifdef _KERNEL 293 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 294 #endif 295 }; 296 297 #define BUF_LOCKS 256 298 typedef struct buf_hash_table { 299 uint64_t ht_mask; 300 arc_buf_hdr_t **ht_table; 301 struct ht_lock ht_locks[BUF_LOCKS]; 302 } buf_hash_table_t; 303 304 static buf_hash_table_t buf_hash_table; 305 306 #define BUF_HASH_INDEX(spa, dva, birth) \ 307 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 308 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 309 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 310 #define HDR_LOCK(buf) \ 311 (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) 312 313 uint64_t zfs_crc64_table[256]; 314 315 static uint64_t 316 buf_hash(spa_t *spa, dva_t *dva, uint64_t birth) 317 { 318 uintptr_t spav = (uintptr_t)spa; 319 uint8_t *vdva = (uint8_t *)dva; 320 uint64_t crc = -1ULL; 321 int i; 322 323 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 324 325 for (i = 0; i < sizeof (dva_t); i++) 326 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 327 328 crc ^= (spav>>8) ^ birth; 329 330 return (crc); 331 } 332 333 #define BUF_EMPTY(buf) \ 334 ((buf)->b_dva.dva_word[0] == 0 && \ 335 (buf)->b_dva.dva_word[1] == 0 && \ 336 (buf)->b_birth == 0) 337 338 #define BUF_EQUAL(spa, dva, birth, buf) \ 339 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 340 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 341 ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 342 343 static arc_buf_hdr_t * 344 buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp) 345 { 346 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 347 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 348 arc_buf_hdr_t *buf; 349 350 mutex_enter(hash_lock); 351 for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 352 buf = buf->b_hash_next) { 353 if (BUF_EQUAL(spa, dva, birth, buf)) { 354 *lockp = hash_lock; 355 return (buf); 356 } 357 } 358 mutex_exit(hash_lock); 359 *lockp = NULL; 360 return (NULL); 361 } 362 363 /* 364 * Insert an entry into the hash table. If there is already an element 365 * equal to elem in the hash table, then the already existing element 366 * will be returned and the new element will not be inserted. 367 * Otherwise returns NULL. 
368 */ 369 static arc_buf_hdr_t * 370 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 371 { 372 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 373 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 374 arc_buf_hdr_t *fbuf; 375 uint32_t max, i; 376 377 ASSERT(!HDR_IN_HASH_TABLE(buf)); 378 *lockp = hash_lock; 379 mutex_enter(hash_lock); 380 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 381 fbuf = fbuf->b_hash_next, i++) { 382 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 383 return (fbuf); 384 } 385 386 buf->b_hash_next = buf_hash_table.ht_table[idx]; 387 buf_hash_table.ht_table[idx] = buf; 388 buf->b_flags |= ARC_IN_HASH_TABLE; 389 390 /* collect some hash table performance data */ 391 if (i > 0) { 392 atomic_add_64(&arc.hash_collisions, 1); 393 if (i == 1) 394 atomic_add_64(&arc.hash_chains, 1); 395 } 396 while (i > (max = arc.hash_chain_max) && 397 max != atomic_cas_32(&arc.hash_chain_max, max, i)) { 398 continue; 399 } 400 atomic_add_64(&arc.hash_elements, 1); 401 if (arc.hash_elements > arc.hash_elements_max) 402 atomic_add_64(&arc.hash_elements_max, 1); 403 404 return (NULL); 405 } 406 407 static void 408 buf_hash_remove(arc_buf_hdr_t *buf) 409 { 410 arc_buf_hdr_t *fbuf, **bufp; 411 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 412 413 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 414 ASSERT(HDR_IN_HASH_TABLE(buf)); 415 416 bufp = &buf_hash_table.ht_table[idx]; 417 while ((fbuf = *bufp) != buf) { 418 ASSERT(fbuf != NULL); 419 bufp = &fbuf->b_hash_next; 420 } 421 *bufp = buf->b_hash_next; 422 buf->b_hash_next = NULL; 423 buf->b_flags &= ~ARC_IN_HASH_TABLE; 424 425 /* collect some hash table performance data */ 426 atomic_add_64(&arc.hash_elements, -1); 427 if (buf_hash_table.ht_table[idx] && 428 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 429 atomic_add_64(&arc.hash_chains, -1); 430 } 431 432 /* 433 * Global data structures and functions for the buf kmem cache. 434 */ 435 static kmem_cache_t *hdr_cache; 436 static kmem_cache_t *buf_cache; 437 438 static void 439 buf_fini(void) 440 { 441 int i; 442 443 kmem_free(buf_hash_table.ht_table, 444 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 445 for (i = 0; i < BUF_LOCKS; i++) 446 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 447 kmem_cache_destroy(hdr_cache); 448 kmem_cache_destroy(buf_cache); 449 } 450 451 /* 452 * Constructor callback - called when the cache is empty 453 * and a new buf is requested. 454 */ 455 /* ARGSUSED */ 456 static int 457 hdr_cons(void *vbuf, void *unused, int kmflag) 458 { 459 arc_buf_hdr_t *buf = vbuf; 460 461 bzero(buf, sizeof (arc_buf_hdr_t)); 462 refcount_create(&buf->b_refcnt); 463 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 464 return (0); 465 } 466 467 /* 468 * Destructor callback - called when a cached buf is 469 * no longer required. 470 */ 471 /* ARGSUSED */ 472 static void 473 hdr_dest(void *vbuf, void *unused) 474 { 475 arc_buf_hdr_t *buf = vbuf; 476 477 refcount_destroy(&buf->b_refcnt); 478 cv_destroy(&buf->b_cv); 479 } 480 481 static int arc_reclaim_needed(void); 482 void arc_kmem_reclaim(void); 483 484 /* 485 * Reclaim callback -- invoked when memory is low. 
486 */ 487 /* ARGSUSED */ 488 static void 489 hdr_recl(void *unused) 490 { 491 dprintf("hdr_recl called\n"); 492 if (arc_reclaim_needed()) 493 arc_kmem_reclaim(); 494 } 495 496 static void 497 buf_init(void) 498 { 499 uint64_t *ct; 500 uint64_t hsize = 1ULL << 12; 501 int i, j; 502 503 /* 504 * The hash table is big enough to fill all of physical memory 505 * with an average 64K block size. The table will take up 506 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 507 */ 508 while (hsize * 65536 < physmem * PAGESIZE) 509 hsize <<= 1; 510 retry: 511 buf_hash_table.ht_mask = hsize - 1; 512 buf_hash_table.ht_table = 513 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 514 if (buf_hash_table.ht_table == NULL) { 515 ASSERT(hsize > (1ULL << 8)); 516 hsize >>= 1; 517 goto retry; 518 } 519 520 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 521 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 522 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 523 0, NULL, NULL, NULL, NULL, NULL, 0); 524 525 for (i = 0; i < 256; i++) 526 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 527 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 528 529 for (i = 0; i < BUF_LOCKS; i++) { 530 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 531 NULL, MUTEX_DEFAULT, NULL); 532 } 533 } 534 535 #define ARC_MINTIME (hz>>4) /* 62 ms */ 536 537 static void 538 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 539 { 540 ASSERT(MUTEX_HELD(hash_lock)); 541 542 if ((refcount_add(&ab->b_refcnt, tag) == 1) && 543 (ab->b_state != arc.anon)) { 544 int delta = ab->b_size * ab->b_datacnt; 545 546 ASSERT(!MUTEX_HELD(&ab->b_state->mtx)); 547 mutex_enter(&ab->b_state->mtx); 548 ASSERT(list_link_active(&ab->b_arc_node)); 549 list_remove(&ab->b_state->list, ab); 550 if (GHOST_STATE(ab->b_state)) { 551 ASSERT3U(ab->b_datacnt, ==, 0); 552 ASSERT3P(ab->b_buf, ==, NULL); 553 delta = ab->b_size; 554 } 555 ASSERT(delta > 0); 556 ASSERT3U(ab->b_state->lsize, >=, delta); 557 atomic_add_64(&ab->b_state->lsize, -delta); 558 mutex_exit(&ab->b_state->mtx); 559 /* remove the prefetch flag is we get a reference */ 560 if (ab->b_flags & ARC_PREFETCH) 561 ab->b_flags &= ~ARC_PREFETCH; 562 } 563 } 564 565 static int 566 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 567 { 568 int cnt; 569 570 ASSERT(ab->b_state == arc.anon || MUTEX_HELD(hash_lock)); 571 ASSERT(!GHOST_STATE(ab->b_state)); 572 573 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 574 (ab->b_state != arc.anon)) { 575 576 ASSERT(!MUTEX_HELD(&ab->b_state->mtx)); 577 mutex_enter(&ab->b_state->mtx); 578 ASSERT(!list_link_active(&ab->b_arc_node)); 579 list_insert_head(&ab->b_state->list, ab); 580 ASSERT(ab->b_datacnt > 0); 581 atomic_add_64(&ab->b_state->lsize, ab->b_size * ab->b_datacnt); 582 ASSERT3U(ab->b_state->size, >=, ab->b_state->lsize); 583 mutex_exit(&ab->b_state->mtx); 584 } 585 return (cnt); 586 } 587 588 /* 589 * Move the supplied buffer to the indicated state. The mutex 590 * for the buffer must be held by the caller. 
591 */ 592 static void 593 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 594 { 595 arc_state_t *old_state = ab->b_state; 596 int refcnt = refcount_count(&ab->b_refcnt); 597 int from_delta, to_delta; 598 599 ASSERT(MUTEX_HELD(hash_lock)); 600 ASSERT(new_state != old_state); 601 ASSERT(refcnt == 0 || ab->b_datacnt > 0); 602 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 603 604 from_delta = to_delta = ab->b_datacnt * ab->b_size; 605 606 /* 607 * If this buffer is evictable, transfer it from the 608 * old state list to the new state list. 609 */ 610 if (refcnt == 0) { 611 if (old_state != arc.anon) { 612 int use_mutex = !MUTEX_HELD(&old_state->mtx); 613 614 if (use_mutex) 615 mutex_enter(&old_state->mtx); 616 617 ASSERT(list_link_active(&ab->b_arc_node)); 618 list_remove(&old_state->list, ab); 619 620 /* 621 * If prefetching out of the ghost cache, 622 * we will have a non-null datacnt. 623 */ 624 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 625 /* ghost elements have a ghost size */ 626 ASSERT(ab->b_buf == NULL); 627 from_delta = ab->b_size; 628 } 629 ASSERT3U(old_state->lsize, >=, from_delta); 630 atomic_add_64(&old_state->lsize, -from_delta); 631 632 if (use_mutex) 633 mutex_exit(&old_state->mtx); 634 } 635 if (new_state != arc.anon) { 636 int use_mutex = !MUTEX_HELD(&new_state->mtx); 637 638 if (use_mutex) 639 mutex_enter(&new_state->mtx); 640 641 list_insert_head(&new_state->list, ab); 642 643 /* ghost elements have a ghost size */ 644 if (GHOST_STATE(new_state)) { 645 ASSERT(ab->b_datacnt == 0); 646 ASSERT(ab->b_buf == NULL); 647 to_delta = ab->b_size; 648 } 649 atomic_add_64(&new_state->lsize, to_delta); 650 ASSERT3U(new_state->size + to_delta, >=, 651 new_state->lsize); 652 653 if (use_mutex) 654 mutex_exit(&new_state->mtx); 655 } 656 } 657 658 ASSERT(!BUF_EMPTY(ab)); 659 if (new_state == arc.anon && old_state != arc.anon) { 660 buf_hash_remove(ab); 661 } 662 663 /* adjust state sizes */ 664 if (to_delta) 665 atomic_add_64(&new_state->size, to_delta); 666 if (from_delta) { 667 ASSERT3U(old_state->size, >=, from_delta); 668 atomic_add_64(&old_state->size, -from_delta); 669 } 670 ab->b_state = new_state; 671 } 672 673 arc_buf_t * 674 arc_buf_alloc(spa_t *spa, int size, void *tag) 675 { 676 arc_buf_hdr_t *hdr; 677 arc_buf_t *buf; 678 679 ASSERT3U(size, >, 0); 680 hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP); 681 ASSERT(BUF_EMPTY(hdr)); 682 hdr->b_size = size; 683 hdr->b_spa = spa; 684 hdr->b_state = arc.anon; 685 hdr->b_arc_access = 0; 686 buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 687 buf->b_hdr = hdr; 688 buf->b_data = NULL; 689 buf->b_efunc = NULL; 690 buf->b_private = NULL; 691 buf->b_next = NULL; 692 hdr->b_buf = buf; 693 arc_get_data_buf(buf); 694 hdr->b_datacnt = 1; 695 hdr->b_flags = 0; 696 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 697 (void) refcount_add(&hdr->b_refcnt, tag); 698 699 return (buf); 700 } 701 702 static arc_buf_t * 703 arc_buf_clone(arc_buf_t *from) 704 { 705 arc_buf_t *buf; 706 arc_buf_hdr_t *hdr = from->b_hdr; 707 uint64_t size = hdr->b_size; 708 709 buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 710 buf->b_hdr = hdr; 711 buf->b_data = NULL; 712 buf->b_efunc = NULL; 713 buf->b_private = NULL; 714 buf->b_next = hdr->b_buf; 715 hdr->b_buf = buf; 716 arc_get_data_buf(buf); 717 bcopy(from->b_data, buf->b_data, size); 718 hdr->b_datacnt += 1; 719 return (buf); 720 } 721 722 void 723 arc_buf_add_ref(arc_buf_t *buf, void* tag) 724 { 725 arc_buf_hdr_t *hdr; 726 kmutex_t *hash_lock; 727 728 mutex_enter(&arc_eviction_mtx); 
729 hdr = buf->b_hdr; 730 if (buf->b_data == NULL) { 731 /* 732 * This buffer is evicted. 733 */ 734 mutex_exit(&arc_eviction_mtx); 735 return; 736 } else { 737 /* 738 * Prevent this buffer from being evicted 739 * while we add a reference. 740 */ 741 buf->b_hdr = NULL; 742 } 743 mutex_exit(&arc_eviction_mtx); 744 745 ASSERT(hdr->b_state != arc.anon); 746 hash_lock = HDR_LOCK(hdr); 747 mutex_enter(hash_lock); 748 ASSERT(!GHOST_STATE(hdr->b_state)); 749 buf->b_hdr = hdr; 750 add_reference(hdr, hash_lock, tag); 751 arc_access(hdr, hash_lock); 752 mutex_exit(hash_lock); 753 atomic_add_64(&arc.hits, 1); 754 } 755 756 static void 757 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 758 { 759 arc_buf_t **bufp; 760 761 /* free up data associated with the buf */ 762 if (buf->b_data) { 763 arc_state_t *state = buf->b_hdr->b_state; 764 uint64_t size = buf->b_hdr->b_size; 765 766 if (!recycle) { 767 zio_buf_free(buf->b_data, size); 768 atomic_add_64(&arc.size, -size); 769 } 770 if (list_link_active(&buf->b_hdr->b_arc_node)) { 771 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 772 ASSERT(state != arc.anon); 773 ASSERT3U(state->lsize, >=, size); 774 atomic_add_64(&state->lsize, -size); 775 } 776 ASSERT3U(state->size, >=, size); 777 atomic_add_64(&state->size, -size); 778 buf->b_data = NULL; 779 ASSERT(buf->b_hdr->b_datacnt > 0); 780 buf->b_hdr->b_datacnt -= 1; 781 } 782 783 /* only remove the buf if requested */ 784 if (!all) 785 return; 786 787 /* remove the buf from the hdr list */ 788 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 789 continue; 790 *bufp = buf->b_next; 791 792 ASSERT(buf->b_efunc == NULL); 793 794 /* clean up the buf */ 795 buf->b_hdr = NULL; 796 kmem_cache_free(buf_cache, buf); 797 } 798 799 static void 800 arc_hdr_destroy(arc_buf_hdr_t *hdr) 801 { 802 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 803 ASSERT3P(hdr->b_state, ==, arc.anon); 804 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 805 806 if (!BUF_EMPTY(hdr)) { 807 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 808 bzero(&hdr->b_dva, sizeof (dva_t)); 809 hdr->b_birth = 0; 810 hdr->b_cksum0 = 0; 811 } 812 while (hdr->b_buf) { 813 arc_buf_t *buf = hdr->b_buf; 814 815 if (buf->b_efunc) { 816 mutex_enter(&arc_eviction_mtx); 817 ASSERT(buf->b_hdr != NULL); 818 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 819 hdr->b_buf = buf->b_next; 820 buf->b_next = arc_eviction_list; 821 arc_eviction_list = buf; 822 mutex_exit(&arc_eviction_mtx); 823 } else { 824 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 825 } 826 } 827 828 ASSERT(!list_link_active(&hdr->b_arc_node)); 829 ASSERT3P(hdr->b_hash_next, ==, NULL); 830 ASSERT3P(hdr->b_acb, ==, NULL); 831 kmem_cache_free(hdr_cache, hdr); 832 } 833 834 void 835 arc_buf_free(arc_buf_t *buf, void *tag) 836 { 837 arc_buf_hdr_t *hdr = buf->b_hdr; 838 int hashed = hdr->b_state != arc.anon; 839 840 ASSERT(buf->b_efunc == NULL); 841 ASSERT(buf->b_data != NULL); 842 843 if (hashed) { 844 kmutex_t *hash_lock = HDR_LOCK(hdr); 845 846 mutex_enter(hash_lock); 847 (void) remove_reference(hdr, hash_lock, tag); 848 if (hdr->b_datacnt > 1) 849 arc_buf_destroy(buf, FALSE, TRUE); 850 else 851 hdr->b_flags |= ARC_BUF_AVAILABLE; 852 mutex_exit(hash_lock); 853 } else if (HDR_IO_IN_PROGRESS(hdr)) { 854 int destroy_hdr; 855 /* 856 * We are in the middle of an async write. Don't destroy 857 * this buffer unless the write completes before we finish 858 * decrementing the reference count. 
859 */ 860 mutex_enter(&arc_eviction_mtx); 861 (void) remove_reference(hdr, NULL, tag); 862 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 863 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 864 mutex_exit(&arc_eviction_mtx); 865 if (destroy_hdr) 866 arc_hdr_destroy(hdr); 867 } else { 868 if (remove_reference(hdr, NULL, tag) > 0) { 869 ASSERT(HDR_IO_ERROR(hdr)); 870 arc_buf_destroy(buf, FALSE, TRUE); 871 } else { 872 arc_hdr_destroy(hdr); 873 } 874 } 875 } 876 877 int 878 arc_buf_remove_ref(arc_buf_t *buf, void* tag) 879 { 880 arc_buf_hdr_t *hdr = buf->b_hdr; 881 kmutex_t *hash_lock = HDR_LOCK(hdr); 882 int no_callback = (buf->b_efunc == NULL); 883 884 if (hdr->b_state == arc.anon) { 885 arc_buf_free(buf, tag); 886 return (no_callback); 887 } 888 889 mutex_enter(hash_lock); 890 ASSERT(hdr->b_state != arc.anon); 891 ASSERT(buf->b_data != NULL); 892 893 (void) remove_reference(hdr, hash_lock, tag); 894 if (hdr->b_datacnt > 1) { 895 if (no_callback) 896 arc_buf_destroy(buf, FALSE, TRUE); 897 } else if (no_callback) { 898 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 899 hdr->b_flags |= ARC_BUF_AVAILABLE; 900 } 901 ASSERT(no_callback || hdr->b_datacnt > 1 || 902 refcount_is_zero(&hdr->b_refcnt)); 903 mutex_exit(hash_lock); 904 return (no_callback); 905 } 906 907 int 908 arc_buf_size(arc_buf_t *buf) 909 { 910 return (buf->b_hdr->b_size); 911 } 912 913 /* 914 * Evict buffers from list until we've removed the specified number of 915 * bytes. Move the removed buffers to the appropriate evict state. 916 * If the recycle flag is set, then attempt to "recycle" a buffer: 917 * - look for a buffer to evict that is `bytes' long. 918 * - return the data block from this buffer rather than freeing it. 919 * This flag is used by callers that are trying to make space for a 920 * new buffer in a full arc cache. 921 */ 922 static void * 923 arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle) 924 { 925 arc_state_t *evicted_state; 926 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 927 arc_buf_hdr_t *ab, *ab_prev; 928 kmutex_t *hash_lock; 929 boolean_t have_lock; 930 void *steal = NULL; 931 932 ASSERT(state == arc.mru || state == arc.mfu); 933 934 evicted_state = (state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost; 935 936 mutex_enter(&state->mtx); 937 mutex_enter(&evicted_state->mtx); 938 939 for (ab = list_tail(&state->list); ab; ab = ab_prev) { 940 ab_prev = list_prev(&state->list, ab); 941 /* prefetch buffers have a minimum lifespan */ 942 if (HDR_IO_IN_PROGRESS(ab) || 943 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 944 lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { 945 skipped++; 946 continue; 947 } 948 if (recycle && (ab->b_size != bytes || ab->b_datacnt > 1)) 949 continue; 950 hash_lock = HDR_LOCK(ab); 951 have_lock = MUTEX_HELD(hash_lock); 952 if (have_lock || mutex_tryenter(hash_lock)) { 953 ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 954 ASSERT(ab->b_datacnt > 0); 955 while (ab->b_buf) { 956 arc_buf_t *buf = ab->b_buf; 957 if (buf->b_data) { 958 bytes_evicted += ab->b_size; 959 if (recycle) 960 steal = buf->b_data; 961 } 962 if (buf->b_efunc) { 963 mutex_enter(&arc_eviction_mtx); 964 /* 965 * arc_buf_add_ref() could derail 966 * this eviction. 
967 */ 968 if (buf->b_hdr == NULL) { 969 mutex_exit(&arc_eviction_mtx); 970 bytes_evicted -= ab->b_size; 971 if (recycle) 972 steal = NULL; 973 if (!have_lock) 974 mutex_exit(hash_lock); 975 goto derailed; 976 } 977 arc_buf_destroy(buf, recycle, FALSE); 978 ab->b_buf = buf->b_next; 979 buf->b_next = arc_eviction_list; 980 arc_eviction_list = buf; 981 mutex_exit(&arc_eviction_mtx); 982 } else { 983 arc_buf_destroy(buf, recycle, TRUE); 984 } 985 } 986 ASSERT(ab->b_datacnt == 0); 987 arc_change_state(evicted_state, ab, hash_lock); 988 ASSERT(HDR_IN_HASH_TABLE(ab)); 989 ab->b_flags = ARC_IN_HASH_TABLE; 990 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 991 if (!have_lock) 992 mutex_exit(hash_lock); 993 if (bytes >= 0 && bytes_evicted >= bytes) 994 break; 995 } else { 996 missed += 1; 997 } 998 derailed: 999 /* null statement */; 1000 } 1001 mutex_exit(&evicted_state->mtx); 1002 mutex_exit(&state->mtx); 1003 1004 if (bytes_evicted < bytes) 1005 dprintf("only evicted %lld bytes from %x", 1006 (longlong_t)bytes_evicted, state); 1007 1008 if (skipped) 1009 atomic_add_64(&arc.evict_skip, skipped); 1010 if (missed) 1011 atomic_add_64(&arc.mutex_miss, missed); 1012 return (steal); 1013 } 1014 1015 /* 1016 * Remove buffers from list until we've removed the specified number of 1017 * bytes. Destroy the buffers that are removed. 1018 */ 1019 static void 1020 arc_evict_ghost(arc_state_t *state, int64_t bytes) 1021 { 1022 arc_buf_hdr_t *ab, *ab_prev; 1023 kmutex_t *hash_lock; 1024 uint64_t bytes_deleted = 0; 1025 uint_t bufs_skipped = 0; 1026 1027 ASSERT(GHOST_STATE(state)); 1028 top: 1029 mutex_enter(&state->mtx); 1030 for (ab = list_tail(&state->list); ab; ab = ab_prev) { 1031 ab_prev = list_prev(&state->list, ab); 1032 hash_lock = HDR_LOCK(ab); 1033 if (mutex_tryenter(hash_lock)) { 1034 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1035 ASSERT(ab->b_buf == NULL); 1036 arc_change_state(arc.anon, ab, hash_lock); 1037 mutex_exit(hash_lock); 1038 atomic_add_64(&arc.deleted, 1); 1039 bytes_deleted += ab->b_size; 1040 arc_hdr_destroy(ab); 1041 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1042 if (bytes >= 0 && bytes_deleted >= bytes) 1043 break; 1044 } else { 1045 if (bytes < 0) { 1046 mutex_exit(&state->mtx); 1047 mutex_enter(hash_lock); 1048 mutex_exit(hash_lock); 1049 goto top; 1050 } 1051 bufs_skipped += 1; 1052 } 1053 } 1054 mutex_exit(&state->mtx); 1055 1056 if (bufs_skipped) { 1057 atomic_add_64(&arc.mutex_miss, bufs_skipped); 1058 ASSERT(bytes >= 0); 1059 } 1060 1061 if (bytes_deleted < bytes) 1062 dprintf("only deleted %lld bytes from %p", 1063 (longlong_t)bytes_deleted, state); 1064 } 1065 1066 static void 1067 arc_adjust(void) 1068 { 1069 int64_t top_sz, mru_over, arc_over; 1070 1071 top_sz = arc.anon->size + arc.mru->size; 1072 1073 if (top_sz > arc.p && arc.mru->lsize > 0) { 1074 int64_t toevict = MIN(arc.mru->lsize, top_sz-arc.p); 1075 (void) arc_evict(arc.mru, toevict, FALSE); 1076 top_sz = arc.anon->size + arc.mru->size; 1077 } 1078 1079 mru_over = top_sz + arc.mru_ghost->size - arc.c; 1080 1081 if (mru_over > 0) { 1082 if (arc.mru_ghost->lsize > 0) { 1083 int64_t todelete = MIN(arc.mru_ghost->lsize, mru_over); 1084 arc_evict_ghost(arc.mru_ghost, todelete); 1085 } 1086 } 1087 1088 if ((arc_over = arc.size - arc.c) > 0) { 1089 int64_t tbl_over; 1090 1091 if (arc.mfu->lsize > 0) { 1092 int64_t toevict = MIN(arc.mfu->lsize, arc_over); 1093 (void) arc_evict(arc.mfu, toevict, FALSE); 1094 } 1095 1096 tbl_over = arc.size + arc.mru_ghost->lsize + 1097 arc.mfu_ghost->lsize - arc.c*2; 1098 1099 if (tbl_over > 
0 && arc.mfu_ghost->lsize > 0) { 1100 int64_t todelete = MIN(arc.mfu_ghost->lsize, tbl_over); 1101 arc_evict_ghost(arc.mfu_ghost, todelete); 1102 } 1103 } 1104 } 1105 1106 static void 1107 arc_do_user_evicts(void) 1108 { 1109 mutex_enter(&arc_eviction_mtx); 1110 while (arc_eviction_list != NULL) { 1111 arc_buf_t *buf = arc_eviction_list; 1112 arc_eviction_list = buf->b_next; 1113 buf->b_hdr = NULL; 1114 mutex_exit(&arc_eviction_mtx); 1115 1116 if (buf->b_efunc != NULL) 1117 VERIFY(buf->b_efunc(buf) == 0); 1118 1119 buf->b_efunc = NULL; 1120 buf->b_private = NULL; 1121 kmem_cache_free(buf_cache, buf); 1122 mutex_enter(&arc_eviction_mtx); 1123 } 1124 mutex_exit(&arc_eviction_mtx); 1125 } 1126 1127 /* 1128 * Flush all *evictable* data from the cache. 1129 * NOTE: this will not touch "active" (i.e. referenced) data. 1130 */ 1131 void 1132 arc_flush(void) 1133 { 1134 while (list_head(&arc.mru->list)) 1135 (void) arc_evict(arc.mru, -1, FALSE); 1136 while (list_head(&arc.mfu->list)) 1137 (void) arc_evict(arc.mfu, -1, FALSE); 1138 1139 arc_evict_ghost(arc.mru_ghost, -1); 1140 arc_evict_ghost(arc.mfu_ghost, -1); 1141 1142 mutex_enter(&arc_reclaim_thr_lock); 1143 arc_do_user_evicts(); 1144 mutex_exit(&arc_reclaim_thr_lock); 1145 ASSERT(arc_eviction_list == NULL); 1146 } 1147 1148 int arc_kmem_reclaim_shift = 5; /* log2(fraction of arc to reclaim) */ 1149 1150 void 1151 arc_kmem_reclaim(void) 1152 { 1153 uint64_t to_free; 1154 1155 /* 1156 * We need arc_reclaim_lock because we don't want multiple 1157 * threads trying to reclaim concurrently. 1158 */ 1159 1160 /* 1161 * umem calls the reclaim func when we destroy the buf cache, 1162 * which is after we do arc_fini(). So we set a flag to prevent 1163 * accessing the destroyed mutexes and lists. 1164 */ 1165 if (arc_dead) 1166 return; 1167 1168 if (arc.c <= arc.c_min) 1169 return; 1170 1171 mutex_enter(&arc_reclaim_lock); 1172 1173 #ifdef _KERNEL 1174 to_free = MAX(arc.c >> arc_kmem_reclaim_shift, ptob(needfree)); 1175 #else 1176 to_free = arc.c >> arc_kmem_reclaim_shift; 1177 #endif 1178 if (arc.c > to_free) 1179 atomic_add_64(&arc.c, -to_free); 1180 else 1181 arc.c = arc.c_min; 1182 1183 atomic_add_64(&arc.p, -(arc.p >> arc_kmem_reclaim_shift)); 1184 if (arc.c > arc.size) 1185 arc.c = arc.size; 1186 if (arc.c < arc.c_min) 1187 arc.c = arc.c_min; 1188 if (arc.p > arc.c) 1189 arc.p = (arc.c >> 1); 1190 ASSERT((int64_t)arc.p >= 0); 1191 1192 arc_adjust(); 1193 1194 mutex_exit(&arc_reclaim_lock); 1195 } 1196 1197 static int 1198 arc_reclaim_needed(void) 1199 { 1200 uint64_t extra; 1201 1202 #ifdef _KERNEL 1203 1204 if (needfree) 1205 return (1); 1206 1207 /* 1208 * take 'desfree' extra pages, so we reclaim sooner, rather than later 1209 */ 1210 extra = desfree; 1211 1212 /* 1213 * check that we're out of range of the pageout scanner. It starts to 1214 * schedule paging if freemem is less than lotsfree and needfree. 1215 * lotsfree is the high-water mark for pageout, and needfree is the 1216 * number of needed free pages. We add extra pages here to make sure 1217 * the scanner doesn't start up while we're freeing memory. 1218 */ 1219 if (freemem < lotsfree + needfree + extra) 1220 return (1); 1221 1222 /* 1223 * check to make sure that swapfs has enough space so that anon 1224 * reservations can still succeeed. anon_resvmem() checks that the 1225 * availrmem is greater than swapfs_minfree, and the number of reserved 1226 * swap pages. We also add a bit of extra here just to prevent 1227 * circumstances from getting really dire. 
1228 */ 1229 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 1230 return (1); 1231 1232 #if defined(__i386) 1233 /* 1234 * If we're on an i386 platform, it's possible that we'll exhaust the 1235 * kernel heap space before we ever run out of available physical 1236 * memory. Most checks of the size of the heap_area compare against 1237 * tune.t_minarmem, which is the minimum available real memory that we 1238 * can have in the system. However, this is generally fixed at 25 pages 1239 * which is so low that it's useless. In this comparison, we seek to 1240 * calculate the total heap-size, and reclaim if more than 3/4ths of the 1241 * heap is allocated. (Or, in the caclulation, if less than 1/4th is 1242 * free) 1243 */ 1244 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 1245 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 1246 return (1); 1247 #endif 1248 1249 #else 1250 if (spa_get_random(100) == 0) 1251 return (1); 1252 #endif 1253 return (0); 1254 } 1255 1256 static void 1257 arc_kmem_reap_now(arc_reclaim_strategy_t strat) 1258 { 1259 size_t i; 1260 kmem_cache_t *prev_cache = NULL; 1261 extern kmem_cache_t *zio_buf_cache[]; 1262 1263 #ifdef _KERNEL 1264 /* 1265 * First purge some DNLC entries, in case the DNLC is using 1266 * up too much memory. 1267 */ 1268 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 1269 1270 #if defined(__i386) 1271 /* 1272 * Reclaim unused memory from all kmem caches. 1273 */ 1274 kmem_reap(); 1275 #endif 1276 #endif 1277 1278 /* 1279 * An agressive reclamation will shrink the cache size as well as 1280 * reap free buffers from the arc kmem caches. 1281 */ 1282 if (strat == ARC_RECLAIM_AGGR) 1283 arc_kmem_reclaim(); 1284 1285 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1286 if (zio_buf_cache[i] != prev_cache) { 1287 prev_cache = zio_buf_cache[i]; 1288 kmem_cache_reap_now(zio_buf_cache[i]); 1289 } 1290 } 1291 kmem_cache_reap_now(buf_cache); 1292 kmem_cache_reap_now(hdr_cache); 1293 } 1294 1295 static void 1296 arc_reclaim_thread(void) 1297 { 1298 clock_t growtime = 0; 1299 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1300 callb_cpr_t cpr; 1301 1302 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1303 1304 mutex_enter(&arc_reclaim_thr_lock); 1305 while (arc_thread_exit == 0) { 1306 if (arc_reclaim_needed()) { 1307 1308 if (arc.no_grow) { 1309 if (last_reclaim == ARC_RECLAIM_CONS) { 1310 last_reclaim = ARC_RECLAIM_AGGR; 1311 } else { 1312 last_reclaim = ARC_RECLAIM_CONS; 1313 } 1314 } else { 1315 arc.no_grow = TRUE; 1316 last_reclaim = ARC_RECLAIM_AGGR; 1317 membar_producer(); 1318 } 1319 1320 /* reset the growth delay for every reclaim */ 1321 growtime = lbolt + (arc_grow_retry * hz); 1322 1323 arc_kmem_reap_now(last_reclaim); 1324 1325 } else if ((growtime > 0) && ((growtime - lbolt) <= 0)) { 1326 arc.no_grow = FALSE; 1327 } 1328 1329 if (arc_eviction_list != NULL) 1330 arc_do_user_evicts(); 1331 1332 /* block until needed, or one second, whichever is shorter */ 1333 CALLB_CPR_SAFE_BEGIN(&cpr); 1334 (void) cv_timedwait(&arc_reclaim_thr_cv, 1335 &arc_reclaim_thr_lock, (lbolt + hz)); 1336 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 1337 } 1338 1339 arc_thread_exit = 0; 1340 cv_broadcast(&arc_reclaim_thr_cv); 1341 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 1342 thread_exit(); 1343 } 1344 1345 /* 1346 * Adapt arc info given the number of bytes we are trying to add and 1347 * the state that we are comming from. 
This function is only called 1348 * when we are adding new content to the cache. 1349 */ 1350 static void 1351 arc_adapt(int bytes, arc_state_t *state) 1352 { 1353 int mult; 1354 1355 ASSERT(bytes > 0); 1356 /* 1357 * Adapt the target size of the MRU list: 1358 * - if we just hit in the MRU ghost list, then increase 1359 * the target size of the MRU list. 1360 * - if we just hit in the MFU ghost list, then increase 1361 * the target size of the MFU list by decreasing the 1362 * target size of the MRU list. 1363 */ 1364 if (state == arc.mru_ghost) { 1365 mult = ((arc.mru_ghost->size >= arc.mfu_ghost->size) ? 1366 1 : (arc.mfu_ghost->size/arc.mru_ghost->size)); 1367 1368 arc.p = MIN(arc.c, arc.p + bytes * mult); 1369 } else if (state == arc.mfu_ghost) { 1370 mult = ((arc.mfu_ghost->size >= arc.mru_ghost->size) ? 1371 1 : (arc.mru_ghost->size/arc.mfu_ghost->size)); 1372 1373 arc.p = MAX(0, (int64_t)arc.p - bytes * mult); 1374 } 1375 ASSERT((int64_t)arc.p >= 0); 1376 1377 if (arc_reclaim_needed()) { 1378 cv_signal(&arc_reclaim_thr_cv); 1379 return; 1380 } 1381 1382 if (arc.no_grow) 1383 return; 1384 1385 if (arc.c >= arc.c_max) 1386 return; 1387 1388 /* 1389 * If we're within (2 * maxblocksize) bytes of the target 1390 * cache size, increment the target cache size 1391 */ 1392 if (arc.size > arc.c - (2ULL << SPA_MAXBLOCKSHIFT)) { 1393 atomic_add_64(&arc.c, (int64_t)bytes); 1394 if (arc.c > arc.c_max) 1395 arc.c = arc.c_max; 1396 else if (state == arc.anon) 1397 atomic_add_64(&arc.p, (int64_t)bytes); 1398 if (arc.p > arc.c) 1399 arc.p = arc.c; 1400 } 1401 ASSERT((int64_t)arc.p >= 0); 1402 } 1403 1404 /* 1405 * Check if the cache has reached its limits and eviction is required 1406 * prior to insert. 1407 */ 1408 static int 1409 arc_evict_needed() 1410 { 1411 if (arc_reclaim_needed()) 1412 return (1); 1413 1414 return (arc.size > arc.c); 1415 } 1416 1417 /* 1418 * The buffer, supplied as the first argument, needs a data block. 1419 * So, if we are at cache max, determine which cache should be victimized. 1420 * We have the following cases: 1421 * 1422 * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru) -> 1423 * In this situation if we're out of space, but the resident size of the MFU is 1424 * under the limit, victimize the MFU cache to satisfy this insertion request. 1425 * 1426 * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru) -> 1427 * Here, we've used up all of the available space for the MRU, so we need to 1428 * evict from our own cache instead. Evict from the set of resident MRU 1429 * entries. 1430 * 1431 * 3. Insert for MFU (c - p) > sizeof(arc.mfu) -> 1432 * c minus p represents the MFU space in the cache, since p is the size of the 1433 * cache that is dedicated to the MRU. In this situation there's still space on 1434 * the MFU side, so the MRU side needs to be victimized. 1435 * 1436 * 4. Insert for MFU (c - p) < sizeof(arc.mfu) -> 1437 * MFU's resident set is consuming more space than it has been allotted. In 1438 * this situation, we must victimize our own cache, the MFU, for this insertion. 1439 */ 1440 static void 1441 arc_get_data_buf(arc_buf_t *buf) 1442 { 1443 arc_state_t *state = buf->b_hdr->b_state; 1444 uint64_t size = buf->b_hdr->b_size; 1445 1446 arc_adapt(size, state); 1447 1448 /* 1449 * We have not yet reached cache maximum size, 1450 * just allocate a new buffer. 
1451 */ 1452 if (!arc_evict_needed()) { 1453 buf->b_data = zio_buf_alloc(size); 1454 atomic_add_64(&arc.size, size); 1455 goto out; 1456 } 1457 1458 /* 1459 * If we are prefetching from the mfu ghost list, this buffer 1460 * will end up on the mru list; so steal space from there. 1461 */ 1462 if (state == arc.mfu_ghost) 1463 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc.mru : arc.mfu; 1464 else if (state == arc.mru_ghost) 1465 state = arc.mru; 1466 1467 if (state == arc.mru || state == arc.anon) { 1468 uint64_t mru_used = arc.anon->size + arc.mru->size; 1469 state = (arc.p > mru_used) ? arc.mfu : arc.mru; 1470 } else { 1471 /* MFU cases */ 1472 uint64_t mfu_space = arc.c - arc.p; 1473 state = (mfu_space > arc.mfu->size) ? arc.mru : arc.mfu; 1474 } 1475 if ((buf->b_data = arc_evict(state, size, TRUE)) == NULL) { 1476 (void) arc_evict(state, size, FALSE); 1477 buf->b_data = zio_buf_alloc(size); 1478 atomic_add_64(&arc.size, size); 1479 atomic_add_64(&arc.recycle_miss, 1); 1480 if (arc.size > arc.c) 1481 arc_adjust(); 1482 } 1483 ASSERT(buf->b_data != NULL); 1484 out: 1485 /* 1486 * Update the state size. Note that ghost states have a 1487 * "ghost size" and so don't need to be updated. 1488 */ 1489 if (!GHOST_STATE(buf->b_hdr->b_state)) { 1490 arc_buf_hdr_t *hdr = buf->b_hdr; 1491 1492 atomic_add_64(&hdr->b_state->size, size); 1493 if (list_link_active(&hdr->b_arc_node)) { 1494 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1495 atomic_add_64(&hdr->b_state->lsize, size); 1496 } 1497 } 1498 } 1499 1500 /* 1501 * This routine is called whenever a buffer is accessed. 1502 * NOTE: the hash lock is dropped in this function. 1503 */ 1504 static void 1505 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 1506 { 1507 ASSERT(MUTEX_HELD(hash_lock)); 1508 1509 if (buf->b_state == arc.anon) { 1510 /* 1511 * This buffer is not in the cache, and does not 1512 * appear in our "ghost" list. Add the new buffer 1513 * to the MRU state. 1514 */ 1515 1516 ASSERT(buf->b_arc_access == 0); 1517 buf->b_arc_access = lbolt; 1518 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 1519 arc_change_state(arc.mru, buf, hash_lock); 1520 1521 } else if (buf->b_state == arc.mru) { 1522 /* 1523 * If this buffer is here because of a prefetch, then either: 1524 * - clear the flag if this is a "referencing" read 1525 * (any subsequent access will bump this into the MFU state). 1526 * or 1527 * - move the buffer to the head of the list if this is 1528 * another prefetch (to make it less likely to be evicted). 1529 */ 1530 if ((buf->b_flags & ARC_PREFETCH) != 0) { 1531 if (refcount_count(&buf->b_refcnt) == 0) { 1532 ASSERT(list_link_active(&buf->b_arc_node)); 1533 mutex_enter(&arc.mru->mtx); 1534 list_remove(&arc.mru->list, buf); 1535 list_insert_head(&arc.mru->list, buf); 1536 mutex_exit(&arc.mru->mtx); 1537 } else { 1538 buf->b_flags &= ~ARC_PREFETCH; 1539 atomic_add_64(&arc.mru->hits, 1); 1540 } 1541 buf->b_arc_access = lbolt; 1542 return; 1543 } 1544 1545 /* 1546 * This buffer has been "accessed" only once so far, 1547 * but it is still in the cache. Move it to the MFU 1548 * state. 1549 */ 1550 if (lbolt > buf->b_arc_access + ARC_MINTIME) { 1551 /* 1552 * More than 125ms have passed since we 1553 * instantiated this buffer. Move it to the 1554 * most frequently used state. 
1555 */ 1556 buf->b_arc_access = lbolt; 1557 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1558 arc_change_state(arc.mfu, buf, hash_lock); 1559 } 1560 atomic_add_64(&arc.mru->hits, 1); 1561 } else if (buf->b_state == arc.mru_ghost) { 1562 arc_state_t *new_state; 1563 /* 1564 * This buffer has been "accessed" recently, but 1565 * was evicted from the cache. Move it to the 1566 * MFU state. 1567 */ 1568 1569 if (buf->b_flags & ARC_PREFETCH) { 1570 new_state = arc.mru; 1571 if (refcount_count(&buf->b_refcnt) > 0) 1572 buf->b_flags &= ~ARC_PREFETCH; 1573 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 1574 } else { 1575 new_state = arc.mfu; 1576 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1577 } 1578 1579 buf->b_arc_access = lbolt; 1580 arc_change_state(new_state, buf, hash_lock); 1581 1582 atomic_add_64(&arc.mru_ghost->hits, 1); 1583 } else if (buf->b_state == arc.mfu) { 1584 /* 1585 * This buffer has been accessed more than once and is 1586 * still in the cache. Keep it in the MFU state. 1587 * 1588 * NOTE: an add_reference() that occurred when we did 1589 * the arc_read() will have kicked this off the list. 1590 * If it was a prefetch, we will explicitly move it to 1591 * the head of the list now. 1592 */ 1593 if ((buf->b_flags & ARC_PREFETCH) != 0) { 1594 ASSERT(refcount_count(&buf->b_refcnt) == 0); 1595 ASSERT(list_link_active(&buf->b_arc_node)); 1596 mutex_enter(&arc.mfu->mtx); 1597 list_remove(&arc.mfu->list, buf); 1598 list_insert_head(&arc.mfu->list, buf); 1599 mutex_exit(&arc.mfu->mtx); 1600 } 1601 atomic_add_64(&arc.mfu->hits, 1); 1602 buf->b_arc_access = lbolt; 1603 } else if (buf->b_state == arc.mfu_ghost) { 1604 arc_state_t *new_state = arc.mfu; 1605 /* 1606 * This buffer has been accessed more than once but has 1607 * been evicted from the cache. Move it back to the 1608 * MFU state. 1609 */ 1610 1611 if (buf->b_flags & ARC_PREFETCH) { 1612 /* 1613 * This is a prefetch access... 1614 * move this block back to the MRU state. 1615 */ 1616 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 1617 new_state = arc.mru; 1618 } 1619 1620 buf->b_arc_access = lbolt; 1621 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1622 arc_change_state(new_state, buf, hash_lock); 1623 1624 atomic_add_64(&arc.mfu_ghost->hits, 1); 1625 } else { 1626 ASSERT(!"invalid arc state"); 1627 } 1628 } 1629 1630 /* a generic arc_done_func_t which you can use */ 1631 /* ARGSUSED */ 1632 void 1633 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 1634 { 1635 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 1636 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1637 } 1638 1639 /* a generic arc_done_func_t which you can use */ 1640 void 1641 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 1642 { 1643 arc_buf_t **bufp = arg; 1644 if (zio && zio->io_error) { 1645 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1646 *bufp = NULL; 1647 } else { 1648 *bufp = buf; 1649 } 1650 } 1651 1652 static void 1653 arc_read_done(zio_t *zio) 1654 { 1655 arc_buf_hdr_t *hdr, *found; 1656 arc_buf_t *buf; 1657 arc_buf_t *abuf; /* buffer we're assigning to callback */ 1658 kmutex_t *hash_lock; 1659 arc_callback_t *callback_list, *acb; 1660 int freeable = FALSE; 1661 1662 buf = zio->io_private; 1663 hdr = buf->b_hdr; 1664 1665 /* 1666 * The hdr was inserted into hash-table and removed from lists 1667 * prior to starting I/O. We should find this header, since 1668 * it's in the hash table, and it should be legit since it's 1669 * not possible to evict it during the I/O. 
The only possible 1670 * reason for it not to be found is if we were freed during the 1671 * read. 1672 */ 1673 found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 1674 &hash_lock); 1675 1676 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 1677 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp)))); 1678 1679 /* byteswap if necessary */ 1680 callback_list = hdr->b_acb; 1681 ASSERT(callback_list != NULL); 1682 if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 1683 callback_list->acb_byteswap(buf->b_data, hdr->b_size); 1684 1685 /* create copies of the data buffer for the callers */ 1686 abuf = buf; 1687 for (acb = callback_list; acb; acb = acb->acb_next) { 1688 if (acb->acb_done) { 1689 if (abuf == NULL) 1690 abuf = arc_buf_clone(buf); 1691 acb->acb_buf = abuf; 1692 abuf = NULL; 1693 } 1694 } 1695 hdr->b_acb = NULL; 1696 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 1697 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 1698 if (abuf == buf) 1699 hdr->b_flags |= ARC_BUF_AVAILABLE; 1700 1701 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 1702 1703 if (zio->io_error != 0) { 1704 hdr->b_flags |= ARC_IO_ERROR; 1705 if (hdr->b_state != arc.anon) 1706 arc_change_state(arc.anon, hdr, hash_lock); 1707 if (HDR_IN_HASH_TABLE(hdr)) 1708 buf_hash_remove(hdr); 1709 freeable = refcount_is_zero(&hdr->b_refcnt); 1710 /* convert checksum errors into IO errors */ 1711 if (zio->io_error == ECKSUM) 1712 zio->io_error = EIO; 1713 } 1714 1715 /* 1716 * Broadcast before we drop the hash_lock to avoid the possibility 1717 * that the hdr (and hence the cv) might be freed before we get to 1718 * the cv_broadcast(). 1719 */ 1720 cv_broadcast(&hdr->b_cv); 1721 1722 if (hash_lock) { 1723 /* 1724 * Only call arc_access on anonymous buffers. This is because 1725 * if we've issued an I/O for an evicted buffer, we've already 1726 * called arc_access (to prevent any simultaneous readers from 1727 * getting confused). 1728 */ 1729 if (zio->io_error == 0 && hdr->b_state == arc.anon) 1730 arc_access(hdr, hash_lock); 1731 mutex_exit(hash_lock); 1732 } else { 1733 /* 1734 * This block was freed while we waited for the read to 1735 * complete. It has been removed from the hash table and 1736 * moved to the anonymous state (so that it won't show up 1737 * in the cache). 1738 */ 1739 ASSERT3P(hdr->b_state, ==, arc.anon); 1740 freeable = refcount_is_zero(&hdr->b_refcnt); 1741 } 1742 1743 /* execute each callback and free its structure */ 1744 while ((acb = callback_list) != NULL) { 1745 if (acb->acb_done) 1746 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 1747 1748 if (acb->acb_zio_dummy != NULL) { 1749 acb->acb_zio_dummy->io_error = zio->io_error; 1750 zio_nowait(acb->acb_zio_dummy); 1751 } 1752 1753 callback_list = acb->acb_next; 1754 kmem_free(acb, sizeof (arc_callback_t)); 1755 } 1756 1757 if (freeable) 1758 arc_hdr_destroy(hdr); 1759 } 1760 1761 /* 1762 * "Read" the block block at the specified DVA (in bp) via the 1763 * cache. If the block is found in the cache, invoke the provided 1764 * callback immediately and return. Note that the `zio' parameter 1765 * in the callback will be NULL in this case, since no IO was 1766 * required. If the block is not in the cache pass the read request 1767 * on to the spa with a substitute callback function, so that the 1768 * requested block will be added to the cache. 
1769 * 1770 * If a read request arrives for a block that has a read in-progress, 1771 * either wait for the in-progress read to complete (and return the 1772 * results); or, if this is a read with a "done" func, add a record 1773 * to the read to invoke the "done" func when the read completes, 1774 * and return; or just return. 1775 * 1776 * arc_read_done() will invoke all the requested "done" functions 1777 * for readers of this block. 1778 */ 1779 int 1780 arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 1781 arc_done_func_t *done, void *private, int priority, int flags, 1782 uint32_t *arc_flags, zbookmark_t *zb) 1783 { 1784 arc_buf_hdr_t *hdr; 1785 arc_buf_t *buf; 1786 kmutex_t *hash_lock; 1787 zio_t *rzio; 1788 1789 top: 1790 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 1791 if (hdr && hdr->b_datacnt > 0) { 1792 1793 *arc_flags |= ARC_CACHED; 1794 1795 if (HDR_IO_IN_PROGRESS(hdr)) { 1796 1797 if (*arc_flags & ARC_WAIT) { 1798 cv_wait(&hdr->b_cv, hash_lock); 1799 mutex_exit(hash_lock); 1800 goto top; 1801 } 1802 ASSERT(*arc_flags & ARC_NOWAIT); 1803 1804 if (done) { 1805 arc_callback_t *acb = NULL; 1806 1807 acb = kmem_zalloc(sizeof (arc_callback_t), 1808 KM_SLEEP); 1809 acb->acb_done = done; 1810 acb->acb_private = private; 1811 acb->acb_byteswap = swap; 1812 if (pio != NULL) 1813 acb->acb_zio_dummy = zio_null(pio, 1814 spa, NULL, NULL, flags); 1815 1816 ASSERT(acb->acb_done != NULL); 1817 acb->acb_next = hdr->b_acb; 1818 hdr->b_acb = acb; 1819 add_reference(hdr, hash_lock, private); 1820 mutex_exit(hash_lock); 1821 return (0); 1822 } 1823 mutex_exit(hash_lock); 1824 return (0); 1825 } 1826 1827 ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu); 1828 1829 if (done) { 1830 add_reference(hdr, hash_lock, private); 1831 /* 1832 * If this block is already in use, create a new 1833 * copy of the data so that we will be guaranteed 1834 * that arc_release() will always succeed. 
1835 */ 1836 buf = hdr->b_buf; 1837 ASSERT(buf); 1838 ASSERT(buf->b_data); 1839 if (HDR_BUF_AVAILABLE(hdr)) { 1840 ASSERT(buf->b_efunc == NULL); 1841 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 1842 } else { 1843 buf = arc_buf_clone(buf); 1844 } 1845 } else if (*arc_flags & ARC_PREFETCH && 1846 refcount_count(&hdr->b_refcnt) == 0) { 1847 hdr->b_flags |= ARC_PREFETCH; 1848 } 1849 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 1850 arc_access(hdr, hash_lock); 1851 mutex_exit(hash_lock); 1852 atomic_add_64(&arc.hits, 1); 1853 if (done) 1854 done(NULL, buf, private); 1855 } else { 1856 uint64_t size = BP_GET_LSIZE(bp); 1857 arc_callback_t *acb; 1858 1859 if (hdr == NULL) { 1860 /* this block is not in the cache */ 1861 arc_buf_hdr_t *exists; 1862 1863 buf = arc_buf_alloc(spa, size, private); 1864 hdr = buf->b_hdr; 1865 hdr->b_dva = *BP_IDENTITY(bp); 1866 hdr->b_birth = bp->blk_birth; 1867 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 1868 exists = buf_hash_insert(hdr, &hash_lock); 1869 if (exists) { 1870 /* somebody beat us to the hash insert */ 1871 mutex_exit(hash_lock); 1872 bzero(&hdr->b_dva, sizeof (dva_t)); 1873 hdr->b_birth = 0; 1874 hdr->b_cksum0 = 0; 1875 (void) arc_buf_remove_ref(buf, private); 1876 goto top; /* restart the IO request */ 1877 } 1878 /* if this is a prefetch, we don't have a reference */ 1879 if (*arc_flags & ARC_PREFETCH) { 1880 (void) remove_reference(hdr, hash_lock, 1881 private); 1882 hdr->b_flags |= ARC_PREFETCH; 1883 } 1884 if (BP_GET_LEVEL(bp) > 0) 1885 hdr->b_flags |= ARC_INDIRECT; 1886 } else { 1887 /* this block is in the ghost cache */ 1888 ASSERT(GHOST_STATE(hdr->b_state)); 1889 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1890 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 1891 ASSERT(hdr->b_buf == NULL); 1892 1893 /* if this is a prefetch, we don't have a reference */ 1894 if (*arc_flags & ARC_PREFETCH) 1895 hdr->b_flags |= ARC_PREFETCH; 1896 else 1897 add_reference(hdr, hash_lock, private); 1898 buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 1899 buf->b_hdr = hdr; 1900 buf->b_data = NULL; 1901 buf->b_efunc = NULL; 1902 buf->b_private = NULL; 1903 buf->b_next = NULL; 1904 hdr->b_buf = buf; 1905 arc_get_data_buf(buf); 1906 ASSERT(hdr->b_datacnt == 0); 1907 hdr->b_datacnt = 1; 1908 1909 } 1910 1911 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 1912 acb->acb_done = done; 1913 acb->acb_private = private; 1914 acb->acb_byteswap = swap; 1915 1916 ASSERT(hdr->b_acb == NULL); 1917 hdr->b_acb = acb; 1918 hdr->b_flags |= ARC_IO_IN_PROGRESS; 1919 1920 /* 1921 * If the buffer has been evicted, migrate it to a present state 1922 * before issuing the I/O. Once we drop the hash-table lock, 1923 * the header will be marked as I/O in progress and have an 1924 * attached buffer. At this point, anybody who finds this 1925 * buffer ought to notice that it's legit but has a pending I/O. 1926 */ 1927 1928 if (GHOST_STATE(hdr->b_state)) 1929 arc_access(hdr, hash_lock); 1930 mutex_exit(hash_lock); 1931 1932 ASSERT3U(hdr->b_size, ==, size); 1933 DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 1934 zbookmark_t *, zb); 1935 atomic_add_64(&arc.misses, 1); 1936 1937 rzio = zio_read(pio, spa, bp, buf->b_data, size, 1938 arc_read_done, buf, priority, flags, zb); 1939 1940 if (*arc_flags & ARC_WAIT) 1941 return (zio_wait(rzio)); 1942 1943 ASSERT(*arc_flags & ARC_NOWAIT); 1944 zio_nowait(rzio); 1945 } 1946 return (0); 1947 } 1948 1949 /* 1950 * arc_read() variant to support pool traversal. If the block is already 1951 * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 
1952	 * The idea is that we don't want pool traversal filling up memory, but
1953	 * if the ARC already has the data anyway, we shouldn't pay for the I/O.
1954	 */
1955	int
1956	arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
1957	{
1958		arc_buf_hdr_t *hdr;
1959		kmutex_t *hash_mtx;
1960		int rc = 0;
1961
1962		hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
1963
1964		if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
1965			arc_buf_t *buf = hdr->b_buf;
1966
1967			ASSERT(buf);
1968			while (buf->b_data == NULL) {
1969				buf = buf->b_next;
1970				ASSERT(buf);
1971			}
1972			bcopy(buf->b_data, data, hdr->b_size);
1973		} else {
1974			rc = ENOENT;
1975		}
1976
1977		if (hash_mtx)
1978			mutex_exit(hash_mtx);
1979
1980		return (rc);
1981	}
1982
1983	void
1984	arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
1985	{
1986		ASSERT(buf->b_hdr != NULL);
1987		ASSERT(buf->b_hdr->b_state != arc.anon);
1988		ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
1989		buf->b_efunc = func;
1990		buf->b_private = private;
1991	}
1992
1993	/*
1994	 * This is used by the DMU to let the ARC know that a buffer is
1995	 * being evicted, so the ARC should clean up. If this arc buf
1996	 * is not yet in the evicted state, it will be put there.
1997	 */
1998	int
1999	arc_buf_evict(arc_buf_t *buf)
2000	{
2001		arc_buf_hdr_t *hdr;
2002		kmutex_t *hash_lock;
2003		arc_buf_t **bufp;
2004
2005		mutex_enter(&arc_eviction_mtx);
2006		hdr = buf->b_hdr;
2007		if (hdr == NULL) {
2008			/*
2009			 * We are in arc_do_user_evicts().
2010			 * NOTE: We can't be in arc_buf_add_ref() because
2011			 * that would violate the interface rules.
2012			 */
2013			ASSERT(buf->b_data == NULL);
2014			mutex_exit(&arc_eviction_mtx);
2015			return (0);
2016		} else if (buf->b_data == NULL) {
2017			arc_buf_t copy = *buf; /* structure assignment */
2018			/*
2019			 * We are on the eviction list. Process this buffer
2020			 * now but let arc_do_user_evicts() do the reaping.
2021			 */
2022			buf->b_efunc = NULL;
2023			buf->b_hdr = NULL;
2024			mutex_exit(&arc_eviction_mtx);
2025			VERIFY(copy.b_efunc(&copy) == 0);
2026			return (1);
2027		} else {
2028			/*
2029			 * Prevent a race with arc_evict()
2030			 */
2031			ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2032			buf->b_hdr = NULL;
2033		}
2034		mutex_exit(&arc_eviction_mtx);
2035
2036		hash_lock = HDR_LOCK(hdr);
2037		mutex_enter(hash_lock);
2038
2039		ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
2040
2041		/*
2042		 * Pull this buffer off of the hdr
2043		 */
2044		bufp = &hdr->b_buf;
2045		while (*bufp != buf)
2046			bufp = &(*bufp)->b_next;
2047		*bufp = buf->b_next;
2048
2049		ASSERT(buf->b_data != NULL);
2050		buf->b_hdr = hdr;
2051		arc_buf_destroy(buf, FALSE, FALSE);
2052
2053		if (hdr->b_datacnt == 0) {
2054			arc_state_t *old_state = hdr->b_state;
2055			arc_state_t *evicted_state;
2056
2057			ASSERT(refcount_is_zero(&hdr->b_refcnt));
2058
2059			evicted_state =
2060			    (old_state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
2061
2062			mutex_enter(&old_state->mtx);
2063			mutex_enter(&evicted_state->mtx);
2064
2065			arc_change_state(evicted_state, hdr, hash_lock);
2066			ASSERT(HDR_IN_HASH_TABLE(hdr));
2067			hdr->b_flags = ARC_IN_HASH_TABLE;
2068
2069			mutex_exit(&evicted_state->mtx);
2070			mutex_exit(&old_state->mtx);
2071		}
2072		mutex_exit(hash_lock);
2073
2074		VERIFY(buf->b_efunc(buf) == 0);
2075		buf->b_efunc = NULL;
2076		buf->b_private = NULL;
2077		buf->b_hdr = NULL;
2078		kmem_cache_free(buf_cache, buf);
2079		return (1);
2080	}
2081
2082	/*
2083	 * Release this buffer from the cache. This must be done
2084	 * after a read and prior to modifying the buffer contents.
2085	 * If the buffer has more than one reference, we must make
2086	 * a new hdr for the buffer.
2087	 */
2088	void
2089	arc_release(arc_buf_t *buf, void *tag)
2090	{
2091		arc_buf_hdr_t *hdr = buf->b_hdr;
2092		kmutex_t *hash_lock = HDR_LOCK(hdr);
2093
2094		/* this buffer is not on any list */
2095		ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2096
2097		if (hdr->b_state == arc.anon) {
2098			/* this buffer is already released */
2099			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2100			ASSERT(BUF_EMPTY(hdr));
2101			ASSERT(buf->b_efunc == NULL);
2102			return;
2103		}
2104
2105		mutex_enter(hash_lock);
2106
2107		/*
2108		 * Do we have more than one buf?
2109		 */
2110		if (hdr->b_buf != buf || buf->b_next != NULL) {
2111			arc_buf_hdr_t *nhdr;
2112			arc_buf_t **bufp;
2113			uint64_t blksz = hdr->b_size;
2114			spa_t *spa = hdr->b_spa;
2115
2116			ASSERT(hdr->b_datacnt > 1);
2117			/*
2118			 * Pull the data off of this buf and attach it to
2119			 * a new anonymous buf.
2120			 */
2121			(void) remove_reference(hdr, hash_lock, tag);
2122			bufp = &hdr->b_buf;
2123			while (*bufp != buf)
2124				bufp = &(*bufp)->b_next;
2125			*bufp = (*bufp)->b_next;
2126
2127			ASSERT3U(hdr->b_state->size, >=, hdr->b_size);
2128			atomic_add_64(&hdr->b_state->size, -hdr->b_size);
2129			if (refcount_is_zero(&hdr->b_refcnt)) {
2130				ASSERT3U(hdr->b_state->lsize, >=, hdr->b_size);
2131				atomic_add_64(&hdr->b_state->lsize, -hdr->b_size);
2132			}
2133			hdr->b_datacnt -= 1;
2134
2135			mutex_exit(hash_lock);
2136
2137			nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2138			nhdr->b_size = blksz;
2139			nhdr->b_spa = spa;
2140			nhdr->b_buf = buf;
2141			nhdr->b_state = arc.anon;
2142			nhdr->b_arc_access = 0;
2143			nhdr->b_flags = 0;
2144			nhdr->b_datacnt = 1;
2145			buf->b_hdr = nhdr;
2146			buf->b_next = NULL;
2147			(void) refcount_add(&nhdr->b_refcnt, tag);
2148			atomic_add_64(&arc.anon->size, blksz);
2149
2150			hdr = nhdr;
2151		} else {
2152			ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2153			ASSERT(!list_link_active(&hdr->b_arc_node));
2154			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2155			arc_change_state(arc.anon, hdr, hash_lock);
2156			hdr->b_arc_access = 0;
2157			mutex_exit(hash_lock);
2158			bzero(&hdr->b_dva, sizeof (dva_t));
2159			hdr->b_birth = 0;
2160			hdr->b_cksum0 = 0;
2161		}
2162		buf->b_efunc = NULL;
2163		buf->b_private = NULL;
2164	}
2165
2166	int
2167	arc_released(arc_buf_t *buf)
2168	{
2169		return (buf->b_data != NULL && buf->b_hdr->b_state == arc.anon);
2170	}
2171
2172	int
2173	arc_has_callback(arc_buf_t *buf)
2174	{
2175		return (buf->b_efunc != NULL);
2176	}
2177
2178	#ifdef ZFS_DEBUG
2179	int
2180	arc_referenced(arc_buf_t *buf)
2181	{
2182		return (refcount_count(&buf->b_hdr->b_refcnt));
2183	}
2184	#endif
2185
2186	static void
2187	arc_write_done(zio_t *zio)
2188	{
2189		arc_buf_t *buf;
2190		arc_buf_hdr_t *hdr;
2191		arc_callback_t *acb;
2192
2193		buf = zio->io_private;
2194		hdr = buf->b_hdr;
2195		acb = hdr->b_acb;
2196		hdr->b_acb = NULL;
2197		ASSERT(acb != NULL);
2198
2199		/* this buffer is on no lists and is not in the hash table */
2200		ASSERT3P(hdr->b_state, ==, arc.anon);
2201
2202		hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2203		hdr->b_birth = zio->io_bp->blk_birth;
2204		hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
2205		/*
2206		 * If the block to be written was all-zero, we may have
2207		 * compressed it away. In this case no write was performed
2208		 * so there will be no dva/birth-date/checksum. The buffer
2209		 * must therefor remain anonymous (and uncached).
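	 * Since such a header is BUF_EMPTY(), it can never be found by a
	 * hash lookup; there is no on-disk block for a later read to name
	 * it by.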
2210 */ 2211 if (!BUF_EMPTY(hdr)) { 2212 arc_buf_hdr_t *exists; 2213 kmutex_t *hash_lock; 2214 2215 exists = buf_hash_insert(hdr, &hash_lock); 2216 if (exists) { 2217 /* 2218 * This can only happen if we overwrite for 2219 * sync-to-convergence, because we remove 2220 * buffers from the hash table when we arc_free(). 2221 */ 2222 ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 2223 BP_IDENTITY(zio->io_bp))); 2224 ASSERT3U(zio->io_bp_orig.blk_birth, ==, 2225 zio->io_bp->blk_birth); 2226 2227 ASSERT(refcount_is_zero(&exists->b_refcnt)); 2228 arc_change_state(arc.anon, exists, hash_lock); 2229 mutex_exit(hash_lock); 2230 arc_hdr_destroy(exists); 2231 exists = buf_hash_insert(hdr, &hash_lock); 2232 ASSERT3P(exists, ==, NULL); 2233 } 2234 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2235 arc_access(hdr, hash_lock); 2236 mutex_exit(hash_lock); 2237 } else if (acb->acb_done == NULL) { 2238 int destroy_hdr; 2239 /* 2240 * This is an anonymous buffer with no user callback, 2241 * destroy it if there are no active references. 2242 */ 2243 mutex_enter(&arc_eviction_mtx); 2244 destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 2245 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2246 mutex_exit(&arc_eviction_mtx); 2247 if (destroy_hdr) 2248 arc_hdr_destroy(hdr); 2249 } else { 2250 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2251 } 2252 2253 if (acb->acb_done) { 2254 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 2255 acb->acb_done(zio, buf, acb->acb_private); 2256 } 2257 2258 kmem_free(acb, sizeof (arc_callback_t)); 2259 } 2260 2261 int 2262 arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, 2263 uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 2264 arc_done_func_t *done, void *private, int priority, int flags, 2265 uint32_t arc_flags, zbookmark_t *zb) 2266 { 2267 arc_buf_hdr_t *hdr = buf->b_hdr; 2268 arc_callback_t *acb; 2269 zio_t *rzio; 2270 2271 /* this is a private buffer - no locking required */ 2272 ASSERT3P(hdr->b_state, ==, arc.anon); 2273 ASSERT(BUF_EMPTY(hdr)); 2274 ASSERT(!HDR_IO_ERROR(hdr)); 2275 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 2276 ASSERT(hdr->b_acb == 0); 2277 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2278 acb->acb_done = done; 2279 acb->acb_private = private; 2280 acb->acb_byteswap = (arc_byteswap_func_t *)-1; 2281 hdr->b_acb = acb; 2282 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2283 rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, 2284 buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb); 2285 2286 if (arc_flags & ARC_WAIT) 2287 return (zio_wait(rzio)); 2288 2289 ASSERT(arc_flags & ARC_NOWAIT); 2290 zio_nowait(rzio); 2291 2292 return (0); 2293 } 2294 2295 int 2296 arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 2297 zio_done_func_t *done, void *private, uint32_t arc_flags) 2298 { 2299 arc_buf_hdr_t *ab; 2300 kmutex_t *hash_lock; 2301 zio_t *zio; 2302 2303 /* 2304 * If this buffer is in the cache, release it, so it 2305 * can be re-used. 2306 */ 2307 ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2308 if (ab != NULL) { 2309 /* 2310 * The checksum of blocks to free is not always 2311 * preserved (eg. on the deadlist). However, if it is 2312 * nonzero, it should match what we have in the cache. 2313 */ 2314 ASSERT(bp->blk_cksum.zc_word[0] == 0 || 2315 ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 2316 if (ab->b_state != arc.anon) 2317 arc_change_state(arc.anon, ab, hash_lock); 2318 if (HDR_IO_IN_PROGRESS(ab)) { 2319 /* 2320 * This should only happen when we prefetch. 
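			 * A prefetch holds no caller reference, so we cannot
			 * simply wait for the read to finish; instead we strip
			 * the header's identity and flag it ARC_FREED_IN_READ
			 * so the completing I/O knows the block has been freed.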
2321 */ 2322 ASSERT(ab->b_flags & ARC_PREFETCH); 2323 ASSERT3U(ab->b_datacnt, ==, 1); 2324 ab->b_flags |= ARC_FREED_IN_READ; 2325 if (HDR_IN_HASH_TABLE(ab)) 2326 buf_hash_remove(ab); 2327 ab->b_arc_access = 0; 2328 bzero(&ab->b_dva, sizeof (dva_t)); 2329 ab->b_birth = 0; 2330 ab->b_cksum0 = 0; 2331 ab->b_buf->b_efunc = NULL; 2332 ab->b_buf->b_private = NULL; 2333 mutex_exit(hash_lock); 2334 } else if (refcount_is_zero(&ab->b_refcnt)) { 2335 mutex_exit(hash_lock); 2336 arc_hdr_destroy(ab); 2337 atomic_add_64(&arc.deleted, 1); 2338 } else { 2339 /* 2340 * We still have an active reference on this 2341 * buffer. This can happen, e.g., from 2342 * dbuf_unoverride(). 2343 */ 2344 ASSERT(!HDR_IN_HASH_TABLE(ab)); 2345 ab->b_arc_access = 0; 2346 bzero(&ab->b_dva, sizeof (dva_t)); 2347 ab->b_birth = 0; 2348 ab->b_cksum0 = 0; 2349 ab->b_buf->b_efunc = NULL; 2350 ab->b_buf->b_private = NULL; 2351 mutex_exit(hash_lock); 2352 } 2353 } 2354 2355 zio = zio_free(pio, spa, txg, bp, done, private); 2356 2357 if (arc_flags & ARC_WAIT) 2358 return (zio_wait(zio)); 2359 2360 ASSERT(arc_flags & ARC_NOWAIT); 2361 zio_nowait(zio); 2362 2363 return (0); 2364 } 2365 2366 void 2367 arc_tempreserve_clear(uint64_t tempreserve) 2368 { 2369 atomic_add_64(&arc_tempreserve, -tempreserve); 2370 ASSERT((int64_t)arc_tempreserve >= 0); 2371 } 2372 2373 int 2374 arc_tempreserve_space(uint64_t tempreserve) 2375 { 2376 #ifdef ZFS_DEBUG 2377 /* 2378 * Once in a while, fail for no reason. Everything should cope. 2379 */ 2380 if (spa_get_random(10000) == 0) { 2381 dprintf("forcing random failure\n"); 2382 return (ERESTART); 2383 } 2384 #endif 2385 if (tempreserve > arc.c/4 && !arc.no_grow) 2386 arc.c = MIN(arc.c_max, tempreserve * 4); 2387 if (tempreserve > arc.c) 2388 return (ENOMEM); 2389 2390 /* 2391 * Throttle writes when the amount of dirty data in the cache 2392 * gets too large. We try to keep the cache less than half full 2393 * of dirty blocks so that our sync times don't grow too large. 2394 * Note: if two requests come in concurrently, we might let them 2395 * both succeed, when one of them should fail. Not a huge deal. 2396 * 2397 * XXX The limit should be adjusted dynamically to keep the time 2398 * to sync a dataset fixed (around 1-5 seconds?). 2399 */ 2400 2401 if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 && 2402 arc_tempreserve + arc.anon->size > arc.c / 4) { 2403 dprintf("failing, arc_tempreserve=%lluK anon=%lluK " 2404 "tempreserve=%lluK arc.c=%lluK\n", 2405 arc_tempreserve>>10, arc.anon->lsize>>10, 2406 tempreserve>>10, arc.c>>10); 2407 return (ERESTART); 2408 } 2409 atomic_add_64(&arc_tempreserve, tempreserve); 2410 return (0); 2411 } 2412 2413 void 2414 arc_init(void) 2415 { 2416 mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL); 2417 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 2418 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 2419 2420 /* Convert seconds to clock ticks */ 2421 arc_min_prefetch_lifespan = 1 * hz; 2422 2423 /* Start out with 1/8 of all memory */ 2424 arc.c = physmem * PAGESIZE / 8; 2425 2426 #ifdef _KERNEL 2427 /* 2428 * On architectures where the physical memory can be larger 2429 * than the addressable space (intel in 32-bit mode), we may 2430 * need to limit the cache to 1/8 of VM size. 
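	 * The size of the kernel heap arena (below) is used as the
	 * approximation of the addressable space.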
2431 */ 2432 arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 2433 #endif 2434 2435 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 2436 arc.c_min = MAX(arc.c / 4, 64<<20); 2437 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 2438 if (arc.c * 8 >= 1<<30) 2439 arc.c_max = (arc.c * 8) - (1<<30); 2440 else 2441 arc.c_max = arc.c_min; 2442 arc.c_max = MAX(arc.c * 6, arc.c_max); 2443 arc.c = arc.c_max; 2444 arc.p = (arc.c >> 1); 2445 2446 /* if kmem_flags are set, lets try to use less memory */ 2447 if (kmem_debugging()) 2448 arc.c = arc.c / 2; 2449 if (arc.c < arc.c_min) 2450 arc.c = arc.c_min; 2451 2452 arc.anon = &ARC_anon; 2453 arc.mru = &ARC_mru; 2454 arc.mru_ghost = &ARC_mru_ghost; 2455 arc.mfu = &ARC_mfu; 2456 arc.mfu_ghost = &ARC_mfu_ghost; 2457 arc.size = 0; 2458 2459 arc.hits = 0; 2460 arc.recycle_miss = 0; 2461 arc.evict_skip = 0; 2462 arc.mutex_miss = 0; 2463 2464 list_create(&arc.mru->list, sizeof (arc_buf_hdr_t), 2465 offsetof(arc_buf_hdr_t, b_arc_node)); 2466 list_create(&arc.mru_ghost->list, sizeof (arc_buf_hdr_t), 2467 offsetof(arc_buf_hdr_t, b_arc_node)); 2468 list_create(&arc.mfu->list, sizeof (arc_buf_hdr_t), 2469 offsetof(arc_buf_hdr_t, b_arc_node)); 2470 list_create(&arc.mfu_ghost->list, sizeof (arc_buf_hdr_t), 2471 offsetof(arc_buf_hdr_t, b_arc_node)); 2472 2473 buf_init(); 2474 2475 arc_thread_exit = 0; 2476 arc_eviction_list = NULL; 2477 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 2478 2479 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 2480 TS_RUN, minclsyspri); 2481 } 2482 2483 void 2484 arc_fini(void) 2485 { 2486 mutex_enter(&arc_reclaim_thr_lock); 2487 arc_thread_exit = 1; 2488 while (arc_thread_exit != 0) 2489 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 2490 mutex_exit(&arc_reclaim_thr_lock); 2491 2492 arc_flush(); 2493 2494 arc_dead = TRUE; 2495 2496 mutex_destroy(&arc_eviction_mtx); 2497 mutex_destroy(&arc_reclaim_lock); 2498 mutex_destroy(&arc_reclaim_thr_lock); 2499 cv_destroy(&arc_reclaim_thr_cv); 2500 2501 list_destroy(&arc.mru->list); 2502 list_destroy(&arc.mru_ghost->list); 2503 list_destroy(&arc.mfu->list); 2504 list_destroy(&arc.mfu_ghost->list); 2505 2506 buf_fini(); 2507 } 2508
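
/*
 * Illustrative sketch (not part of the build): a typical cached read
 * through the interface above.  spa, bp and zb come from a hypothetical
 * caller; "my_done" and "my_arg" are made-up names, and
 * byteswap_uint64_array, ZIO_PRIORITY_SYNC_READ and ZIO_FLAG_CANFAIL are
 * assumed from the surrounding ZFS code.
 *
 *	uint32_t aflags = ARC_WAIT;
 *
 *	error = arc_read(NULL, spa, bp, byteswap_uint64_array,
 *	    my_done, my_arg, ZIO_PRIORITY_SYNC_READ,
 *	    ZIO_FLAG_CANFAIL, &aflags, &zb);
 *
 * With ARC_WAIT, arc_read() returns only once the block is resident (or
 * the zio has failed); with ARC_NOWAIT it returns immediately and
 * my_done() is invoked from arc_read_done() when the data arrives.  In
 * either case the done callback is handed a referenced arc_buf_t, which
 * the caller must eventually drop with arc_buf_remove_ref(), or convert
 * to a private anonymous buffer with arc_release() before modifying it.
 */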